Source code for jittor.models.shufflenetv2


# ***************************************************************
# Copyright (c) 2023 Jittor. All Rights Reserved. 
# Maintainers: 
#     Wenyang Zhou <576825820@qq.com>
#     Dun Liang <randonlang@gmail.com>. 
# 
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
# This model was generated by the PyTorch converter.
import jittor as jt
from jittor import nn

__all__ = ['ShuffleNetV2', 'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0']

def channel_shuffle(x, groups):
    batchsize, num_channels, height, width = x.shape
    channels_per_group = num_channels // groups
    # reshape to (N, groups, C // groups, H, W), swap the group and channel axes,
    # then flatten back so that channels from different groups are interleaved
    x = jt.reshape(x, [batchsize, groups, channels_per_group, height, width])
    x = jt.transpose(x, (0, 2, 1, 3, 4))
    x = jt.reshape(x, [batchsize, -1, height, width])
    return x
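
# A minimal usage sketch (not part of the original module): with 4 channels and groups=2,
# channel order [0, 1, 2, 3] becomes [0, 2, 1, 3], so features produced by the two branches
# of an InvertedResidual block are interleaved before the next block.
#
#     x = jt.array([0., 1., 2., 3.]).reshape(1, 4, 1, 1)
#     y = channel_shuffle(x, 2)
#     print(y.reshape(-1).numpy())  # expected: [0. 2. 1. 3.]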

class InvertedResidual(nn.Module):
    def __init__(self, inp, oup, stride):
        super(InvertedResidual, self).__init__()
        if (not (1 <= stride <= 3)):
            raise ValueError('illegal stride value')
        self.stride = stride
        branch_features = oup // 2
        # a stride-1 block splits its input in half, so inp must equal 2 * branch_features
        assert (self.stride != 1) or (inp == (branch_features << 1))
        if self.stride > 1:
            # branch1 downsamples the shortcut path: depthwise 3x3 followed by pointwise 1x1
            self.branch1 = nn.Sequential(
                self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
                nn.BatchNorm(inp),
                nn.Conv(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm(branch_features),
                nn.Relu())
        else:
            self.branch1 = nn.Sequential()
        # branch2: pointwise 1x1 -> depthwise 3x3 -> pointwise 1x1, ending with branch_features channels
        self.branch2 = nn.Sequential(
            nn.Conv(inp if (self.stride > 1) else branch_features, branch_features,
                    kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm(branch_features),
            nn.Relu(),
            self.depthwise_conv(branch_features, branch_features, kernel_size=3,
                                stride=self.stride, padding=1),
            nn.BatchNorm(branch_features),
            nn.Conv(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm(branch_features),
            nn.Relu())

    @staticmethod
    def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
        # groups=i gives one filter per input channel, i.e. a depthwise convolution
        return nn.Conv(i, o, kernel_size, stride, padding, bias=bias, groups=i)

    def execute(self, x):
        if self.stride == 1:
            # split channels in half: x1 is the identity path, x2 goes through branch2
            x1 = x[:, 0:x.shape[1] // 2]
            x2 = x[:, x.shape[1] // 2:x.shape[1]]
            out = jt.concat([x1, self.branch2(x2)], dim=1)
        else:
            # downsampling block: both branches process the full input
            out = jt.concat([self.branch1(x), self.branch2(x)], dim=1)
        # interleave channels of the two halves before the next block
        out = channel_shuffle(out, 2)
        return out
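
# A minimal sketch (not part of the original module) of how the two block variants behave:
# a stride-2 block halves the spatial size and may change the channel count, while a
# stride-1 block requires inp == oup (each half carries branch_features channels).
#
#     block_down = InvertedResidual(24, 48, stride=2)
#     block_keep = InvertedResidual(48, 48, stride=1)
#     x = jt.randn(1, 24, 56, 56)
#     y = block_keep(block_down(x))
#     print(y.shape)  # expected: [1,48,28,28,]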

class ShuffleNetV2(nn.Module):
    '''ShuffleNetV2 comes from the paper `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design <https://arxiv.org/abs/1807.11164>`__. Its core idea is to combine channel split with channel shuffle, reducing computational cost while preserving model accuracy.

    Args:
        - stages_repeats (list[int]): number of InvertedResidual blocks in each stage; must have length 3.
        - stages_out_channels (list[int]): output channels of each stage; must have length 5.
        - num_classes (int, optional): number of classification categories. Default: 1000.
        - inverted_residual (nn.Module, optional): block class used to build each stage. Default: InvertedResidual.

    Attributes:
        - conv1, conv5 (nn.Sequential): stem and head convolution blocks.
        - maxpool (nn.Pool): 3x3 stride-2 max pooling after the stem.
        - stage2, stage3, stage4 (nn.Sequential): stacks of inverted residual blocks.
        - fc (nn.Linear): final classification layer.

    Example:
        >>> import jittor as jt
        >>> from jittor.models.shufflenetv2 import ShuffleNetV2
        >>> stages_repeats = [4, 8, 4]
        >>> stages_out_channels = [24, 48, 96, 192, 1024]
        >>> net = ShuffleNetV2(stages_repeats, stages_out_channels, num_classes=1000)
        >>> input = jt.randn(10, 3, 256, 256)  # a tensor of shape [batch, channels, height, width]
        >>> net(input).shape
        [10,1000,]
    '''
    def __init__(self, stages_repeats, stages_out_channels, num_classes=1000, inverted_residual=InvertedResidual):
        super(ShuffleNetV2, self).__init__()
        if len(stages_repeats) != 3:
            raise ValueError('expected stages_repeats as list of 3 positive ints')
        if len(stages_out_channels) != 5:
            raise ValueError('expected stages_out_channels as list of 5 positive ints')
        self._stage_out_channels = stages_out_channels
        # stem: 3x3 stride-2 convolution followed by 3x3 stride-2 max pooling
        input_channels = 3
        output_channels = self._stage_out_channels[0]
        self.conv1 = nn.Sequential(
            nn.Conv(input_channels, output_channels, 3, 2, 1, bias=False),
            nn.BatchNorm(output_channels),
            nn.Relu())
        input_channels = output_channels
        self.maxpool = nn.Pool(kernel_size=3, stride=2, padding=1, op='maximum')
        # stages 2-4: one stride-2 block followed by (repeats - 1) stride-1 blocks
        stage_names = ['stage{}'.format(i) for i in [2, 3, 4]]
        for name, repeats, output_channels in zip(stage_names, stages_repeats, self._stage_out_channels[1:]):
            seq = [inverted_residual(input_channels, output_channels, 2)]
            for i in range(repeats - 1):
                seq.append(inverted_residual(output_channels, output_channels, 1))
            setattr(self, name, nn.Sequential(*seq))
            input_channels = output_channels
        # head: 1x1 convolution, global average pooling (in _forward_impl) and classifier
        output_channels = self._stage_out_channels[-1]
        self.conv5 = nn.Sequential(
            nn.Conv(input_channels, output_channels, 1, 1, 0, bias=False),
            nn.BatchNorm(output_channels),
            nn.Relu())
        self.fc = nn.Linear(output_channels, num_classes)

    def _forward_impl(self, x):
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.conv5(x)
        x = x.mean([2, 3])  # global average pooling over the spatial dimensions
        x = self.fc(x)
        return x

    def execute(self, x):
        return self._forward_impl(x)
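
# A minimal sketch (not part of the original module): because the stages are ordinary
# submodules, the backbone can be run without the classifier to obtain pooled features.
#
#     net = ShuffleNetV2([4, 8, 4], [24, 48, 96, 192, 1024])
#     x = jt.randn(1, 3, 224, 224)
#     feat = net.conv5(net.stage4(net.stage3(net.stage2(net.maxpool(net.conv1(x))))))
#     pooled = feat.mean([2, 3])  # same global average pooling as _forward_impl
#     print(pooled.shape)         # expected: [1,1024,]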

def _shufflenetv2(arch, *args):
    '''Create a ShuffleNetV2 model.

    ShuffleNetV2 comes from the paper `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design <https://arxiv.org/abs/1807.11164>`__. Its core idea is to combine channel split with channel shuffle, reducing computational cost while preserving model accuracy.

    Args:
        - `arch` (str): architecture name; not used when building the model here, kept for interface symmetry.
        - `*args`: positional arguments forwarded to the ShuffleNetV2 constructor.

    Returns:
        - a new ShuffleNetV2 model instance.

    Example:
        >>> model = _shufflenetv2('shufflenetv2_x1.0', [4, 8, 4], [24, 116, 232, 464, 1024])
    '''
    model = ShuffleNetV2(*args)
    return model

def shufflenet_v2_x0_5(pretrained=False):
    '''Build a shufflenet_v2_x0_5 model.

    ShuffleNetV2 comes from the paper `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design <https://arxiv.org/abs/1807.11164>`__. Its core idea is to combine channel split with channel shuffle, reducing computational cost while preserving model accuracy.

    Args:
        - `pretrained` (bool, optional): whether to load weights pretrained on ImageNet. Default: `False`.

    Returns:
        - the constructed shufflenet_v2_x0_5 model. If `pretrained` is `True`, the model is loaded with ImageNet-pretrained weights.

    Example:
        >>> import jittor as jt
        >>> from jittor.models.shufflenetv2 import *
        >>> net = shufflenet_v2_x0_5(pretrained=False)
        >>> x = jt.rand(1, 3, 224, 224)
        >>> y = net(x)
        >>> y.shape
        [1,1000,]
    '''
    model = _shufflenetv2('shufflenetv2_x0.5', [4, 8, 4], [24, 48, 96, 192, 1024])
    if pretrained:
        model.load("jittorhub://shufflenet_v2_x0_5.pkl")
    return model

def shufflenet_v2_x1_0(pretrained=False):
    '''Build a shufflenet_v2_x1_0 model.

    ShuffleNetV2 comes from the paper `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design <https://arxiv.org/abs/1807.11164>`__. Its core idea is to combine channel split with channel shuffle, reducing computational cost while preserving model accuracy.

    Args:
        - `pretrained` (bool, optional): whether to load weights pretrained on ImageNet. Default: `False`.

    Returns:
        - the constructed shufflenet_v2_x1_0 model. If `pretrained` is `True`, the model is loaded with ImageNet-pretrained weights.

    Example:
        >>> import jittor as jt
        >>> from jittor.models.shufflenetv2 import *
        >>> net = shufflenet_v2_x1_0(pretrained=False)
        >>> x = jt.rand(1, 3, 224, 224)
        >>> y = net(x)
        >>> y.shape
        [1,1000,]
    '''
    model = _shufflenetv2('shufflenetv2_x1.0', [4, 8, 4], [24, 116, 232, 464, 1024])
    if pretrained:
        model.load("jittorhub://shufflenet_v2_x1_0.pkl")
    return model

def shufflenet_v2_x1_5(pretrained=False):
    '''Build a shufflenet_v2_x1_5 model.

    ShuffleNetV2 comes from the paper `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design <https://arxiv.org/abs/1807.11164>`__. Its core idea is to combine channel split with channel shuffle, reducing computational cost while preserving model accuracy.

    Args:
        - `pretrained` (bool, optional): whether to load weights pretrained on ImageNet. Default: `False`.

    Returns:
        - the constructed shufflenet_v2_x1_5 model. If `pretrained` is `True`, the model is loaded with ImageNet-pretrained weights.

    Example:
        >>> import jittor as jt
        >>> from jittor.models.shufflenetv2 import *
        >>> net = shufflenet_v2_x1_5(pretrained=False)
        >>> x = jt.rand(1, 3, 224, 224)
        >>> y = net(x)
        >>> y.shape
        [1,1000,]
    '''
    model = _shufflenetv2('shufflenetv2_x1.5', [4, 8, 4], [24, 176, 352, 704, 1024])
    if pretrained:
        model.load("jittorhub://shufflenet_v2_x1_5.pkl")
    return model

def shufflenet_v2_x2_0(pretrained=False):
    '''Build a shufflenet_v2_x2_0 model.

    ShuffleNetV2 comes from the paper `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design <https://arxiv.org/abs/1807.11164>`__. Its core idea is to combine channel split with channel shuffle, reducing computational cost while preserving model accuracy.

    Args:
        - `pretrained` (bool, optional): whether to load weights pretrained on ImageNet. Default: `False`.

    Returns:
        - the constructed shufflenet_v2_x2_0 model. If `pretrained` is `True`, the model is loaded with ImageNet-pretrained weights.

    Example:
        >>> import jittor as jt
        >>> from jittor.models.shufflenetv2 import *
        >>> net = shufflenet_v2_x2_0(pretrained=False)
        >>> x = jt.rand(1, 3, 224, 224)
        >>> y = net(x)
        >>> y.shape
        [1,1000,]
    '''
    model = _shufflenetv2('shufflenetv2_x2.0', [4, 8, 4], [24, 244, 488, 976, 2048])
    if pretrained:
        model.load("jittorhub://shufflenet_v2_x2_0.pkl")
    return model
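
# A minimal fine-tuning sketch (not part of the original module; 10 target classes is an
# assumption): the predefined variants hard-code num_classes=1000, so the classifier head
# is replaced after construction.
#
#     net = shufflenet_v2_x1_0(pretrained=False)  # set True to start from ImageNet weights
#     net.fc = nn.Linear(1024, 10)                # conv5 outputs 1024 channels for this variant
#     y = net(jt.rand(2, 3, 224, 224))
#     print(y.shape)                              # expected: [2,10,]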