Source code for jittor.models.mnasnet

# ***************************************************************
# Copyright (c) 2023 Jittor. All Rights Reserved. 
# Maintainers: 
#     Wenyang Zhou <576825820@qq.com>
#     Dun Liang <randonlang@gmail.com>. 
# 
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
# This model is generated by pytorch converter.

import jittor as jt
from jittor import nn
__all__ = ['MNASNet', 'mnasnet0_5', 'mnasnet0_75', 'mnasnet1_0', 'mnasnet1_3']
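# The reference TensorFlow implementation of MnasNet uses a batch-norm decay of
# 0.9997; Jittor (like PyTorch) expresses this as momentum = 1 - decay.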
_BN_MOMENTUM = (1 - 0.9997)

class _InvertedResidual(nn.Module):
    def __init__(self, in_ch, out_ch, kernel_size, stride, expansion_factor, bn_momentum=0.1):
        super(_InvertedResidual, self).__init__()
        assert stride in [1, 2]
        assert kernel_size in [3, 5]
        mid_ch = in_ch * expansion_factor
        # The residual connection is only applied when the block preserves both
        # the channel count and the spatial resolution.
        self.apply_residual = (in_ch == out_ch) and (stride == 1)
        self.layers = nn.Sequential(
            # Pointwise expansion.
            nn.Conv(in_ch, mid_ch, 1, bias=False),
            nn.BatchNorm(mid_ch, momentum=bn_momentum),
            nn.Relu(),
            # Depthwise convolution.
            nn.Conv(mid_ch, mid_ch, kernel_size, padding=kernel_size // 2,
                    stride=stride, groups=mid_ch, bias=False),
            nn.BatchNorm(mid_ch, momentum=bn_momentum),
            nn.Relu(),
            # Linear pointwise projection: note the absence of an activation.
            nn.Conv(mid_ch, out_ch, 1, bias=False),
            nn.BatchNorm(out_ch, momentum=bn_momentum),
        )

    def execute(self, input):
        if self.apply_residual:
            # Skip connection: valid because the block preserves shape.
            return self.layers(input) + input
        else:
            return self.layers(input)
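
# A minimal sketch of the residual behaviour of `_InvertedResidual` (the input
# shape is an assumption chosen for illustration):
#
#   >>> block = _InvertedResidual(16, 16, 3, 1, 3)  # in_ch == out_ch, stride 1
#   >>> block.apply_residual
#   True
#   >>> block(jt.randn(1, 16, 32, 32)).shape        # output shape matches input
#   [1,16,32,32,]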

def _stack(in_ch, out_ch, kernel_size, stride, exp_factor, repeats, bn_momentum):
    ''' Creates a stack of `repeats` inverted residual blocks. '''
    assert repeats >= 1
    # Only the first block may change the channel count and stride; the
    # remaining blocks preserve shape, so their residual connections apply.
    first = _InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor,
                              bn_momentum=bn_momentum)
    remaining = []
    for _ in range(1, repeats):
        remaining.append(_InvertedResidual(out_ch, out_ch, kernel_size, 1,
                                           exp_factor, bn_momentum=bn_momentum))
    return nn.Sequential(first, *remaining)
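
# A minimal sketch of `_stack` (channel counts and input shape are assumptions
# chosen for illustration): the first block downsamples 16 -> 24 channels at
# stride 2, and the two remaining blocks preserve shape so that their residual
# connections apply.
#
#   >>> stack = _stack(16, 24, 3, 2, 3, 3, _BN_MOMENTUM)
#   >>> stack(jt.randn(1, 16, 32, 32)).shape
#   [1,24,16,16,]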

def _round_to_multiple_of(val, divisor, round_up_bias=0.9):
    ''' Rounds `val` to the nearest multiple of `divisor` (never below
    `divisor` itself), but rounds up instead whenever rounding down would keep
    less than `round_up_bias` of the original value. '''
    assert 0.0 < round_up_bias < 1.0
    new_val = max(divisor, int(val + divisor / 2) // divisor * divisor)
    return new_val if new_val >= round_up_bias * val else new_val + divisor
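
# Worked examples of the rounding rule above (values chosen for illustration):
#
#   >>> _round_to_multiple_of(12, 8)  # rounds up to 16, and 16 >= 0.9 * 12
#   16
#   >>> _round_to_multiple_of(27, 8)  # nearest multiple is 24, but 24 < 0.9 * 27
#   32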

def _get_depths(alpha):
    ''' Scales the base channel counts by `alpha`, rounding each result to a
    multiple of 8 for hardware-friendly channel sizes. '''
    depths = [24, 40, 80, 96, 192, 320]
    return [_round_to_multiple_of(depth * alpha, 8) for depth in depths]
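
# For example, computed with the helpers above:
#
#   >>> _get_depths(1.0)  # the base depths are already multiples of 8
#   [24, 40, 80, 96, 192, 320]
#   >>> _get_depths(0.5)
#   [16, 24, 40, 48, 96, 160]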

class MNASNet(nn.Module):
    ''' MNASNet model from `MnasNet: Platform-Aware Neural Architecture Search
    for Mobile <https://arxiv.org/abs/1807.11626>`__. It uses depthwise
    separable convolutions and linear bottleneck layers to reduce computational
    cost while preserving representational power.

    The depthwise separable convolution used in MNASNet can be written as:

    .. math::
        y = PWConv(DConv(x))

    where :math:`DConv` is a depthwise convolution and :math:`PWConv` is a
    pointwise (1x1) convolution.

    Args:
        * alpha (float): depth multiplier controlling how the channel counts of
          the network layers are scaled; must be greater than 0.
        * num_classes (int, optional): number of classification classes. Default: 1000.
        * dropout (float, optional): drop probability of the dropout layer. Default: 0.2.

    Attributes:
        - layers (nn.Sequential): the feature-extraction part of MNASNet.
        - classifier (nn.Sequential): the classification head of MNASNet.

    Example:
        >>> import jittor as jt
        >>> from jittor.models.mnasnet import MNASNet
        >>> network = MNASNet(alpha=1.0, num_classes=1000, dropout=0.2)
        >>> input = jt.randn(10, 3, 256, 256)  # a tensor of shape [batch, channels, height, width]
        >>> network(input).shape
        [10,1000,]
    '''
    _version = 2

    def __init__(self, alpha, num_classes=1000, dropout=0.2):
        super(MNASNet, self).__init__()
        assert alpha > 0.0
        self.alpha = alpha
        self.num_classes = num_classes
        depths = _get_depths(alpha)
        layers = [
            # First layer: a regular strided convolution.
            nn.Conv(3, 32, 3, padding=1, stride=2, bias=False),
            nn.BatchNorm(32, momentum=_BN_MOMENTUM),
            nn.Relu(),
            # Depthwise separable convolution, no skip connection.
            nn.Conv(32, 32, 3, padding=1, stride=1, groups=32, bias=False),
            nn.BatchNorm(32, momentum=_BN_MOMENTUM),
            nn.Relu(),
            nn.Conv(32, 16, 1, padding=0, stride=1, bias=False),
            nn.BatchNorm(16, momentum=_BN_MOMENTUM),
            # MNASNet blocks: stacks of inverted residuals.
            _stack(16, depths[0], 3, 2, 3, 3, _BN_MOMENTUM),
            _stack(depths[0], depths[1], 5, 2, 3, 3, _BN_MOMENTUM),
            _stack(depths[1], depths[2], 5, 2, 6, 3, _BN_MOMENTUM),
            _stack(depths[2], depths[3], 3, 1, 6, 2, _BN_MOMENTUM),
            _stack(depths[3], depths[4], 5, 2, 6, 4, _BN_MOMENTUM),
            _stack(depths[4], depths[5], 3, 1, 6, 1, _BN_MOMENTUM),
            # Final 1x1 convolution mapping to the classifier input size.
            nn.Conv(depths[5], 1280, 1, padding=0, stride=1, bias=False),
            nn.BatchNorm(1280, momentum=_BN_MOMENTUM),
            nn.Relu(),
        ]
        self.layers = nn.Sequential(*layers)
        self.classifier = nn.Sequential(nn.Dropout(p=dropout),
                                        nn.Linear(1280, num_classes))

    def execute(self, x):
        x = self.layers(x)
        # Global average pooling over the spatial dimensions.
        x = x.mean([2, 3])
        return self.classifier(x)
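
# A minimal fine-tuning sketch (the 10-class head is an assumption chosen for
# illustration; the 1280-dim feature size comes from `self.layers` above):
#
#   >>> net = MNASNet(alpha=1.0)
#   >>> net.classifier = nn.Sequential(nn.Dropout(p=0.2), nn.Linear(1280, 10))
#   >>> net(jt.randn(2, 3, 224, 224)).shape
#   [2,10,]
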
def mnasnet0_5(pretrained=False, **kwargs):
    ''' Constructs a MnasNet model with depth multiplier 0.5.

    MnasNet comes from `MnasNet: Platform-Aware Neural Architecture Search for
    Mobile <https://arxiv.org/abs/1807.11626>`__. It uses depthwise separable
    convolutions and linear bottleneck layers to reduce computational cost
    while preserving representational power.

    Args:
        - `pretrained` (bool, optional): whether to load pretrained weights. Default: `False`.
        - `kwargs`: other optional arguments.

    Returns:
        - the constructed MnasNet 0.5 model. If `pretrained` is `True`, the
          returned model is pretrained on ImageNet.

    Example:
        >>> import jittor as jt
        >>> from jittor.models.mnasnet import *
        >>> net = mnasnet0_5(pretrained=False)
        >>> x = jt.rand(1, 3, 224, 224)
        >>> y = net(x)
        >>> y.shape
        [1,1000,]
    '''
    model = MNASNet(0.5, **kwargs)
    if pretrained:
        model.load("jittorhub://mnasnet0_5.pkl")
    return model

def mnasnet0_75(pretrained=False, **kwargs):
    ''' Constructs a MnasNet model with depth multiplier 0.75.

    MnasNet comes from `MnasNet: Platform-Aware Neural Architecture Search for
    Mobile <https://arxiv.org/abs/1807.11626>`__. It uses depthwise separable
    convolutions and linear bottleneck layers to reduce computational cost
    while preserving representational power.

    Args:
        - `pretrained` (bool, optional): whether to load pretrained weights. Default: `False`.
        - `kwargs`: other optional arguments.

    Returns:
        - the constructed MnasNet 0.75 model. If `pretrained` is `True`, the
          returned model is pretrained on ImageNet.

    Example:
        >>> import jittor as jt
        >>> from jittor.models.mnasnet import *
        >>> net = mnasnet0_75(pretrained=False)
        >>> x = jt.rand(1, 3, 224, 224)
        >>> y = net(x)
        >>> y.shape
        [1,1000,]
    '''
    model = MNASNet(0.75, **kwargs)
    if pretrained:
        model.load("jittorhub://mnasnet0_75.pkl")
    return model

def mnasnet1_0(pretrained=False, **kwargs):
    ''' Constructs a MnasNet model with depth multiplier 1.0.

    MnasNet comes from `MnasNet: Platform-Aware Neural Architecture Search for
    Mobile <https://arxiv.org/abs/1807.11626>`__. It uses depthwise separable
    convolutions and linear bottleneck layers to reduce computational cost
    while preserving representational power.

    Args:
        - `pretrained` (bool, optional): whether to load pretrained weights. Default: `False`.
        - `kwargs`: other optional arguments.

    Returns:
        - the constructed MnasNet 1.0 model. If `pretrained` is `True`, the
          returned model is pretrained on ImageNet.

    Example:
        >>> import jittor as jt
        >>> from jittor.models.mnasnet import *
        >>> net = mnasnet1_0(pretrained=False)
        >>> x = jt.rand(1, 3, 224, 224)
        >>> y = net(x)
        >>> y.shape
        [1,1000,]
    '''
    model = MNASNet(1.0, **kwargs)
    if pretrained:
        model.load("jittorhub://mnasnet1_0.pkl")
    return model

def mnasnet1_3(pretrained=False, **kwargs):
    ''' Constructs a MnasNet model with depth multiplier 1.3.

    MnasNet comes from `MnasNet: Platform-Aware Neural Architecture Search for
    Mobile <https://arxiv.org/abs/1807.11626>`__. It uses depthwise separable
    convolutions and linear bottleneck layers to reduce computational cost
    while preserving representational power.

    Args:
        - `pretrained` (bool, optional): whether to load pretrained weights. Default: `False`.
        - `kwargs`: other optional arguments.

    Returns:
        - the constructed MnasNet 1.3 model. If `pretrained` is `True`, the
          returned model is pretrained on ImageNet.

    Example:
        >>> import jittor as jt
        >>> from jittor.models.mnasnet import *
        >>> net = mnasnet1_3(pretrained=False)
        >>> x = jt.rand(1, 3, 224, 224)
        >>> y = net(x)
        >>> y.shape
        [1,1000,]
    '''
    model = MNASNet(1.3, **kwargs)
    if pretrained:
        model.load("jittorhub://mnasnet1_3.pkl")
    return model