@@ -181,7 +181,7 @@ result: {'acc': 0.71976314102564111} ckpt=/path/to/checkpoint/mobilenet-200_625.
 | Parameters | MobilenetV2 | |
 | -------------------------- | ---------------------------------------------------------- | ------------------------- |
-| Model Version | | large |
+| Model Version | V1 | V1 |
 | Resource | Ascend 910, cpu:2.60GHz 56cores, memory:314G | NV SMX2 V100-32G |
 | uploaded Date | 05/06/2020 | 05/06/2020 |
 | MindSpore Version | 0.3.0 | 0.3.0 |
@@ -189,29 +189,13 @@ result: {'acc': 0.71976314102564111} ckpt=/path/to/checkpoint/mobilenet-200_625.
 | Training Parameters | src/config.py | src/config.py |
 | Optimizer | Momentum | Momentum |
 | Loss Function | SoftmaxCrossEntropy | SoftmaxCrossEntropy |
-| outputs | | |
-| Loss | | 1.913 |
-| Accuracy | | ACC1[77.09%] ACC5[92.57%] |
-| Total time | | |
-| Params (M) | | |
-| Checkpoint for Fine tuning | | |
-| Model for inference | | |
-#### Inference Performance
-| Parameters | | | |
-| -------------------------- | ----------------------------- | ------------------------- | -------------------- |
-| Model Version | V1 | | |
-| Resource | Ascend 910 | NV SMX2 V100-32G | Ascend 310 |
-| uploaded Date | 05/06/2020 | 05/22/2020 | |
-| MindSpore Version | 0.2.0 | 0.2.0 | 0.2.0 |
-| Dataset | ImageNet, 1.2W | ImageNet, 1.2W | ImageNet, 1.2W |
-| batch_size | | 130(8P) | |
-| outputs | | | |
-| Accuracy | | ACC1[72.07%] ACC5[90.90%] | |
-| Speed | | | |
-| Total time | | | |
-| Model for inference | | | |
+| outputs | probability | probability |
+| Loss | 1.908 | 1.913 |
+| Accuracy | ACC1[71.78%] | ACC1[71.08%] |
+| Total time | 753 min | 845 min |
+| Params (M) | 3.3 M | 3.3 M |
+| Checkpoint for Fine tuning | 27.3 M | 27.3 M |
+| Scripts | [Link](https://gitee.com/mindspore/mindspore/tree/master/model_zoo/official/cv/mobilenetv2)|
 # [Description of Random Situation](#contents)
@@ -21,18 +21,14 @@ from mindspore.common import dtype as mstype
 from src.dataset import create_dataset
 from src.config import set_config
-from src.mobilenetV2 import MobileNetV2Backbone, MobileNetV2Head, mobilenet_v2
 from src.args import eval_parse_args
-from src.models import load_ckpt
+from src.models import define_net, load_ckpt
 from src.utils import switch_precision, set_context
 
 if __name__ == '__main__':
     args_opt = eval_parse_args()
     config = set_config(args_opt)
-    backbone_net = MobileNetV2Backbone(platform=args_opt.platform)
-    head_net = MobileNetV2Head(input_channel=backbone_net.out_channels, num_classes=config.num_classes)
-    net = mobilenet_v2(backbone_net, head_net)
+    backbone_net, head_net, net = define_net(args_opt, config)
 
     #load the trained checkpoint file to the net for evaluation
     if args_opt.head_ckpt:
@@ -51,7 +47,7 @@ if __name__ == '__main__':
     loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     model = Model(net, loss_fn=loss, metrics={'acc'})
-    res = model.eval(dataset, dataset_sink_mode=False)
+    res = model.eval(dataset)
     print(f"result:{res}\npretrain_ckpt={args_opt.pretrain_ckpt}")
 
     if args_opt.head_ckpt:
         print(f"head_ckpt={args_opt.head_ckpt}")
@@ -72,7 +72,7 @@ class ConvBNReLU(nn.Cell):
         >>> ConvBNReLU(16, 256, kernel_size=1, stride=1, groups=1)
     """
-    def __init__(self, platform, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
+    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
         super(ConvBNReLU, self).__init__()
         padding = (kernel_size - 1) // 2
         in_channels = in_planes
@@ -109,7 +109,7 @@ class InvertedResidual(nn.Cell):
         >>> ResidualBlock(3, 256, 1, 1)
     """
-    def __init__(self, platform, inp, oup, stride, expand_ratio):
+    def __init__(self, inp, oup, stride, expand_ratio):
         super(InvertedResidual, self).__init__()
         assert stride in [1, 2]
@@ -118,10 +118,10 @@ class InvertedResidual(nn.Cell):
         layers = []
         if expand_ratio != 1:
-            layers.append(ConvBNReLU(platform, inp, hidden_dim, kernel_size=1))
+            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
         layers.extend([
             # dw
-            ConvBNReLU(platform, hidden_dim, hidden_dim,
+            ConvBNReLU(hidden_dim, hidden_dim,
                        stride=stride, groups=hidden_dim),
             # pw-linear
             nn.Conv2d(hidden_dim, oup, kernel_size=1,
@@ -157,7 +157,7 @@ class MobileNetV2Backbone(nn.Cell):
         >>> MobileNetV2(num_classes=1000)
     """
-    def __init__(self, platform, width_mult=1., inverted_residual_setting=None, round_nearest=8,
+    def __init__(self, width_mult=1., inverted_residual_setting=None, round_nearest=8,
                  input_channel=32, last_channel=1280):
         super(MobileNetV2Backbone, self).__init__()
         block = InvertedResidual
@@ -178,16 +178,16 @@ class MobileNetV2Backbone(nn.Cell):
         # building first layer
         input_channel = _make_divisible(input_channel * width_mult, round_nearest)
         self.out_channels = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
-        features = [ConvBNReLU(platform, 3, input_channel, stride=2)]
+        features = [ConvBNReLU(3, input_channel, stride=2)]
         # building inverted residual blocks
         for t, c, n, s in self.cfgs:
             output_channel = _make_divisible(c * width_mult, round_nearest)
             for i in range(n):
                 stride = s if i == 0 else 1
-                features.append(block(platform, input_channel, output_channel, stride, expand_ratio=t))
+                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                 input_channel = output_channel
         # building last several layers
-        features.append(ConvBNReLU(platform, input_channel, self.out_channels, kernel_size=1))
+        features.append(ConvBNReLU(input_channel, self.out_channels, kernel_size=1))
         # make it nn.CellList
         self.features = nn.SequentialCell(features)
         self._initialize_weights()
@@ -293,10 +293,10 @@ class MobileNetV2(nn.Cell):
         >>> MobileNetV2(backbone, head)
     """
-    def __init__(self, platform, num_classes=1000, width_mult=1., has_dropout=False, inverted_residual_setting=None, \
+    def __init__(self, num_classes=1000, width_mult=1., has_dropout=False, inverted_residual_setting=None, \
                  round_nearest=8, input_channel=32, last_channel=1280):
         super(MobileNetV2, self).__init__()
-        self.backbone = MobileNetV2Backbone(platform=platform, width_mult=width_mult, \
+        self.backbone = MobileNetV2Backbone(width_mult=width_mult, \
             inverted_residual_setting=inverted_residual_setting, \
             round_nearest=round_nearest, input_channel=input_channel, last_channel=last_channel).get_features
         self.head = MobileNetV2Head(input_channel=self.backbone.out_channel, num_classes=num_classes, \
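All of the network hunks above (src/mobilenetV2.py, per the imports in eval.py) make the same change: the `platform` argument disappears from `ConvBNReLU`, `InvertedResidual`, `MobileNetV2Backbone`, and `MobileNetV2`, so the cells are built identically regardless of device target. A brief sketch of construction after the change, using only the signatures visible in these hunks; the channel numbers are illustrative and not taken from the config.

```python
# Sketch: constructing the refactored cells without a platform argument.
from src.mobilenetV2 import (ConvBNReLU, InvertedResidual,
                             MobileNetV2Backbone, MobileNetV2Head, mobilenet_v2)

# Individual blocks, matching the updated __init__ signatures.
stem = ConvBNReLU(3, 32, kernel_size=3, stride=2)           # illustrative channels
block = InvertedResidual(32, 16, stride=1, expand_ratio=1)  # illustrative channels

# Backbone plus classification head, composed the same way define_net does below.
backbone_net = MobileNetV2Backbone()
head_net = MobileNetV2Head(input_channel=backbone_net.out_channels,
                           num_classes=1000)
net = mobilenet_v2(backbone_net, head_net)
```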
@@ -120,7 +120,7 @@ def load_ckpt(network, pretrain_ckpt_path, trainable=True):
             param.requires_grad = False
 
 def define_net(args, config):
-    backbone_net = MobileNetV2Backbone(platform=args.platform)
+    backbone_net = MobileNetV2Backbone()
     head_net = MobileNetV2Head(input_channel=backbone_net.out_channels, num_classes=config.num_classes)
     net = mobilenet_v2(backbone_net, head_net)
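In src/models.py, `define_net` now builds the backbone without a platform argument, and the `load_ckpt` signature in the hunk header (`trainable=True`) together with the `param.requires_grad = False` context line indicates the helper can freeze what it loads. Below is a hedged sketch of a fine-tuning setup that combines the two; reusing the evaluation argument parser here and pairing a frozen backbone with a fresh head are assumptions, not something this hunk shows.

```python
# Sketch: reuse pretrained backbone weights and train only the head.
from src.args import eval_parse_args
from src.config import set_config
from src.models import define_net, load_ckpt

# Assumption: any parser exposing platform/pretrain_ckpt would do here.
args = eval_parse_args()
config = set_config(args)

backbone_net, head_net, net = define_net(args, config)

# trainable=False drives the `param.requires_grad = False` branch shown above,
# so the backbone parameters are loaded and then frozen.
load_ckpt(backbone_net, args.pretrain_ckpt, trainable=False)

# head_net keeps its fresh initialization and stays trainable, so `net`
# can be fine-tuned on a new classification task on top of frozen features.
```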
@@ -145,29 +145,13 @@ result: {'acc': 0.71976314102564111} ckpt=/path/to/checkpoint/mobilenet-200_625.
 | Training Parameters | src/config.py |
 | Optimizer | Momentum |
 | Loss Function | SoftmaxCrossEntropy |
-| outputs | |
+| outputs | probability |
 | Loss | 1.913 |
 | Accuracy | ACC1[77.57%] ACC5[92.51%] |
-| Total time | |
-| Params (M) | |
-| Checkpoint for Fine tuning | |
-| Model for inference | |
-#### Inference Performance
-| Parameters | |
-| -------------------------- | -------------------- |
-| Model Version | |
-| Resource | NV SMX2 V100-32G |
-| uploaded Date | 05/22/2020 |
-| MindSpore Version | 0.2.0 |
-| Dataset | ImageNet, 1.2W |
-| batch_size | 130(8P) |
-| outputs | |
-| Accuracy | ACC1[75.43%] ACC5[92.51%] |
-| Speed | |
-| Total time | |
-| Model for inference | |
+| Total time | 1433 min |
+| Params (M) | 5.48 M |
+| Checkpoint for Fine tuning | 44 M |
+| Scripts | [Link](https://gitee.com/mindspore/mindspore/tree/master/model_zoo/official/cv/mobilenetv3)|
 # [Description of Random Situation](#contents)
@@ -46,7 +46,7 @@ fi
 mkdir ../eval
 cd ../eval || exit
 
-# luanch
+# launch
 python ${BASEPATH}/../eval.py \
     --device_target=$1 \
     --dataset_path=$2 \
@@ -46,7 +46,7 @@ fi
 mkdir ../eval
 cd ../eval || exit
 
-# luanch
+# launch
 python ${BASEPATH}/../eval.py \
     --device_target=$1 \
    --dataset_path=$2 \