
delete redundant code

tags/v1.0.0
zhaoting 5 years ago
commit d421f49e60
62 changed files with 61 additions and 96 deletions
  1. +1 -1  model_zoo/official/cv/alexnet/README.md
  2. +1 -1  model_zoo/official/cv/deeplabv3/src/data/data_generator.py
  3. +1 -1  model_zoo/official/cv/faster_rcnn/src/FasterRcnn/bbox_assign_sample_stage2.py
  4. +0 -3  model_zoo/official/cv/googlenet/train.py
  5. +1 -1  model_zoo/official/cv/inceptionv3/README.md
  6. +1 -1  model_zoo/official/cv/lenet/README.md
  7. +2 -2  model_zoo/official/cv/lenet/src/lenet.py
  8. +1 -1  model_zoo/official/cv/lenet_quant/src/lenet_fusion.py
  9. +1 -1  model_zoo/official/cv/lenet_quant/src/lenet_quant.py
 10. +1 -1  model_zoo/official/cv/maskrcnn/src/maskrcnn/bbox_assign_sample_stage2.py
 11. +1 -1  model_zoo/official/cv/mobilenetv2/README.md
 12. +5 -3  model_zoo/official/cv/mobilenetv2/src/mobilenetV2.py
 13. +2 -2  model_zoo/official/cv/mobilenetv2/src/models.py
 14. +2 -2  model_zoo/official/cv/mobilenetv2_quant/src/utils.py
 15. +1 -1  model_zoo/official/cv/mobilenetv3/Readme.md
 16. +2 -2  model_zoo/official/cv/mobilenetv3/src/mobilenetV3.py
 17. +2 -2  model_zoo/official/cv/mobilenetv3/train.py
 18. +0 -1  model_zoo/official/cv/nasnet/src/dataset.py
 19. +2 -2  model_zoo/official/cv/resnet_thor/src/dataset_helper.py
 20. +1 -1  model_zoo/official/cv/resnet_thor/src/resnet_thor.py
 21. +1 -1  model_zoo/official/cv/resnext50/README.md
 22. +0 -3  model_zoo/official/cv/resnext50/src/utils/optimizers__init__.py
 23. +1 -1  model_zoo/official/cv/shufflenetv2/Readme.md
 24. +0 -1  model_zoo/official/cv/shufflenetv2/src/dataset.py
 25. +0 -1  model_zoo/official/cv/shufflenetv2/src/shufflenetv2.py
 26. +0 -1  model_zoo/official/cv/ssd/src/dataset.py
 27. +0 -1  model_zoo/official/cv/vgg16/train.py
 28. +1 -1  model_zoo/official/cv/yolov3_darknet53/src/transforms.py
 29. +1 -1  model_zoo/official/cv/yolov3_darknet53_quant/src/transforms.py
 30. +0 -1  model_zoo/official/cv/yolov3_resnet18/src/dataset.py
 31. +1 -1  model_zoo/official/nlp/bert/pretrain_eval.py
 32. +2 -2  model_zoo/official/nlp/bert/src/bert_model.py
 33. +0 -1  model_zoo/official/nlp/bert/src/utils.py
 34. +1 -1  model_zoo/official/nlp/bert_thor/README.md
 35. +1 -1  model_zoo/official/nlp/bert_thor/pretrain_eval.py
 36. +2 -2  model_zoo/official/nlp/bert_thor/src/bert_model.py
 37. +1 -1  model_zoo/official/nlp/bert_thor/src/dataset_helper.py
 38. +2 -2  model_zoo/official/nlp/bert_thor/src/thor_for_bert.py
 39. +2 -2  model_zoo/official/nlp/bert_thor/src/thor_for_bert_arg.py
 40. +0 -1  model_zoo/official/nlp/bert_thor/src/utils.py
 41. +0 -1  model_zoo/official/nlp/lstm/eval.py
 42. +2 -2  model_zoo/official/nlp/lstm/src/imdb.py
 43. +0 -1  model_zoo/official/nlp/lstm/train.py
 44. +1 -1  model_zoo/official/nlp/mass/README.md
 45. +1 -2  model_zoo/official/nlp/mass/src/transformer/beam_search.py
 46. +0 -2  model_zoo/official/nlp/tinybert/src/tinybert_for_gd_td.py
 47. +2 -2  model_zoo/official/nlp/tinybert/src/tinybert_model.py
 48. +1 -1  model_zoo/official/nlp/transformer/src/beam_search.py
 49. +1 -1  model_zoo/official/nlp/transformer/src/transformer_for_train.py
 50. +1 -0  model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py
 51. +1 -1  model_zoo/research/cv/ghostnet/Readme.md
 52. +0 -1  model_zoo/research/cv/ghostnet/src/ghostnet600.py
 53. +1 -1  model_zoo/research/cv/ghostnet_quant/Readme.md
 54. +1 -1  model_zoo/research/cv/ghostnet_quant/src/ghostnet.py
 55. +1 -1  model_zoo/research/cv/resnet50_adv_pruning/Readme.md
 56. +1 -1  model_zoo/research/cv/ssd_ghostnet/README.md
 57. +0 -8  model_zoo/research/cv/ssd_ghostnet/src/ssd_ghostnet.py
 58. +0 -3  model_zoo/research/cv/ssd_ghostnet/train.py
 59. +0 -1  model_zoo/utils/graph_to_mindrecord/writer.py
 60. +1 -6  tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/wide_and_deep.py
 61. +1 -2  tests/st/networks/models/bert/src/fused_layer_norm.py
 62. +1 -1  tests/st/networks/models/deeplabv3/src/md_dataset.py

model_zoo/official/cv/alexnet/README.md  (+1 -1)

@@ -50,7 +50,7 @@ Dataset used: [CIFAR-10](<http://www.cs.toronto.edu/~kriz/cifar.html>)
 - Hardware(Ascend/GPU)
   - Prepare hardware environment with Ascend or GPU processor.
 - Framework
-  - [MindSpore](http://10.90.67.50/mindspore/archive/20200506/OpenSource/me_vm_x86/)
+  - [MindSpore](https://www.mindspore.cn/install/en)
 - For more information, please check the resources below:
   - [MindSpore tutorials](https://www.mindspore.cn/tutorial/zh-CN/master/index.html)
   - [MindSpore API](https://www.mindspore.cn/api/zh-CN/master/index.html)


model_zoo/official/cv/deeplabv3/src/data/data_generator.py  (+1 -1)

@@ -13,8 +13,8 @@
 # limitations under the License.
 # ============================================================================
-import cv2
 import numpy as np
+import cv2
 import mindspore.dataset as de
 cv2.setNumThreads(0)


model_zoo/official/cv/faster_rcnn/src/FasterRcnn/bbox_assign_sample_stage2.py  (+1 -1)

@@ -114,7 +114,7 @@ class BboxAssignSampleForRcnn(nn.Cell):
         bboxes = self.select(self.cast(self.tile(self.reshape(self.cast(valid_mask, mstype.int32), \
                              (self.num_bboxes, 1)), (1, 4)), mstype.bool_), \
                              bboxes, self.check_anchor_two)
-        # 1 dim = gt, 2 dim = bbox
         overlaps = self.iou(bboxes, gt_bboxes_i)

         max_overlaps_w_gt_index, max_overlaps_w_gt = self.max_gt(overlaps)


model_zoo/official/cv/googlenet/train.py  (+0 -3)

@@ -166,15 +166,12 @@ if __name__ == '__main__':
         parameter_name = x.name
         if parameter_name.endswith('.bias'):
             # all bias not using weight decay
-            # print('no decay:{}'.format(parameter_name))
             no_decay_params.append(x)
         elif parameter_name.endswith('.gamma'):
             # bn weight bias not using weight decay, be carefully for now x not include BN
-            # print('no decay:{}'.format(parameter_name))
             no_decay_params.append(x)
         elif parameter_name.endswith('.beta'):
             # bn weight bias not using weight decay, be carefully for now x not include BN
-            # print('no decay:{}'.format(parameter_name))
             no_decay_params.append(x)
         else:
             decay_params.append(x)


model_zoo/official/cv/inceptionv3/README.md  (+1 -1)

@@ -54,7 +54,7 @@ For FP16 operators, if the input data type is FP32, the backend of MindSpore wil
 - Hardware(Ascend/GPU)
   - Prepare hardware environment with Ascend or GPU processor. If you want to try Ascend , please send the [application form](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/file/other/Ascend%20Model%20Zoo%E4%BD%93%E9%AA%8C%E8%B5%84%E6%BA%90%E7%94%B3%E8%AF%B7%E8%A1%A8.docx) to ascend@huawei.com. Once approved, you can get the resources.
 - Framework
-  - [MindSpore](http://10.90.67.50/mindspore/archive/20200506/OpenSource/me_vm_x86/)
+  - [MindSpore](https://www.mindspore.cn/install/en)
 - For more information, please check the resources below:
   - [MindSpore tutorials](https://www.mindspore.cn/tutorial/zh-CN/master/index.html)
   - [MindSpore API](https://www.mindspore.cn/api/zh-CN/master/index.html)


model_zoo/official/cv/lenet/README.md  (+1 -1)

@@ -56,7 +56,7 @@ Dataset used: [MNIST](<http://yann.lecun.com/exdb/mnist/>)
 - Hardware(Ascend/GPU/CPU)
   - Prepare hardware environment with Ascend, GPU, or CPU processor.
 - Framework
-  - [MindSpore](http://10.90.67.50/mindspore/archive/20200506/OpenSource/me_vm_x86/)
+  - [MindSpore](https://www.mindspore.cn/install/en)
 - For more information, please check the resources below:
   - [MindSpore tutorials](https://www.mindspore.cn/tutorial/zh-CN/master/index.html)
   - [MindSpore API](https://www.mindspore.cn/api/zh-CN/master/index.html)


model_zoo/official/cv/lenet/src/lenet.py  (+2 -2)

@@ -22,8 +22,8 @@ class LeNet5(nn.Cell):
     Lenet network

     Args:
-        num_class (int): Num classes. Default: 10.
-        num_channel (int): Num channels. Default: 1.
+        num_class (int): Number of classes. Default: 10.
+        num_channel (int): Number of channels. Default: 1.

     Returns:
         Tensor, output tensor


model_zoo/official/cv/lenet_quant/src/lenet_fusion.py  (+1 -1)

@@ -21,7 +21,7 @@ class LeNet5(nn.Cell):
     Lenet network

     Args:
-        num_class (int): Num classes. Default: 10.
+        num_class (int): Number of classes. Default: 10.

     Returns:
         Tensor, output tensor


model_zoo/official/cv/lenet_quant/src/lenet_quant.py  (+1 -1)

@@ -22,7 +22,7 @@ class LeNet5(nn.Cell):
     Lenet network

     Args:
-        num_class (int): Num classes. Default: 10.
+        num_class (int): Number of classes. Default: 10.

     Returns:
         Tensor, output tensor


model_zoo/official/cv/maskrcnn/src/maskrcnn/bbox_assign_sample_stage2.py  (+1 -1)

@@ -118,7 +118,7 @@ class BboxAssignSampleForRcnn(nn.Cell):
         bboxes = self.select(self.cast(self.tile(self.reshape(self.cast(valid_mask, mstype.int32), \
                              (self.num_bboxes, 1)), (1, 4)), mstype.bool_), \
                              bboxes, self.check_anchor_two)
-        # 1 dim = gt, 2 dim = bbox
         overlaps = self.iou(bboxes, gt_bboxes_i)

         max_overlaps_w_gt_index, max_overlaps_w_gt = self.max_gt(overlaps)


model_zoo/official/cv/mobilenetv2/README.md  (+1 -1)

@@ -51,7 +51,7 @@ For FP16 operators, if the input data type is FP32, the backend of MindSpore wil
 - Hardware(Ascend/GPU/CPU)
   - Prepare hardware environment with Ascend、GPU or CPU processor. If you want to try Ascend, please send the [application form](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/file/other/Ascend%20Model%20Zoo%E4%BD%93%E9%AA%8C%E8%B5%84%E6%BA%90%E7%94%B3%E8%AF%B7%E8%A1%A8.docx) to ascend@huawei.com. Once approved, you can get the resources.
 - Framework
-  - [MindSpore](http://10.90.67.50/mindspore/archive/20200506/OpenSource/me_vm_x86/)
+  - [MindSpore](https://www.mindspore.cn/install/en)
 - For more information, please check the resources below:
   - [MindSpore tutorials](https://www.mindspore.cn/tutorial/zh-CN/master/index.html)
   - [MindSpore API](https://www.mindspore.cn/api/zh-CN/master/index.html)


model_zoo/official/cv/mobilenetv2/src/mobilenetV2.py  (+5 -3)

@@ -145,7 +145,7 @@ class MobileNetV2Backbone(nn.Cell):
     MobileNetV2 architecture.

     Args:
-        class_num (Cell): number of classes.
+        class_num (int): number of classes.
         width_mult (int): Channels multiplier for round to 8/16 and others. Default is 1.
         has_dropout (bool): Is dropout used. Default is false
         inverted_residual_setting (list): Inverted residual settings. Default is None
@@ -233,7 +233,7 @@ class MobileNetV2Head(nn.Cell):
     MobileNetV2 architecture.

     Args:
-        class_num (Cell): number of classes.
+        class_num (int): Number of classes. Default is 1000.
         has_dropout (bool): Is dropout used. Default is false
     Returns:
         Tensor, output tensor.
@@ -284,11 +284,13 @@ class MobileNetV2(nn.Cell):
     MobileNetV2 architecture.

     Args:
-        class_num (Cell): number of classes.
+        class_num (int): number of classes.
         width_mult (int): Channels multiplier for round to 8/16 and others. Default is 1.
         has_dropout (bool): Is dropout used. Default is false
         inverted_residual_setting (list): Inverted residual settings. Default is None
         round_nearest (list): Channel round to . Default is 8
+        backbone(nn.Cell): Backbone of MobileNetV2.
+        head(nn.Cell): Classification head of MobileNetV2.
     Returns:
         Tensor, output tensor.




model_zoo/official/cv/mobilenetv2/src/models.py  (+2 -2)

@@ -29,8 +29,8 @@ class CrossEntropyWithLabelSmooth(_Loss):
     CrossEntropyWith LabelSmooth.

     Args:
-        smooth_factor (float): smooth factor, default=0.
-        num_classes (int): num classes
+        smooth_factor (float): smooth factor. Default is 0.
+        num_classes (int): number of classes. Default is 1000.

     Returns:
         None.


model_zoo/official/cv/mobilenetv2_quant/src/utils.py  (+2 -2)

@@ -83,8 +83,8 @@ class CrossEntropyWithLabelSmooth(_Loss):
     CrossEntropyWith LabelSmooth.

     Args:
-        smooth_factor (float): smooth factor, default=0.
-        num_classes (int): num classes
+        smooth_factor (float): smooth factor for label smooth. Default is 0.
+        num_classes (int): number of classes. Default is 1000.

     Returns:
         None.


model_zoo/official/cv/mobilenetv3/Readme.md  (+1 -1)

@@ -45,7 +45,7 @@ Dataset used: [imagenet](http://www.image-net.org/)
 - Hardware(GPU)
   - Prepare hardware environment with GPU processor.
 - Framework
-  - [MindSpore](http://10.90.67.50/mindspore/archive/20200506/OpenSource/me_vm_x86/)
+  - [MindSpore](https://www.mindspore.cn/install/en)
 - For more information, please check the resources below:
   - [MindSpore tutorials](https://www.mindspore.cn/tutorial/zh-CN/master/index.html)
   - [MindSpore API](https://www.mindspore.cn/api/zh-CN/master/index.html)


model_zoo/official/cv/mobilenetv3/src/mobilenetV3.py  (+2 -2)

@@ -83,7 +83,7 @@ class SE(nn.Cell):
     SE warpper definition.

     Args:
-        num_out (int): Output channel.
+        num_out (int): Numbers of output channels.
         ratio (int): middle output ratio.

     Returns:
@@ -301,7 +301,7 @@ class MobileNetV3(nn.Cell):
     def _make_layer(self, kernel_size, exp_ch, out_channel, use_se, act_func, stride=1):
         mid_planes = exp_ch
         out_planes = out_channel
-        #num_in, num_mid, num_out, kernel_size, stride=1, act_type='relu', use_se=False):
         layer = ResUnit(self.inplanes, mid_planes, out_planes,
                         kernel_size, stride=stride, act_type=act_func, use_se=use_se)
         self.inplanes = out_planes


model_zoo/official/cv/mobilenetv3/train.py  (+2 -2)

@@ -68,8 +68,8 @@ class CrossEntropyWithLabelSmooth(_Loss):
     CrossEntropyWith LabelSmooth.

     Args:
-        smooth_factor (float): smooth factor, default=0.
-        num_classes (int): num classes
+        smooth_factor (float): smooth factor for label smooth. Default is 0.
+        num_classes (int): number of classes. Default is 1000.

     Returns:
         None.


model_zoo/official/cv/nasnet/src/dataset.py  (+0 -1)

@@ -47,7 +47,6 @@ def create_dataset(dataset_path, config, do_train, repeat_num=1):
             C.RandomCropDecodeResize(config.image_size),
             C.RandomHorizontalFlip(prob=0.5),
             C.RandomColorAdjust(brightness=0.4, saturation=0.5)  # fast mode
-            # C.RandomColorAdjust(brightness=0.4, contrast=0.5, saturation=0.5, hue=0.2)
         ]
     else:
         trans = [


model_zoo/official/cv/resnet_thor/src/dataset_helper.py  (+2 -2)

@@ -151,7 +151,7 @@ class _DatasetIter:


 class _DatasetIterMSLoopSink(_DatasetIter):
-    """Iter for context (device_target=Ascend)"""
+    """Iter for context when device_target is Ascend"""
     def __init__(self, dataset, sink_size, epoch_num, iter_first_order):
         super().__init__(dataset, sink_size, epoch_num)
         sink_count = 1
@@ -179,7 +179,7 @@ class _DatasetIterMSLoopSink(_DatasetIter):


 class _DatasetIterMS(_DatasetIter):
-    """Iter for MS(enable_loop_sink=False)."""
+    """Iter for MS when enable_loop_sink is False."""
     def __init__(self, dataset, sink_size, epoch_num):
         super().__init__(dataset, sink_size, epoch_num)
         if sink_size > 0:


model_zoo/official/cv/resnet_thor/src/resnet_thor.py  (+1 -1)

@@ -283,7 +283,7 @@ class ResNet(nn.Cell):
                          frequency=frequency, batch_size=batch_size)
         self.bn1 = _bn(64)
         self.relu = P.ReLU()
-        # self.maxpool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
         self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

         self.layer1 = self._make_layer(block,


model_zoo/official/cv/resnext50/README.md  (+1 -1)

@@ -56,7 +56,7 @@ For FP16 operators, if the input data type is FP32, the backend of MindSpore wil
 - Hardware(Ascend/GPU)
   - Prepare hardware environment with Ascend or GPU processor. If you want to try Ascend , please send the [application form](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/file/other/Ascend%20Model%20Zoo%E4%BD%93%E9%AA%8C%E8%B5%84%E6%BA%90%E7%94%B3%E8%AF%B7%E8%A1%A8.docx) to ascend@huawei.com. Once approved, you can get the resources.
 - Framework
-  - [MindSpore](http://10.90.67.50/mindspore/archive/20200506/OpenSource/me_vm_x86/)
+  - [MindSpore](https://www.mindspore.cn/install/en)
 - For more information, please check the resources below:
   - [MindSpore tutorials](https://www.mindspore.cn/tutorial/zh-CN/master/index.html)
   - [MindSpore API](https://www.mindspore.cn/api/zh-CN/master/index.html)


model_zoo/official/cv/resnext50/src/utils/optimizers__init__.py  (+0 -3)

@@ -23,15 +23,12 @@ def get_param_groups(network):
         parameter_name = x.name
         if parameter_name.endswith('.bias'):
             # all bias not using weight decay
-            # print('no decay:{}'.format(parameter_name))
             no_decay_params.append(x)
         elif parameter_name.endswith('.gamma'):
             # bn weight bias not using weight decay, be carefully for now x not include BN
-            # print('no decay:{}'.format(parameter_name))
             no_decay_params.append(x)
         elif parameter_name.endswith('.beta'):
             # bn weight bias not using weight decay, be carefully for now x not include BN
-            # print('no decay:{}'.format(parameter_name))
             no_decay_params.append(x)
         else:
             decay_params.append(x)


model_zoo/official/cv/shufflenetv2/Readme.md  (+1 -1)

@@ -40,7 +40,7 @@ Dataset used: [imagenet](http://www.image-net.org/)
 - Hardware(GPU)
   - Prepare hardware environment with GPU processor.
 - Framework
-  - [MindSpore](http://10.90.67.50/mindspore/archive/20200506/OpenSource/me_vm_x86/)
+  - [MindSpore](https://www.mindspore.cn/install/en)
 - For more information, please check the resources below:
   - [MindSpore tutorials](https://www.mindspore.cn/tutorial/zh-CN/master/index.html)
   - [MindSpore API](https://www.mindspore.cn/api/zh-CN/master/index.html)


model_zoo/official/cv/shufflenetv2/src/dataset.py  (+0 -1)

@@ -66,7 +66,6 @@ def create_dataset(dataset_path, do_train, rank, group_size, repeat_num=1):
     trans += [
         toBGR(),
         C.Rescale(1.0 / 255.0, 0.0),
-        # C.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
         C.HWC2CHW(),
         C2.TypeCast(mstype.float32)
     ]


model_zoo/official/cv/shufflenetv2/src/shufflenetv2.py  (+0 -1)

@@ -79,7 +79,6 @@ class ShuffleV2Block(nn.Cell):

     def channel_shuffle(self, x):
         batchsize, num_channels, height, width = P.Shape()(x)
-        ##assert (num_channels % 4 == 0)
         x = P.Reshape()(x, (batchsize * num_channels // 2, 2, height * width,))
         x = P.Transpose()(x, (1, 0, 2,))
         x = P.Reshape()(x, (2, -1, num_channels // 2, height, width,))


model_zoo/official/cv/ssd/src/dataset.py  (+0 -1)

@@ -162,7 +162,6 @@ def create_voc_label(is_training):
     voc_dir = config.voc_dir
     cls_map = {name: i for i, name in enumerate(config.coco_classes)}
     sub_dir = 'train' if is_training else 'eval'
-    # sub_dir = 'train'
     voc_dir = os.path.join(voc_dir, sub_dir)
     if not os.path.isdir(voc_dir):
         raise ValueError(f'Cannot find {sub_dir} dataset path.')


model_zoo/official/cv/vgg16/train.py  (+0 -1)

@@ -14,7 +14,6 @@
 # ============================================================================
 """
 #################train vgg16 example on cifar10########################
-python train.py --data_path=$DATA_HOME --device_id=$DEVICE_ID
 """
 import argparse
 import datetime


model_zoo/official/cv/yolov3_darknet53/src/transforms.py  (+1 -1)

@@ -146,7 +146,7 @@ def _preprocess_true_boxes(true_boxes, anchors, in_shape, num_classes,
     # input_shape is [h, w]
     true_boxes[..., 0:2] = boxes_xy / input_shape[::-1]
     true_boxes[..., 2:4] = boxes_wh / input_shape[::-1]
-    # true_boxes = [xywh]
+    # true_boxes [x, y, w, h]
     grid_shapes = [input_shape // 32, input_shape // 16, input_shape // 8]
     # grid_shape [h, w]


model_zoo/official/cv/yolov3_darknet53_quant/src/transforms.py  (+1 -1)

@@ -153,7 +153,7 @@ def _preprocess_true_boxes(true_boxes, anchors, in_shape, num_classes,
     # input_shape is [h, w]
     true_boxes[..., 0:2] = boxes_xy / input_shape[::-1]
     true_boxes[..., 2:4] = boxes_wh / input_shape[::-1]
-    # true_boxes = [xywh]
+    # true_boxes [x, y, w, h]
     grid_shapes = [input_shape // 32, input_shape // 16, input_shape // 8]
     # grid_shape [h, w]


model_zoo/official/cv/yolov3_resnet18/src/dataset.py  (+0 -1)

@@ -44,7 +44,6 @@ def preprocess_fn(image, box, is_training):
         num_layers = anchors.shape[0] // 3
         anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
         true_boxes = np.array(true_boxes, dtype='float32')
-        # input_shape = np.array([in_shape, in_shape], dtype='int32')
         input_shape = np.array(in_shape, dtype='int32')
         boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2.
         boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]


model_zoo/official/nlp/bert/pretrain_eval.py  (+1 -1)

@@ -105,7 +105,7 @@ class BertPretrainEva(nn.Cell):


 def get_enwiki_512_dataset(batch_size=1, repeat_count=1, distribute_file=''):
     '''
-    Get enwiki seq_length=512 dataset
+    Get enwiki dataset when seq_length is 512.
     '''
     ds = de.TFRecordDataset([cfg.data_file], cfg.schema_file, columns_list=["input_ids", "input_mask", "segment_ids",
                                                                             "masked_lm_positions", "masked_lm_ids",


model_zoo/official/nlp/bert/src/bert_model.py  (+2 -2)

@@ -490,7 +490,7 @@ class BertAttention(nn.Cell):

         # use_relative_position, supplementary logic
         if self.use_relative_positions:
-            # 'relations_keys' = [F|T, F|T, H]
+            # relations_keys is [F|T, F|T, H]
             relations_keys = self._generate_relative_positions_embeddings()
             relations_keys = self.cast_compute_type(relations_keys)
             # query_layer_t is [F, B, N, H]
@@ -533,7 +533,7 @@

         # use_relative_position, supplementary logic
         if self.use_relative_positions:
-            # 'relations_values' = [F|T, F|T, H]
+            # relations_values is [F|T, F|T, H]
             relations_values = self._generate_relative_positions_embeddings()
             relations_values = self.cast_compute_type(relations_values)
             # attention_probs_t is [F, B, N, T]


model_zoo/official/nlp/bert/src/utils.py  (+0 -1)

@@ -165,7 +165,6 @@ def LoadNewestCkpt(load_finetune_checkpoint_dir, steps_per_epoch, epoch_num, pre
         name_ext = os.path.splitext(filename)
         if name_ext[-1] != ".ckpt":
             continue
-        #steps_per_epoch = ds.get_dataset_size()
         if filename.find(prefix) == 0 and not filename[pre_len].isalpha():
             index = filename[pre_len:].find("-")
             if index == 0 and max_num == 0:


model_zoo/official/nlp/bert_thor/README.md  (+1 -1)

@@ -49,7 +49,7 @@ The classical first-order optimization algorithm, such as SGD, has a small amoun
 - Hardware(Ascend/GPU)
   - Prepare hardware environment with Ascend or GPU processor. If you want to try Ascend , please send the [application form](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/file/other/Ascend%20Model%20Zoo%E4%BD%93%E9%AA%8C%E8%B5%84%E6%BA%90%E7%94%B3%E8%AF%B7%E8%A1%A8.docx) to ascend@huawei.com. Once approved, you can get the resources.
 - Framework
-  - [MindSpore](http://10.90.67.50/mindspore/archive/20200506/OpenSource/me_vm_x86/)
+  - [MindSpore](https://www.mindspore.cn/install/en)
 - For more information, please check the resources below:
   - [MindSpore tutorials](https://www.mindspore.cn/tutorial/zh-CN/master/index.html)
   - [MindSpore API](https://www.mindspore.cn/api/zh-CN/master/index.html)


model_zoo/official/nlp/bert_thor/pretrain_eval.py  (+1 -1)

@@ -110,7 +110,7 @@ class BertPretrainEva(nn.Cell):


 def get_enwiki_512_dataset(batch_size=1, repeat_count=1, distribute_file=''):
     '''
-    Get enwiki seq_length=512 dataset
+    Get enwiki dataset when seq_length is 512.
     '''
     ds = de.TFRecordDataset([cfg.data_file], cfg.schema_file, columns_list=["input_ids", "input_mask", "segment_ids",
                                                                             "masked_lm_positions", "masked_lm_ids",


model_zoo/official/nlp/bert_thor/src/bert_model.py  (+2 -2)

@@ -566,7 +566,7 @@ class BertAttention(nn.Cell):

         # use_relative_position, supplementary logic
         if self.use_relative_positions:
-            # 'relations_keys' = [F|T, F|T, H]
+            # relations_keys is [F|T, F|T, H]
             relations_keys = self._generate_relative_positions_embeddings()
             relations_keys = self.cast_compute_type(relations_keys)
             # query_layer_t is [F, B, N, H]
@@ -609,7 +609,7 @@

         # use_relative_position, supplementary logic
         if self.use_relative_positions:
-            # 'relations_values' = [F|T, F|T, H]
+            # relations_values is [F|T, F|T, H]
             relations_values = self._generate_relative_positions_embeddings()
             relations_values = self.cast_compute_type(relations_values)
             # attention_probs_t is [F, B, N, T]


model_zoo/official/nlp/bert_thor/src/dataset_helper.py  (+1 -1)

@@ -155,7 +155,7 @@ class _DatasetIter:


 class _DatasetIterMSLoopSink(_DatasetIter):
-    """Iter for context (device_target=Ascend)"""
+    """Iter for context, the device_target is Ascend."""

     def __init__(self, dataset, sink_size, epoch_num, iter_first_order):
         super().__init__(dataset, sink_size, epoch_num)


model_zoo/official/nlp/bert_thor/src/thor_for_bert.py  (+2 -2)

@@ -198,7 +198,7 @@ class THOR(Optimizer):
                 g = F.depend(g, fake_G)
                 new_grads = new_grads + (g, pooler_bias)

-                # for cls1 fc layer: mlm
+                # cls1 fully connect layer for masked language model(mlm)
                 mlm_fc_idx = encoder_layers_num * self.num_hidden_layers + 8
                 matrix_idx = self.num_hidden_layers * 6 + 4
                 g = gradients[mlm_fc_idx]
@@ -327,7 +327,7 @@
                 g = self.cast(g, mstype.float32)
                 new_grads = new_grads + (g, pooler_bias)

-                # for cls1 fc layer: mlm
+                # cls1 fully connect layer for masked language model(mlm)
                 mlm_fc_idx = encoder_layers_num * self.num_hidden_layers + 8
                 matrix_idx = self.num_hidden_layers * 6 + 4
                 g = gradients[mlm_fc_idx]


model_zoo/official/nlp/bert_thor/src/thor_for_bert_arg.py  (+2 -2)

@@ -203,7 +203,7 @@ class THOR(Optimizer):
                 g = F.depend(g, fake_G)
                 new_grads = new_grads + (g, pooler_bias)

-                # for cls1 fc layer: mlm
+                # cls1 fully connect layer for masked language model(mlm)
                 mlm_fc_idx = encoder_layers_num * self.num_hidden_layers + 8
                 matrix_idx = self.num_hidden_layers * 6 + 4
                 g = gradients[mlm_fc_idx]
@@ -333,7 +333,7 @@
                 g = self.cast(g, mstype.float32)
                 new_grads = new_grads + (g, pooler_bias)

-                # for cls1 fc layer: mlm
+                # cls1 fully connect layer for masked language model(mlm)
                 mlm_fc_idx = encoder_layers_num * self.num_hidden_layers + 8
                 matrix_idx = self.num_hidden_layers * 6 + 4
                 g = gradients[mlm_fc_idx]


model_zoo/official/nlp/bert_thor/src/utils.py  (+0 -1)

@@ -129,7 +129,6 @@ def LoadNewestCkpt(load_finetune_checkpoint_dir, steps_per_epoch, epoch_num, pre
         name_ext = os.path.splitext(filename)
         if name_ext[-1] != ".ckpt":
             continue
-        # steps_per_epoch = ds.get_dataset_size()
         if filename.find(prefix) == 0 and not filename[pre_len].isalpha():
             index = filename[pre_len:].find("-")
             if index == 0 and max_num == 0:


model_zoo/official/nlp/lstm/eval.py  (+0 -1)

@@ -14,7 +14,6 @@
 # ============================================================================
 """
 #################train lstm example on aclImdb########################
-python eval.py --ckpt_path=./lstm-20-390.ckpt
 """
 import argparse
 import os


model_zoo/official/nlp/lstm/src/imdb.py  (+2 -2)

@@ -103,7 +103,7 @@ class ImdbParser():
             vocab = set(chain(*tokenized_features))
             self.__vacab[seg] = vocab

-            # word_to_idx: {'hello': 1, 'world':111, ... '<unk>': 0}
+            # word_to_idx looks like {'hello': 1, 'world':111, ... '<unk>': 0}
             word_to_idx = {word: i + 1 for i, word in enumerate(vocab)}
             word_to_idx['<unk>'] = 0
             self.__word2idx[seg] = word_to_idx
@@ -147,7 +147,7 @@ class ImdbParser():

     def get_datas(self, seg):
         """
-        return features, labels, and weight
+        get features, labels, and weight by gensim.
        """
         features = np.array(self.__features[seg]).astype(np.int32)
         labels = np.array(self.__labels[seg]).astype(np.int32)


model_zoo/official/nlp/lstm/train.py  (+0 -1)

@@ -14,7 +14,6 @@
 # ============================================================================
 """
 #################train lstm example on aclImdb########################
-python train.py --preprocess=true --aclimdb_path=your_imdb_path --glove_path=your_glove_path
 """
 import argparse
 import os


model_zoo/official/nlp/mass/README.md  (+1 -1)

@@ -472,7 +472,7 @@ More detail about LR scheduler could be found in `src/utils/lr_scheduler.py`.
 - Hardware(Ascend/GPU)
   - Prepare hardware environment with Ascend or GPU processor. If you want to try Ascend , please send the [application form](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/file/other/Ascend%20Model%20Zoo%E4%BD%93%E9%AA%8C%E8%B5%84%E6%BA%90%E7%94%B3%E8%AF%B7%E8%A1%A8.docx) to ascend@huawei.com. Once approved, you can get the resources.
 - Framework
-  - [MindSpore](http://10.90.67.50/mindspore/archive/20200506/OpenSource/me_vm_x86/)
+  - [MindSpore](https://www.mindspore.cn/install/en)
 - For more information, please check the resources below:
   - [MindSpore tutorials](https://www.mindspore.cn/tutorial/zh-CN/master/index.html)
   - [MindSpore API](https://www.mindspore.cn/api/zh-CN/master/index.html)


model_zoo/official/nlp/mass/src/transformer/beam_search.py  (+1 -2)

@@ -94,7 +94,6 @@ class TileBeam(nn.Cell):
         # add an dim
         input_tensor = self.expand(input_tensor, 1)
         # get tile shape: [1, beam, ...]
-        # shape = self.shape(input_tensor)
         tile_shape = (1,) + (self.beam_width,)
         for _ in range(len(shape) - 1):
             tile_shape = tile_shape + (1,)
@@ -349,7 +348,7 @@

         # add length penalty scores
         penalty_len = self.length_penalty(state_length)
-        # return penalty_len
+        # get penalty length
         log_probs = self.real_div(state_log_probs, penalty_len)

         # sort according to scores


model_zoo/official/nlp/tinybert/src/tinybert_for_gd_td.py  (+0 -2)

@@ -383,7 +383,6 @@ class BertNetworkWithLoss_td(nn.Cell):
             if is_predistill:
                 new_param_dict = {}
                 for key, value in param_dict.items():
-                    # new_key = re.sub('tinybert_', 'bert_', key)
                     new_key = re.sub('tinybert_', 'bert_', 'bert.' + key)
                     new_param_dict[new_key] = value
                 load_param_into_net(self.bert, new_param_dict)
@@ -391,7 +390,6 @@
                 new_param_dict = {}
                 for key, value in param_dict.items():
                     new_key = re.sub('tinybert_', 'bert_', key)
-                    # new_key = re.sub('tinybert_', 'bert_', 'bert.'+ key)
                     new_param_dict[new_key] = value
                 load_param_into_net(self.bert, new_param_dict)
         self.cast = P.Cast()


model_zoo/official/nlp/tinybert/src/tinybert_model.py  (+2 -2)

@@ -502,7 +502,7 @@ class BertAttention(nn.Cell):
         attention_scores = self.matmul_trans_b(query_layer, key_layer)
         # use_relative_position, supplementary logic
         if self.use_relative_positions:
-            # 'relations_keys' = [F|T, F|T, H]
+            # relations_keys is [F|T, F|T, H]
             relations_keys = self._generate_relative_positions_embeddings()
             relations_keys = self.cast_compute_type(relations_keys)
             # query_layer_t is [F, B, N, H]
@@ -539,7 +539,7 @@
         context_layer = self.matmul(attention_probs, value_layer)
         # use_relative_position, supplementary logic
         if self.use_relative_positions:
-            # 'relations_values' = [F|T, F|T, H]
+            # relations_values is [F|T, F|T, H]
             relations_values = self._generate_relative_positions_embeddings()
             relations_values = self.cast_compute_type(relations_values)
             # attention_probs_t is [F, B, N, T]


model_zoo/official/nlp/transformer/src/beam_search.py  (+1 -1)

@@ -258,7 +258,7 @@ class BeamSearchDecoder(nn.Cell):

         # add length penalty scores
         penalty_len = self.length_penalty(state_length)
-        # return penalty_len
+        # get penalty length
         log_probs = self.real_div(state_log_probs, penalty_len)

         # sort according to scores


model_zoo/official/nlp/transformer/src/transformer_for_train.py  (+1 -1)

@@ -55,7 +55,7 @@ class ClipGradients(nn.Cell):
                   grads,
                   clip_type,
                   clip_value):
-        """return grads"""
+        """Defines the gradients clip."""
         if clip_type != 0 and clip_type != 1:
             return grads




model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py  (+1 -0)

@@ -156,6 +156,7 @@ class WideDeepModel(nn.Cell):
         emb64_multi_size = 20900
+        indicator_size = 16
         deep_dim_list = [1024, 1024, 1024, 1024, 1024]
         wide_reg_coef = [0.0, 0.0]
         deep_reg_coef = [0.0, 0.0]
         wide_lr = 0.2


model_zoo/research/cv/ghostnet/Readme.md  (+1 -1)

@@ -43,7 +43,7 @@ Dataset used: [Oxford-IIIT Pet](https://www.robots.ox.ac.uk/~vgg/data/pets/)
 - Hardware(Ascend/GPU)
   - Prepare hardware environment with Ascend or GPU. If you want to try Ascend, please send the [application form](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/file/other/Ascend%20Model%20Zoo%E4%BD%93%E9%AA%8C%E8%B5%84%E6%BA%90%E7%94%B3%E8%AF%B7%E8%A1%A8.docx) to ascend@huawei.com. Once approved, you can get the resources.
 - Framework
-  - [MindSpore](http://10.90.67.50/mindspore/archive/20200506/OpenSource/me_vm_x86/)
+  - [MindSpore](https://www.mindspore.cn/install/en)
 - For more information, please check the resources below:
   - [MindSpore tutorials](https://www.mindspore.cn/tutorial/zh-CN/master/index.html)
   - [MindSpore API](https://www.mindspore.cn/api/zh-CN/master/index.html)


model_zoo/research/cv/ghostnet/src/ghostnet600.py  (+0 -1)

@@ -335,7 +335,6 @@ class GhostNet(nn.Cell):

         self.blocks = []
         for layer_cfg in self.cfgs:
-            #print (layer_cfg)
             self.blocks.append(self._make_layer(kernel_size=layer_cfg[0],
                                                 exp_ch=_make_divisible(
                                                     self.inplanes * layer_cfg[3]),


model_zoo/research/cv/ghostnet_quant/Readme.md  (+1 -1)

@@ -48,7 +48,7 @@ Dataset used: [Oxford-IIIT Pet](https://www.robots.ox.ac.uk/~vgg/data/pets/)
 - Hardware(Ascend/GPU)
   - Prepare hardware environment with Ascend or GPU processor. If you want to try Ascend, please send the [application form](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/file/other/Ascend%20Model%20Zoo%E4%BD%93%E9%AA%8C%E8%B5%84%E6%BA%90%E7%94%B3%E8%AF%B7%E8%A1%A8.docx) to ascend@huawei.com. Once approved, you can get the resources.
 - Framework
-  - [MindSpore](http://10.90.67.50/mindspore/archive/20200506/OpenSource/me_vm_x86/)
+  - [MindSpore](https://www.mindspore.cn/install/en)
 - For more information, please check the resources below:
   - [MindSpore tutorials](https://www.mindspore.cn/tutorial/zh-CN/master/index.html)
   - [MindSpore API](https://www.mindspore.cn/api/zh-CN/master/index.html)


model_zoo/research/cv/ghostnet_quant/src/ghostnet.py  (+1 -1)

@@ -105,7 +105,7 @@ class SE(nn.Cell):
     SE warpper definition.

     Args:
-        num_out (int): Output channel.
+        num_out (int): output channel.
         ratio (int): middle output ratio.

     Returns:


model_zoo/research/cv/resnet50_adv_pruning/Readme.md  (+1 -1)

@@ -36,7 +36,7 @@ Dataset used: [Oxford-IIIT Pet](https://www.robots.ox.ac.uk/~vgg/data/pets/)
 - Hardware(Ascend/GPU)
   - Prepare hardware environment with Ascend or GPU processor. If you want to try Ascend, please send the [application form](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/file/other/Ascend%20Model%20Zoo%E4%BD%93%E9%AA%8C%E8%B5%84%E6%BA%90%E7%94%B3%E8%AF%B7%E8%A1%A8.docx) to ascend@huawei.com. Once approved, you can get the resources.
 - Framework
-  - [MindSpore](http://10.90.67.50/mindspore/archive/20200506/OpenSource/me_vm_x86/)
+  - [MindSpore](https://www.mindspore.cn/install/en)
 - For more information, please check the resources below:
   - [MindSpore tutorials](https://www.mindspore.cn/tutorial/zh-CN/master/index.html)
   - [MindSpore API](https://www.mindspore.cn/api/zh-CN/master/index.html)


model_zoo/research/cv/ssd_ghostnet/README.md  (+1 -1)

@@ -23,7 +23,7 @@ Dataset used: [COCO2017](<http://images.cocodataset.org/>)
 - Hardware(Ascend/GPU)
   - Prepare hardware environment with Ascend or GPU processor. If you want to try Ascend, please send the [application form](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/file/other/Ascend%20Model%20Zoo%E4%BD%93%E9%AA%8C%E8%B5%84%E6%BA%90%E7%94%B3%E8%AF%B7%E8%A1%A8.docx) to ascend@huawei.com. Once approved, you can get the resources.
 - Framework
-  - [MindSpore](http://10.90.67.50/mindspore/archive/20200506/OpenSource/me_vm_x86/)
+  - [MindSpore](https://www.mindspore.cn/install/en)
 - For more information, please check the resources below:
   - [MindSpore tutorials](https://www.mindspore.cn/tutorial/zh-CN/master/index.html)
   - [MindSpore API](https://www.mindspore.cn/api/zh-CN/master/index.html)


model_zoo/research/cv/ssd_ghostnet/src/ssd_ghostnet.py  (+0 -8)

@@ -84,7 +84,6 @@ class ConvBNReLU(nn.Cell):

     def construct(self, x):
         output = self.features(x)
-        # print(output.shape)
         return output


@@ -267,8 +266,6 @@ class GhostModule(nn.Cell):
     def construct(self, x):
         x1 = self.primary_conv(x)
         x2 = self.cheap_operation(x1)
-        # print(x1.shape)
-        # print(x2.shape)
         return self.concat((x1, x2))


@@ -342,7 +339,6 @@ class GhostBottleneck(nn.Cell):
             out = self.add(shortcut, out)
         if self.last_relu:
             out = self.relu(out)
-        # print(out.shape)
         return out

     def _get_pad(self, kernel_size):
@@ -410,7 +406,6 @@ class InvertedResidual(nn.Cell):
             x = self.add(identity, x)
         if self.last_relu:
             x = self.relu(x)
-        # print(x.shape)
         return x


@@ -675,7 +670,6 @@ class SSDWithGhostNet(nn.Cell):
     def __init__(self, model_cfgs, multiplier=1., round_nearest=8):
         super(SSDWithGhostNet, self).__init__()
         self.cfgs = model_cfgs['cfg']
-        # self.inplanes = 16 ## for "1x"
         self.inplanes = 20 # for "1.3x"
         first_conv_in_channel = 3
         first_conv_out_channel = _make_divisible(multiplier * self.inplanes)
@@ -686,7 +680,6 @@

         layer_index = 0
         for layer_cfg in self.cfgs:
-            # print(layer_cfg)
             if layer_index == 11:
                 hidden_dim = int(round(self.inplanes * 6))
                 self.expand_layer_conv_11 = ConvBNReLU(
@@ -711,7 +704,6 @@
     def _make_layer(self, kernel_size, exp_ch, out_channel, use_se, act_func, stride=1):
         mid_planes = exp_ch
         out_planes = out_channel
-        # num_in, num_mid, num_out, kernel_size, stride=1, act_type='relu', use_se=False):
         layer = GhostBottleneck(self.inplanes, mid_planes, out_planes,
                                 kernel_size, stride=stride, act_type=act_func, use_se=use_se)
         self.inplanes = out_planes


model_zoo/research/cv/ssd_ghostnet/train.py  (+0 -3)

@@ -23,10 +23,8 @@ from mindspore import context, Tensor
 from mindspore.communication.management import init
 from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor, TimeMonitor
 from mindspore.train import Model, ParallelMode
-# from mindspore.context import ParallelMode
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 from src.ssd_ghostnet import SSD300, SSDWithLossCell, TrainingWrapper, ssd_ghostnet
-# from src.config_ghostnet_1x import config
 from src.config_ghostnet_13x import config
 from src.dataset import create_ssd_dataset, data_to_mindrecord_byte_image, voc_data_to_mindrecord
 from src.lr_schedule import get_lr
@@ -124,7 +122,6 @@ def main():

     backbone = ssd_ghostnet()
     ssd = SSD300(backbone=backbone, config=config)
-    # print(ssd)
     net = SSDWithLossCell(ssd, config)
     init_net_param(net)




model_zoo/utils/graph_to_mindrecord/writer.py  (+0 -1)

@@ -149,7 +149,6 @@ if __name__ == "__main__":
     # pass mr_api arguments
     os.environ['graph_api_args'] = args.graph_api_args

-    # import mr_api
     try:
         mr_api = import_module(args.mindrecord_script + '.mr_api')
     except ModuleNotFoundError:


tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/wide_and_deep.py  (+1 -6)

@@ -13,22 +13,19 @@
 # limitations under the License.
 # ============================================================================
 """wide and deep model"""
+import numpy as np
 from mindspore import nn
 from mindspore import Parameter, ParameterTuple
 import mindspore.common.dtype as mstype
 from mindspore.ops import functional as F
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
-# from mindspore.nn import Dropout
 from mindspore.nn.optim import Adam, FTRL
-# from mindspore.nn.metrics import Metric
 from mindspore.common.initializer import Uniform, initializer
-# from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
 from mindspore.parallel._utils import _get_device_num, _get_parallel_mode, _get_gradients_mean
 from mindspore.context import ParallelMode
 from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
 from mindspore.communication.management import get_group_size
-import numpy as np

 np_type = np.float32
 ms_type = mstype.float32
@@ -110,8 +107,6 @@ class DenseLayer(nn.Cell):
     def construct(self, x):
         x = self.act_func(x)
-        # if self.training:
-        #     x = self.dropout(x)
         x = self.mul(x, self.scale_coef)
         if self.convert_dtype:
             x = self.cast(x, mstype.float16)


tests/st/networks/models/bert/src/fused_layer_norm.py  (+1 -2)

@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 """fused layernorm"""
+import numpy as np
 from mindspore.ops import operations as P
 from mindspore.ops import functional as F
 from mindspore.common.parameter import Parameter
@@ -21,8 +22,6 @@ from mindspore.ops.primitive import constexpr
 import mindspore.common.dtype as mstype
 from mindspore.nn.cell import Cell

-import numpy as np
-

 __all__ = ['FusedLayerNorm']




tests/st/networks/models/deeplabv3/src/md_dataset.py  (+1 -1)

@@ -13,10 +13,10 @@
 # limitations under the License.
 # ============================================================================
 """Dataset module."""
+import numpy as np
 from PIL import Image
 import mindspore.dataset as de
 import mindspore.dataset.vision.c_transforms as C
-import numpy as np

 from .ei_dataset import HwVocRawDataset
 from .utils import custom_transforms as tr

