
!593 Fix wizard template module to fit changed operator API

Merge pull request !593 from moran/wizard_dev
tags/v1.0.0
mindspore-ci-bot committed 5 years ago
parent commit b7e7681e2c
9 changed files with 81 additions and 43 deletions
1. +1  -1   mindinsight/wizard/conf/templates/network/alexnet/eval.py-tpl
2. +11 -27  mindinsight/wizard/conf/templates/network/alexnet/train.py-tpl
3. +1  -1   mindinsight/wizard/conf/templates/network/lenet/eval.py-tpl
4. +1  -1   mindinsight/wizard/conf/templates/network/lenet/train.py-tpl
5. +1  -0   mindinsight/wizard/conf/templates/network/resnet50/README.md-tpl
6. +3  -2   mindinsight/wizard/conf/templates/network/resnet50/eval.py-tpl
7. +38 -0   mindinsight/wizard/conf/templates/network/resnet50/src/CrossEntropySmooth.py-tpl
8. +9  -7   mindinsight/wizard/conf/templates/network/resnet50/train.py-tpl
9. +16 -4   tests/st/func/wizard/test_resnet50.py

mindinsight/wizard/conf/templates/network/alexnet/eval.py-tpl  (+1, -1)

@@ -48,7 +48,7 @@ if __name__ == "__main__":

     network = AlexNet(cfg.num_classes)
     {% if loss=='SoftmaxCrossEntropyWithLogits' %}
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     {% elif loss=='SoftmaxCrossEntropyExpand' %}
     net_loss = nn.SoftmaxCrossEntropyExpand(sparse=True)
     {% endif %}
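
(For context: the operator API change this PR tracks removed the `is_grad` keyword from `nn.SoftmaxCrossEntropyWithLogits`, so every template now constructs the loss with only `sparse` and `reduction`. A minimal sketch of the updated call, assuming a MindSpore build that includes the API change:)

    # Sketch only: exercising the post-change loss constructor with dummy data.
    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor

    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    logits = Tensor(np.random.randn(4, 10).astype(np.float32))  # batch of 4, 10 classes
    labels = Tensor(np.array([1, 0, 3, 9], dtype=np.int32))     # sparse class indices
    print(loss_fn(logits, labels))                              # scalar mean loss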


mindinsight/wizard/conf/templates/network/alexnet/train.py-tpl  (+11, -27)

@@ -99,33 +99,17 @@ if __name__ == "__main__":
     lr = Tensor(get_lr(0, cfg.lr, cfg.epoch_size, ds_train.get_dataset_size()))

     # define loss, model
-    if target == "Ascend":
-        {% if loss=='SoftmaxCrossEntropyWithLogits' %}
-        loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
-        {% elif loss=='SoftmaxCrossEntropyExpand' %}
-        loss = nn.SoftmaxCrossEntropyExpand(sparse=True)
-        {% endif %}
-        {% if optimizer=='Momentum' %}
-        opt = nn.Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate=lr, momentum=cfg.momentum,
-                          weight_decay=cfg.weight_decay, loss_scale=cfg.loss_scale)
-        {% else %}
-        opt = nn.{{ optimizer }}(net.trainable_params(), learning_rate=cfg.lr)
-        {% endif %}
-        loss_scale = FixedLossScaleManager(cfg.loss_scale, drop_overflow_update=False)
-        model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics={'acc'},
-                      amp_level="O2", keep_batchnorm_fp32=False)
-    else:
-        {% if loss=='SoftmaxCrossEntropyWithLogits' %}
-        loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
-        {% elif loss=='SoftmaxCrossEntropyExpand' %}
-        loss = nn.SoftmaxCrossEntropyExpand(sparse=True)
-        {% endif %}
-        {% if optimizer=='Momentum' %}
-        opt = nn.Momentum(net.trainable_params(), learning_rate=lr, momentum=cfg.momentum)
-        {% else %}
-        opt = nn.{{ optimizer }}(net.trainable_params(), learning_rate=lr)
-        {% endif %}
-        model = Model(net, loss, opt, metrics={"Accuracy": Accuracy()})
+    {% if loss=='SoftmaxCrossEntropyWithLogits' %}
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
+    {% elif loss=='SoftmaxCrossEntropyExpand' %}
+    loss = nn.SoftmaxCrossEntropyExpand(sparse=True)
+    {% endif %}
+    {% if optimizer=='Momentum' %}
+    opt = nn.Momentum(net.trainable_params(), learning_rate=lr, momentum=cfg.momentum)
+    {% else %}
+    opt = nn.{{ optimizer }}(net.trainable_params(), learning_rate=lr)
+    {% endif %}
+    model = Model(net, loss, opt, metrics={"Accuracy": Accuracy()})

     # define callbacks
     time_cb = TimeMonitor(data_size=step_size)
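
(Note: the deleted branch also dropped the Ascend-only mixed-precision setup, so both device targets now share the single code path above. For reference, the removed pattern looked like the sketch below, reconstructed from the deleted lines; the import locations are assumptions and this is no longer part of the template:)

    # Reconstructed from the removed Ascend-only branch, for reference only.
    from mindspore.train.loss_scale_manager import FixedLossScaleManager
    from mindspore.train.model import Model

    # `cfg`, `net`, `loss`, and `opt` come from the surrounding template context.
    loss_scale = FixedLossScaleManager(cfg.loss_scale, drop_overflow_update=False)
    model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale,
                  metrics={'acc'}, amp_level="O2", keep_batchnorm_fp32=False)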


mindinsight/wizard/conf/templates/network/lenet/eval.py-tpl  (+1, -1)

@@ -48,7 +48,7 @@ if __name__ == "__main__":

     network = LeNet5(cfg.num_classes)
     {% if loss=='SoftmaxCrossEntropyWithLogits' %}
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     {% elif loss=='SoftmaxCrossEntropyExpand' %}
     net_loss = nn.SoftmaxCrossEntropyExpand(sparse=True)
     {% endif %}


mindinsight/wizard/conf/templates/network/lenet/train.py-tpl  (+1, -1)

@@ -73,7 +73,7 @@ if __name__ == "__main__":
         param_dict = load_checkpoint(args.pre_trained)
         load_param_into_net(network, param_dict)
     {% if loss=='SoftmaxCrossEntropyWithLogits' %}
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     {% elif loss=='SoftmaxCrossEntropyExpand' %}
     net_loss = nn.SoftmaxCrossEntropyExpand(sparse=True)
     {% endif %}


mindinsight/wizard/conf/templates/network/resnet50/README.md-tpl  (+1, -0)

@@ -52,6 +52,7 @@ ImageNet
 ├── config.py # parameter configuration
 ├── dataset.py # data preprocessing
 ├── lr_generator.py # generate learning rate for each step
+├── CrossEntropySmooth.py # define the cross entropy loss function with smoothed labels
 └── resnet50.py # resNet50 network definition
 ├── eval.py # eval net
 └── train.py # train net


mindinsight/wizard/conf/templates/network/resnet50/eval.py-tpl  (+3, -2)

@@ -22,6 +22,7 @@ from mindspore import context
 from mindspore import dataset as de
 from mindspore.train.model import Model
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
+from src.CrossEntropySmooth import CrossEntropySmooth

 parser = argparse.ArgumentParser(description='Image classification')

@@ -69,8 +70,8 @@ if __name__ == '__main__':
     {% if loss=='SoftmaxCrossEntropyWithLogits' %}
     if not cfg.use_label_smooth:
         cfg.label_smooth_factor = 0.0
-    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean',
-                                            smooth_factor=cfg.label_smooth_factor, num_classes=cfg.num_classes)
+    loss = CrossEntropySmooth(sparse=True, reduction='mean',
+                              smooth_factor=cfg.label_smooth_factor, num_classes=cfg.num_classes)
     {% elif loss=='SoftmaxCrossEntropyExpand' %}
     loss = nn.SoftmaxCrossEntropyExpand(sparse=True)
     {% endif %}


mindinsight/wizard/conf/templates/network/resnet50/src/CrossEntropySmooth.py-tpl  (+38, -0)

@@ -0,0 +1,38 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""define loss function for network"""
+import mindspore.nn as nn
+from mindspore import Tensor
+from mindspore.common import dtype as mstype
+from mindspore.nn.loss.loss import _Loss
+from mindspore.ops import functional as F
+from mindspore.ops import operations as P
+
+
+class CrossEntropySmooth(_Loss):
+    """CrossEntropy"""
+    def __init__(self, sparse=True, reduction='mean', smooth_factor=0., num_classes=1000):
+        super(CrossEntropySmooth, self).__init__()
+        self.onehot = P.OneHot()
+        self.sparse = sparse
+        self.on_value = Tensor(1.0 - smooth_factor, mstype.float32)
+        self.off_value = Tensor(1.0 * smooth_factor / (num_classes - 1), mstype.float32)
+        self.ce = nn.SoftmaxCrossEntropyWithLogits(reduction=reduction)
+
+    def construct(self, logit, label):
+        if self.sparse:
+            label = self.onehot(label, F.shape(logit)[1], self.on_value, self.off_value)
+        loss = self.ce(logit, label)
+        return loss
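
(A quick usage sketch of this new template once rendered, with illustrative values: smooth_factor=0.1 and num_classes=10 give on_value = 0.9 for the true class and off_value = 0.1 / 9 ≈ 0.011 for each other class before the standard cross entropy is applied. The dummy tensors below are assumptions for demonstration:)

    # Sketch only: constructing and calling the rendered CrossEntropySmooth.
    import numpy as np
    from mindspore import Tensor
    from src.CrossEntropySmooth import CrossEntropySmooth

    loss_fn = CrossEntropySmooth(sparse=True, reduction='mean',
                                 smooth_factor=0.1, num_classes=10)
    logits = Tensor(np.random.randn(4, 10).astype(np.float32))  # batch of 4
    labels = Tensor(np.array([2, 7, 0, 5], dtype=np.int32))     # class indices
    print(loss_fn(logits, labels))                              # scalar smoothed CE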

mindinsight/wizard/conf/templates/network/resnet50/train.py-tpl  (+9, -7)

@@ -30,6 +30,7 @@ from mindspore.communication.management import init, get_rank, get_group_size
 import mindspore.nn as nn
 import mindspore.common.initializer as weight_init
 from src.lr_generator import get_lr, warmup_cosine_annealing_lr
+from src.CrossEntropySmooth import CrossEntropySmooth

 parser = argparse.ArgumentParser(description='Image classification')
 parser.add_argument('--run_distribute', type=bool, default=False, help='Run distribute')
@@ -105,8 +106,9 @@ if __name__ == '__main__':
                      warmup_epochs=cfg.warmup_epochs, total_epochs=cfg.epoch_size, steps_per_epoch=step_size,
                      lr_decay_mode='poly')
     {% else %}
-    lr = get_lr(lr_init=cfg.lr_init, lr_end=0.0, lr_max=cfg.lr_max, warmup_epochs=cfg.warmup_epochs,
-                total_epochs=cfg.epoch_size, steps_per_epoch=step_size, lr_decay_mode='cosine')
+    lr = get_lr(lr_init=cfg.lr_init, lr_end=0.0, lr_max=cfg.lr_max,
+                warmup_epochs=cfg.warmup_epochs, total_epochs=cfg.epoch_size, steps_per_epoch=step_size,
+                lr_decay_mode='cosine')
     {% endif %}

     lr = Tensor(lr)
@@ -125,8 +127,8 @@ if __name__ == '__main__':
     {% if loss=='SoftmaxCrossEntropyWithLogits' %}
     if not cfg.use_label_smooth:
         cfg.label_smooth_factor = 0.0
-    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean',
-                                            smooth_factor=cfg.label_smooth_factor, num_classes=cfg.num_classes)
+    loss = CrossEntropySmooth(sparse=True, reduction='mean',
+                              smooth_factor=cfg.label_smooth_factor, num_classes=cfg.num_classes)
     {% elif loss=='SoftmaxCrossEntropyExpand' %}
     loss = nn.SoftmaxCrossEntropyExpand(sparse=True)
     {% endif %}
@@ -146,14 +148,14 @@ if __name__ == '__main__':
     {% if loss=='SoftmaxCrossEntropyWithLogits' %}
     if not cfg.use_label_smooth:
         cfg.label_smooth_factor = 0.0
-    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, is_grad=False, reduction='mean',
-                                            smooth_factor=cfg.label_smooth_factor, num_classes=cfg.num_classes)
+    loss = CrossEntropySmooth(sparse=True, reduction='mean',
+                              smooth_factor=cfg.label_smooth_factor, num_classes=cfg.num_classes)
     {% elif loss=='SoftmaxCrossEntropyExpand' %}
     loss = nn.SoftmaxCrossEntropyExpand(sparse=True)
     {% endif %}
     {% else %}
     {% if loss=='SoftmaxCrossEntropyWithLogits' %}
-    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, is_grad=False, reduction='mean')
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     {% elif loss=='SoftmaxCrossEntropyExpand' %}
     loss = nn.SoftmaxCrossEntropyExpand(sparse=True)
     {% endif %}


tests/st/func/wizard/test_resnet50.py  (+16, -4)

@@ -117,6 +117,7 @@ class TestResNet50:
         config_dataset_is_right = False
         config_optimizer_is_right = False
         network_is_right = False
+        cross_entorpy_smooth_is_right = False
         generator_lr_is_right = False
         for source_file in self.source_files:
             if source_file.file_relative_path == 'src/dataset.py':
@@ -124,6 +125,8 @@ class TestResNet50:
                 dataset_is_right = True
             if source_file.file_relative_path == os.path.join('src', NETWORK_NAME.lower() + '.py'):
                 network_is_right = True
+            if source_file.file_relative_path == 'src/CrossEntropySmooth.py':
+                cross_entorpy_smooth_is_right = True
             if source_file.file_relative_path == 'src/lr_generator.py':
                 generator_lr_is_right = True
             if source_file.file_relative_path == 'src/config.py':
@@ -136,6 +139,7 @@ class TestResNet50:
         assert config_dataset_is_right
         assert config_optimizer_is_right
         assert network_is_right
+        assert cross_entorpy_smooth_is_right
         assert generator_lr_is_right

     @staticmethod
@@ -179,13 +183,21 @@ class TestResNet50:
         for source_file in self.source_files:
             if source_file.file_relative_path == 'train.py':
                 content = source_file.content
-                if 'resnet50' in content and loss_name in content and optimizer_name in content:
-                    train_is_right = True
+                if 'resnet50' in content and optimizer_name in content:
+                    if dataset_name == 'ImageNet' and loss_name == 'SoftmaxCrossEntropyWithLogits' \
+                            and 'loss = CrossEntropySmooth' in content:
+                        train_is_right = True
+                    elif loss_name in content:
+                        train_is_right = True

             if source_file.file_relative_path == 'eval.py':
                 content = source_file.content
-                if 'resnet50' in content and loss_name in content:
-                    eval_is_right = True
+                if 'resnet50' in content:
+                    if dataset_name == 'ImageNet' and loss_name == 'SoftmaxCrossEntropyWithLogits' \
+                            and 'loss = CrossEntropySmooth' in content:
+                        eval_is_right = True
+                    elif loss_name in content:
+                        eval_is_right = True

             if source_file.file_relative_path == 'README.md':
                 content = source_file.content

