@@ -100,7 +100,7 @@ The loss function `SoftmaxCrossEntropyWithLogits` and the optimizer `AdamWeightD
 if __name__ == "__main__":
     ...
     # define the loss function
-    criterion = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+    criterion = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     optimizer = AdamWeightDecay(params=network.trainable_params(), learning_rate=0.0001)
     ...
 ```
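Every hunk in this patch makes the same change: the deprecated `is_grad` argument is dropped from `SoftmaxCrossEntropyWithLogits`; nothing else about the call changes. A minimal before/after sketch, assuming a MindSpore build where the argument has already been removed (so passing it raises a `TypeError`):

```python
import mindspore.nn as nn

# Before: the removed keyword is no longer accepted.
# criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")

# After: `sparse=True` takes integer class labels instead of one-hot vectors,
# and `reduction="mean"` averages the per-sample losses into a scalar.
criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
```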
@@ -320,7 +320,7 @@ from mindspore.nn import WithLossCell, TrainOneStepCell
 if __name__ == "__main__":
     network = LeNet5()
-    criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+    criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     optimizer = nn.AdamWeightDecay(params=network.trainable_params(), learning_rate=0.0001)
     net_with_loss = WithLossCell(network, criterion)
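For context, here is how the pieces in the hunk above fit together once migrated — a runnable sketch assuming the tutorial's `LeNet5` definition (1x32x32 inputs, 10 classes; the batch data below is random stand-in data, not part of the patch):

```python
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.nn import WithLossCell, TrainOneStepCell

network = LeNet5()  # from the tutorial; any Cell emitting (batch, classes) logits works
criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
optimizer = nn.AdamWeightDecay(params=network.trainable_params(), learning_rate=0.0001)

net_with_loss = WithLossCell(network, criterion)            # forward pass + loss
train_network = TrainOneStepCell(net_with_loss, optimizer)  # adds backward pass + update
train_network.set_train()

data = Tensor(np.random.rand(32, 1, 32, 32).astype(np.float32))
label = Tensor(np.random.randint(0, 10, size=(32,)).astype(np.int32))
loss = train_network(data, label)  # one optimizer step; returns the loss value
```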
@@ -53,7 +53,7 @@ class TransformToBNN:
         >>>         return out
         >>>
         >>> net = Net()
-        >>> criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        >>> criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
         >>> net_with_loss = WithLossCell(network, criterion)
         >>> train_network = TrainOneStepCell(net_with_loss, optim)
@@ -107,7 +107,7 @@ class TransformToBNN:
     Examples:
         >>> net = Net()
-        >>> criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        >>> criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
         >>> net_with_loss = WithLossCell(network, criterion)
         >>> train_network = TrainOneStepCell(net_with_loss, optim)
@@ -149,7 +149,7 @@ class TransformToBNN:
     Examples:
         >>> net = Net()
-        >>> criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        >>> criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
         >>> net_with_loss = WithLossCell(network, criterion)
         >>> train_network = TrainOneStepCell(net_with_loss, optim)
@@ -105,7 +105,7 @@ class Model:
         >>>         return out
         >>>
         >>> net = Net()
-        >>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
         >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
         >>> dataset = get_dataset()
@@ -543,7 +543,7 @@ class Model:
     Examples:
         >>> dataset = get_dataset()
         >>> net = Net()
-        >>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> loss_scale_manager = FixedLossScaleManager()
         >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
         >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None, loss_scale_manager=loss_scale_manager)
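The loss-scaling docstring example above, reassembled as one self-contained sketch (`Net` and `get_dataset` stay the docstring's own placeholders; import paths assume the MindSpore layout of this era):

```python
import mindspore.nn as nn
from mindspore.train.model import Model
from mindspore.train.loss_scale_manager import FixedLossScaleManager

net = Net()  # placeholder network from the docstring
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
loss_scale_manager = FixedLossScaleManager()  # fixed loss scale, default 128
optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
model = Model(net, loss_fn=loss, optimizer=optim, metrics=None,
              loss_scale_manager=loss_scale_manager)
model.train(2, get_dataset())  # placeholder dataset from the docstring
```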
@@ -661,7 +661,7 @@ class Model:
     Examples:
         >>> dataset = get_dataset()
         >>> net = Net()
-        >>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> model = Model(net, loss_fn=loss, optimizer=None, metrics={'acc'})
         >>> model.eval(dataset)
     """
@@ -392,8 +392,7 @@ If you need to use the trained model to perform inference on multiple hardware p
     net = GoogleNet(num_classes=cfg.num_classes)
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01,
                    cfg.momentum, weight_decay=cfg.weight_decay)
-    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean',
-                                            is_grad=False)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'})

     # Load pre-trained model
@@ -419,8 +418,7 @@ If you need to use the trained model to perform inference on multiple hardware p
     net = GoogleNet(num_classes=cfg.num_classes)
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01,
                    cfg.momentum, weight_decay=cfg.weight_decay)
-    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean',
-                                            is_grad=False)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'})

     # Load pre-trained model
@@ -453,7 +451,7 @@ If you need to use the trained model to perform inference on multiple hardware p
                   steps_per_epoch=batch_num)
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
                    Tensor(lr), cfg.momentum, weight_decay=cfg.weight_decay)
-    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean', is_grad=False)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'},
                   amp_level="O2", keep_batchnorm_fp32=False, loss_scale_manager=None)
@@ -487,7 +485,7 @@ If you need to use the trained model to perform inference on multiple hardware p
                   steps_per_epoch=batch_num)
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
                    Tensor(lr), cfg.momentum, weight_decay=cfg.weight_decay)
-    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean', is_grad=False)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'},
                   amp_level="O2", keep_batchnorm_fp32=False, loss_scale_manager=None)
@@ -135,7 +135,7 @@ class Model:
         >>>         return out
         >>>
         >>> net = Net()
-        >>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
         >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
         >>> dataset = get_dataset()
@@ -307,7 +307,7 @@ class Model:
         >>> train_dataset = get_train_dataset()
         >>> valid_dataset = get_valid_dataset()
         >>> net = Net()
-        >>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
         >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics={'acc'})
         >>> model.init(train_dataset, valid_dataset)
@@ -597,7 +597,7 @@ class Model:
     Examples:
         >>> dataset = get_dataset()
         >>> net = Net()
-        >>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> loss_scale_manager = FixedLossScaleManager()
         >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
         >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None, loss_scale_manager=loss_scale_manager)
@@ -714,7 +714,7 @@ class Model:
     Examples:
         >>> dataset = get_dataset()
         >>> net = Net()
-        >>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> model = Model(net, loss_fn=loss, optimizer=None, metrics={'acc'})
         >>> model.eval(dataset)
     """
@@ -243,8 +243,7 @@ https://www.mindspore.cn/tutorial/zh-CN/master/use/multi_platform_inference.html
     net = GoogleNet(num_classes=cfg.num_classes)
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01,
                    cfg.momentum, weight_decay=cfg.weight_decay)
-    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean',
-                                            is_grad=False)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'})

     # Load pre-trained model
@@ -275,7 +274,7 @@ https://www.mindspore.cn/tutorial/zh-CN/master/use/multi_platform_inference.html
                   steps_per_epoch=batch_num)
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
                    Tensor(lr), cfg.momentum, weight_decay=cfg.weight_decay)
-    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean', is_grad=False)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'},
                   amp_level="O2", keep_batchnorm_fp32=False, loss_scale_manager=None)
@@ -199,7 +199,7 @@ class NetWithLossClass(nn.Cell):
     """
     def __init__(self, network):
         super(NetWithLossClass, self).__init__(auto_prefix=False)
-        #self.loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        #self.loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         self.loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         self.network = network
         self.reducesum = P.ReduceSum(keep_dims=False)
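The hunk above also refreshes a stale commented-out line inside a custom loss-wrapper cell. Reduced to its essentials, the wrapper pattern looks like this (a sketch; the real `NetWithLossClass` additionally uses `P.ReduceSum` for other loss terms):

```python
import mindspore.nn as nn

class NetWithLoss(nn.Cell):
    # Wraps a backbone so the whole forward pass, loss included,
    # runs as a single graph cell.
    def __init__(self, network):
        super(NetWithLoss, self).__init__(auto_prefix=False)
        self.loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
        self.network = network

    def construct(self, data, label):
        logits = self.network(data)
        return self.loss(logits, label)
```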
@@ -50,8 +50,7 @@ if __name__ == '__main__':
     else:
         raise ValueError("Unsupport platform.")

-    loss = nn.SoftmaxCrossEntropyWithLogits(
-        is_grad=False, sparse=True, reduction='mean')
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

     if args_opt.model == 'ghostnet':
         net = ghostnet_1x(num_classes=config_platform.num_classes)
@@ -49,8 +49,7 @@ if __name__ == '__main__':
     else:
         raise ValueError("Unsupport platform.")

-    loss = nn.SoftmaxCrossEntropyWithLogits(
-        is_grad=False, sparse=True, reduction='mean')
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

     net = ghostnet_1x(num_classes=config_platform.num_classes)
@@ -53,8 +53,7 @@ if __name__ == '__main__':
     else:
         raise ValueError("Unsupport platform.")

-    loss = nn.SoftmaxCrossEntropyWithLogits(
-        is_grad=False, sparse=True, reduction='mean')
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

     if args_opt.platform == "Ascend":
         net.to_float(mstype.float16)
@@ -128,7 +128,7 @@ class Model:
         >>>         return out
         >>>
         >>> net = Net()
-        >>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
         >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
         >>> dataset = get_dataset()
@@ -295,7 +295,7 @@ class Model:
         >>> train_dataset = get_train_dataset()
         >>> valid_dataset = get_valid_dataset()
         >>> net = Net()
-        >>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
         >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics={'acc'})
         >>> model.init(train_dataset, valid_dataset)
@@ -566,7 +566,7 @@ class Model:
     Examples:
         >>> dataset = get_dataset()
         >>> net = Net()
-        >>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> loss_scale_manager = FixedLossScaleManager()
         >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
         >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None, loss_scale_manager=loss_scale_manager)
@@ -678,7 +678,7 @@ class Model:
     Examples:
         >>> dataset = get_dataset()
         >>> net = Net()
-        >>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> model = Model(net, loss_fn=loss, optimizer=None, metrics={'acc'})
         >>> model.eval(dataset)
     """