From fc65b4223f1b3cd6ee81a781f68abb6fe5a645f0 Mon Sep 17 00:00:00 2001
From: guohongzilong <2713219276@qq.com>
Date: Fri, 17 Jul 2020 10:41:07 +0800
Subject: [PATCH] fix optimizer example

---
 mindspore/nn/optim/adam.py     | 2 +-
 mindspore/nn/optim/lazyadam.py | 2 +-
 mindspore/nn/optim/momentum.py | 2 +-
 mindspore/nn/optim/rmsprop.py  | 2 +-
 mindspore/nn/optim/sgd.py      | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/mindspore/nn/optim/adam.py b/mindspore/nn/optim/adam.py
index b73c284aab..cac688fd54 100755
--- a/mindspore/nn/optim/adam.py
+++ b/mindspore/nn/optim/adam.py
@@ -223,7 +223,7 @@ class Adam(Optimizer):
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
 >>> {'params': no_conv_params, 'lr': 0.01},
 >>> {'order_params': net.trainable_params()}]
->>> opt = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)
+>>> optim = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
 >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
diff --git a/mindspore/nn/optim/lazyadam.py b/mindspore/nn/optim/lazyadam.py
index 4b97d2eb20..a9bf628bff 100644
--- a/mindspore/nn/optim/lazyadam.py
+++ b/mindspore/nn/optim/lazyadam.py
@@ -153,7 +153,7 @@ class LazyAdam(Optimizer):
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
 >>> {'params': no_conv_params, 'lr': 0.01},
 >>> {'order_params': net.trainable_params()}]
->>> opt = nn.LazyAdam(group_params, learning_rate=0.1, weight_decay=0.0)
+>>> optim = nn.LazyAdam(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
 >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
diff --git a/mindspore/nn/optim/momentum.py b/mindspore/nn/optim/momentum.py
index 1e8ce85570..2710845028 100755
--- a/mindspore/nn/optim/momentum.py
+++ b/mindspore/nn/optim/momentum.py
@@ -100,7 +100,7 @@ class Momentum(Optimizer):
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
 >>> {'params': no_conv_params, 'lr': 0.01},
 >>> {'order_params': net.trainable_params()}]
->>> opt = nn.Momentum(group_params, learning_rate=0.1, momentum=0.9, weight_decay=0.0)
+>>> optim = nn.Momentum(group_params, learning_rate=0.1, momentum=0.9, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
 >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
diff --git a/mindspore/nn/optim/rmsprop.py b/mindspore/nn/optim/rmsprop.py
index 8e8885aff7..c846e9adbe 100644
--- a/mindspore/nn/optim/rmsprop.py
+++ b/mindspore/nn/optim/rmsprop.py
@@ -143,7 +143,7 @@ class RMSProp(Optimizer):
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
 >>> {'params': no_conv_params, 'lr': 0.01},
 >>> {'order_params': net.trainable_params()}]
->>> opt = nn.RMSProp(group_params, learning_rate=0.1, weight_decay=0.0)
+>>> optim = nn.RMSProp(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
 >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
diff --git a/mindspore/nn/optim/sgd.py b/mindspore/nn/optim/sgd.py
index 382f095627..43f001ea24 100755
--- a/mindspore/nn/optim/sgd.py
+++ b/mindspore/nn/optim/sgd.py
@@ -101,7 +101,7 @@ class SGD(Optimizer):
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
 >>> {'params': no_conv_params, 'lr': 0.01},
 >>> {'order_params': net.trainable_params()}]
->>> opt = nn.SGD(group_params, learning_rate=0.1, weight_decay=0.0)
+>>> optim = nn.SGD(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
 >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
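Note (illustrative addition, not part of the patch): each hunk above makes the same one-line rename of the grouped-parameter example's optimizer variable from `opt` to `optim`, so the five docstring examples name the optimizer consistently. The sketch below shows, under stated assumptions, how such a grouped-parameter optimizer is typically wired into a training run; the `Net` definition, the loss function, and the `Model` wrapper are assumptions for illustration and are not taken from the diff.

    import mindspore.nn as nn
    from mindspore import Model

    # Hypothetical network, defined only so the name-based parameter grouping
    # below has something to act on ('conv.*' vs. everything else).
    class Net(nn.Cell):
        def __init__(self):
            super(Net, self).__init__()
            self.conv = nn.Conv2d(3, 16, 3)       # parameter named 'conv.weight'
            self.relu = nn.ReLU()
            self.flatten = nn.Flatten()
            self.fc = nn.Dense(16 * 32 * 32, 10)  # parameters named 'fc.weight', 'fc.bias'

        def construct(self, x):
            x = self.relu(self.conv(x))
            return self.fc(self.flatten(x))

    net = Net()

    # Same grouping as in the docstring examples: conv parameters get a weight
    # decay of 0.01, the remaining parameters get a learning rate of 0.01, and
    # 'order_params' fixes the order in which parameters are updated.
    conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
    no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
    group_params = [{'params': conv_params, 'weight_decay': 0.01},
                    {'params': no_conv_params, 'lr': 0.01},
                    {'order_params': net.trainable_params()}]

    # Any of the five optimizers touched by this patch follows the same pattern;
    # Momentum is shown here, matching the momentum.py example.
    optim = nn.Momentum(group_params, learning_rate=0.1, momentum=0.9, weight_decay=0.0)

    # Hand the renamed `optim` to the high-level training wrapper.
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)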