
fix optimizer example

commit fc65b4223f (tags/v0.6.0-beta)
guohongzilong, 5 years ago
5 changed files with 5 additions and 5 deletions
  1. mindspore/nn/optim/adam.py (+1, -1)
  2. mindspore/nn/optim/lazyadam.py (+1, -1)
  3. mindspore/nn/optim/momentum.py (+1, -1)
  4. mindspore/nn/optim/rmsprop.py (+1, -1)
  5. mindspore/nn/optim/sgd.py (+1, -1)

mindspore/nn/optim/adam.py (+1, -1)

@@ -223,7 +223,7 @@ class Adam(Optimizer):
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
 >>>                 {'params': no_conv_params, 'lr': 0.01},
 >>>                 {'order_params': net.trainable_params()}]
->>> opt = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)
+>>> optim = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
 >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
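
For context, here is a minimal runnable sketch of how the grouped-parameter example in these docstrings is typically wired up end to end. Only the group_params list and the renamed `optim = nn.Adam(...)` line mirror the docstring; the toy Net, the name-based parameter split, the loss, and the Model import path are illustrative assumptions, not part of this commit.

# Illustrative sketch (not part of this commit): end-to-end use of the grouped-parameter
# example shown in the docstring above. Network, loss, and Model import are assumptions.
import mindspore.nn as nn
from mindspore import Model  # older releases expose it as mindspore.train.model.Model

class Net(nn.Cell):
    """Toy network with one conv layer and one dense layer."""
    def __init__(self):
        super(Net, self).__init__()
        self.conv = nn.Conv2d(1, 6, 5, pad_mode='valid')
        self.flatten = nn.Flatten()
        self.fc = nn.Dense(6 * 28 * 28, 10)

    def construct(self, x):
        return self.fc(self.flatten(self.conv(x)))

net = Net()
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)

# Split trainable parameters into conv and non-conv groups, as the docstring does.
conv_params = [p for p in net.trainable_params() if 'conv' in p.name]
no_conv_params = [p for p in net.trainable_params() if 'conv' not in p.name]
group_params = [{'params': conv_params, 'weight_decay': 0.01},
                {'params': no_conv_params, 'lr': 0.01},
                {'order_params': net.trainable_params()}]

# The variable is named 'optim' so it matches the name the docstrings use afterwards,
# for example when the optimizer is passed to Model.
optim = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)
model = Model(net, loss_fn=loss, optimizer=optim)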


mindspore/nn/optim/lazyadam.py (+1, -1)

@@ -153,7 +153,7 @@ class LazyAdam(Optimizer):
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
 >>>                 {'params': no_conv_params, 'lr': 0.01},
 >>>                 {'order_params': net.trainable_params()}]
->>> opt = nn.LazyAdam(group_params, learning_rate=0.1, weight_decay=0.0)
+>>> optim = nn.LazyAdam(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
 >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.


mindspore/nn/optim/momentum.py (+1, -1)

@@ -100,7 +100,7 @@ class Momentum(Optimizer):
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
 >>>                 {'params': no_conv_params, 'lr': 0.01},
 >>>                 {'order_params': net.trainable_params()}]
->>> opt = nn.Momentum(group_params, learning_rate=0.1, momentum=0.9, weight_decay=0.0)
+>>> optim = nn.Momentum(group_params, learning_rate=0.1, momentum=0.9, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
 >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.


mindspore/nn/optim/rmsprop.py (+1, -1)

@@ -143,7 +143,7 @@ class RMSProp(Optimizer):
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
 >>>                 {'params': no_conv_params, 'lr': 0.01},
 >>>                 {'order_params': net.trainable_params()}]
->>> opt = nn.RMSProp(group_params, learning_rate=0.1, weight_decay=0.0)
+>>> optim = nn.RMSProp(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
 >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.


mindspore/nn/optim/sgd.py (+1, -1)

@@ -101,7 +101,7 @@ class SGD(Optimizer):
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
 >>>                 {'params': no_conv_params, 'lr': 0.01},
 >>>                 {'order_params': net.trainable_params()}]
->>> opt = nn.SGD(group_params, learning_rate=0.1, weight_decay=0.0)
+>>> optim = nn.SGD(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
 >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.

