@@ -223,7 +223,7 @@ class Adam(Optimizer):
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
 >>>                 {'params': no_conv_params, 'lr': 0.01},
 >>>                 {'order_params': net.trainable_params()}]
->>> opt = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)
+>>> optim = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
 >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
@@ -153,7 +153,7 @@ class LazyAdam(Optimizer):
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
 >>>                 {'params': no_conv_params, 'lr': 0.01},
 >>>                 {'order_params': net.trainable_params()}]
->>> opt = nn.LazyAdam(group_params, learning_rate=0.1, weight_decay=0.0)
+>>> optim = nn.LazyAdam(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
 >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
@@ -100,7 +100,7 @@ class Momentum(Optimizer):
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
 >>>                 {'params': no_conv_params, 'lr': 0.01},
 >>>                 {'order_params': net.trainable_params()}]
->>> opt = nn.Momentum(group_params, learning_rate=0.1, momentum=0.9, weight_decay=0.0)
+>>> optim = nn.Momentum(group_params, learning_rate=0.1, momentum=0.9, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
 >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
@@ -143,7 +143,7 @@ class RMSProp(Optimizer):
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
 >>>                 {'params': no_conv_params, 'lr': 0.01},
 >>>                 {'order_params': net.trainable_params()}]
->>> opt = nn.RMSProp(group_params, learning_rate=0.1, weight_decay=0.0)
+>>> optim = nn.RMSProp(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
 >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
@@ -101,7 +101,7 @@ class SGD(Optimizer):
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
 >>>                 {'params': no_conv_params, 'lr': 0.01},
 >>>                 {'order_params': net.trainable_params()}]
->>> opt = nn.SGD(group_params, learning_rate=0.1, weight_decay=0.0)
+>>> optim = nn.SGD(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
 >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
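For context, all five hunks rename the example variable from `opt` to `optim` in the same grouped-parameter docstring pattern. The sketch below shows how such a `group_params` list is typically assembled end to end. It is illustrative only and not part of the change: `SimpleNet` and the name-based split into `conv_params` / `no_conv_params` are assumptions standing in for a real model.

```python
import mindspore.nn as nn

class SimpleNet(nn.Cell):
    """Hypothetical toy network with one conv layer and one dense layer."""
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 16, kernel_size=3)
        self.fc = nn.Dense(16, 10)

    def construct(self, x):
        x = self.conv(x)
        x = x.mean(axis=(2, 3))  # global average pooling over spatial dims
        return self.fc(x)

net = SimpleNet()

# Split trainable parameters by name, as in the docstring examples:
# conv parameters get weight decay, the rest get a custom learning rate.
conv_params = [p for p in net.trainable_params() if 'conv' in p.name]
no_conv_params = [p for p in net.trainable_params() if 'conv' not in p.name]

group_params = [{'params': conv_params, 'weight_decay': 0.01},
                {'params': no_conv_params, 'lr': 0.01},
                {'order_params': net.trainable_params()}]

# The same grouping works with any of the optimizers touched in this diff
# (Adam, LazyAdam, Momentum, RMSProp, SGD).
optim = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)
```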