
!8734 Code_docs updating notes of examples in nn_folder

From: @zhangz0911gm
Reviewed-by: @zhunaipan, @liangchenghui
Signed-off-by: @liangchenghui
tags/v1.1.0
mindspore-ci-bot committed on Gitee 5 years ago
commit d8a7fd8801
13 changed files with 42 additions and 35 deletions
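Every hunk in this change makes the same docstring-example cleanup: doctest continuation lines switch from a repeated ">>>" prompt to the standard "..." prompt, and example results are bound to a variable and printed explicitly (output = ... followed by print(output)) instead of being echoed from a bare expression. In the hunks below, a leading "-" marks the line that was removed and "+" marks its replacement.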
1. mindspore/nn/loss/loss.py (+6 / -3)
2. mindspore/nn/metrics/topk.py (+3 / -3)
3. mindspore/nn/optim/adam.py (+4 / -4)
4. mindspore/nn/optim/ftrl.py (+2 / -2)
5. mindspore/nn/optim/lamb.py (+2 / -2)
6. mindspore/nn/optim/lazyadam.py (+2 / -2)
7. mindspore/nn/optim/momentum.py (+2 / -2)
8. mindspore/nn/optim/proximal_ada_grad.py (+2 / -2)
9. mindspore/nn/optim/rmsprop.py (+2 / -2)
10. mindspore/nn/optim/sgd.py (+2 / -2)
11. mindspore/nn/probability/bnn_layers/conv_variational.py (+2 / -1)
12. mindspore/nn/probability/bnn_layers/dense_variational.py (+2 / -1)
13. mindspore/nn/probability/toolbox/uncertainty_evaluation.py (+11 / -9)

mindspore/nn/loss/loss.py (+6 / -3)

@@ -136,7 +136,8 @@ class MSELoss(_Loss):
>>> loss = nn.MSELoss()
>>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
- >>> loss(input_data, target_data)
+ >>> output = loss(input_data, target_data)
+ >>> print(output)
0.33333334
"""
def construct(self, base, target):
@@ -495,7 +496,8 @@ class BCELoss(_Loss):
>>> loss = nn.BCELoss(weight=weight, reduction='mean')
>>> inputs = Tensor(np.array([[0.1, 0.2, 0.3], [0.5, 0.7, 0.9]]), mindspore.float32)
>>> labels = Tensor(np.array([[0, 1, 0], [0, 0, 1]]), mindspore.float32)
- >>> loss(inputs, labels)
+ >>> output = loss(inputs, labels)
+ >>> print(output)
1.8952923
"""

@@ -553,7 +555,8 @@ class CosineEmbeddingLoss(_Loss):
>>> x2 = Tensor(np.array([[0.4, 1.2], [-0.4, -0.9]]), mindspore.float32)
>>> y = Tensor(np.array([1,-1]), mindspore.int32)
>>> cosine_embedding_loss = nn.CosineEmbeddingLoss()
- >>> cosine_embedding_loss(x1, x2, y)
+ >>> output = cosine_embedding_loss(x1, x2, y)
+ >>> print(output)
[0.0003426671]
"""
def __init__(self, margin=0.0, reduction="mean"):
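For reference, the updated MSELoss example expands to the following self-contained script once the imports the docstring leaves implicit are added (a minimal sketch assuming a standard MindSpore install; the BCELoss and CosineEmbeddingLoss hunks follow the same pattern):

import numpy as np
import mindspore
from mindspore import Tensor, nn

# Values taken from the updated docstring example above.
loss = nn.MSELoss()
input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)

# New convention: bind the result, then print it explicitly.
output = loss(input_data, target_data)
print(output)  # 0.33333334, as shown in the docstring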


mindspore/nn/metrics/topk.py (+3 / -3)

@@ -34,7 +34,7 @@ class TopKCategoricalAccuracy(Metric):

Examples:
>>> x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.],
- >>> [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
+ ... [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
>>> y = Tensor(np.array([2, 0, 1]), mindspore.float32)
>>> topk = nn.TopKCategoricalAccuracy(3)
>>> topk.clear()
@@ -98,7 +98,7 @@ class Top1CategoricalAccuracy(TopKCategoricalAccuracy):

Examples:
>>> x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.],
- >>> [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
+ ... [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
>>> y = Tensor(np.array([2, 0, 1]), mindspore.float32)
>>> topk = nn.Top1CategoricalAccuracy()
>>> topk.clear()
@@ -116,7 +116,7 @@ class Top5CategoricalAccuracy(TopKCategoricalAccuracy):

Examples:
>>> x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.],
- >>> [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
+ ... [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
>>> y = Tensor(np.array([2, 0, 1]), mindspore.float32)
>>> topk = nn.Top5CategoricalAccuracy()
>>> topk.clear()
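The topk hunks only fix the continuation prompt. For completeness, a sketch of the full metric flow; the update and eval calls are assumed from MindSpore's standard Metric interface, since the hunk itself stops at topk.clear():

import numpy as np
import mindspore
from mindspore import Tensor, nn

x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2],
                     [0.1, 0.35, 0.5, 0.2, 0.],
                     [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
y = Tensor(np.array([2, 0, 1]), mindspore.float32)

topk = nn.TopKCategoricalAccuracy(3)
topk.clear()        # reset the internal state
topk.update(x, y)   # accumulate predictions and labels (assumed Metric API)
print(topk.eval())  # top-3 accuracy over the accumulated batch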


mindspore/nn/optim/adam.py (+4 / -4)

@@ -281,8 +281,8 @@ class Adam(Optimizer):
>>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
>>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
>>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
- >>> {'params': no_conv_params, 'lr': 0.01},
- >>> {'order_params': net.trainable_params()}]
+ ... {'params': no_conv_params, 'lr': 0.01},
+ ... {'order_params': net.trainable_params()}]
>>> optim = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)
>>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
>>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0.
@@ -416,8 +416,8 @@ class AdamWeightDecay(Optimizer):
>>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
>>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
>>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
- >>> {'params': no_conv_params, 'lr': 0.01},
- >>> {'order_params': net.trainable_params()}]
+ ... {'params': no_conv_params, 'lr': 0.01},
+ ... {'order_params': net.trainable_params()}]
>>> optim = nn.AdamWeightDecay(group_params, learning_rate=0.1, weight_decay=0.0)
>>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
>>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0.
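The same two-line prompt fix is applied to every optimizer docstring that follows (FTRL, Lamb, LazyAdam, Momentum, ProximalAdagrad, RMSProp, SGD). A self-contained sketch of the grouped-parameter example; the small Net class here is a hypothetical stand-in for the net the docstrings assume:

from mindspore import nn

class Net(nn.Cell):
    """Hypothetical network with both conv and non-conv parameters."""
    def __init__(self):
        super(Net, self).__init__()
        self.conv = nn.Conv2d(3, 6, 5)
        self.flatten = nn.Flatten()
        self.fc = nn.Dense(6 * 32 * 32, 10)  # assumes 32x32 inputs with 'same' padding

    def construct(self, x):
        return self.fc(self.flatten(self.conv(x)))

net = Net()
conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
group_params = [{'params': conv_params, 'weight_decay': 0.01},
                {'params': no_conv_params, 'lr': 0.01},
                {'order_params': net.trainable_params()}]
# Conv parameters use the default learning rate of 0.1 and weight decay of 0.01;
# the remaining parameters use learning rate 0.01 and the default weight decay of 0.0.
optim = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)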


mindspore/nn/optim/ftrl.py (+2 / -2)

@@ -135,8 +135,8 @@ class FTRL(Optimizer):
>>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
>>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
>>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
- >>> {'params': no_conv_params},
- >>> {'order_params': net.trainable_params()}]
+ ... {'params': no_conv_params},
+ ... {'order_params': net.trainable_params()}]
>>> optim = nn.FTRL(group_params, learning_rate=0.1, weight_decay=0.0)
>>> # The conv_params's parameters will use weight decay of 0.01.
>>> # The no_conv_params's parameters will use default weight decay of 0.0.


mindspore/nn/optim/lamb.py (+2 / -2)

@@ -245,8 +245,8 @@ class Lamb(Optimizer):
>>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
>>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
>>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
- >>> {'params': no_conv_params, 'lr': poly_decay_lr},
- >>> {'order_params': net.trainable_params(0.01, 0.0001, 10, 0.5)}]
+ ... {'params': no_conv_params, 'lr': poly_decay_lr},
+ ... {'order_params': net.trainable_params(0.01, 0.0001, 10, 0.5)}]
>>> optim = nn.Lamb(group_params, learning_rate=0.1, weight_decay=0.0)
>>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
>>> # The no_conv_params's parameters will use dynamic learning rate of poly decay learning rate and default


mindspore/nn/optim/lazyadam.py (+2 / -2)

@@ -192,8 +192,8 @@ class LazyAdam(Optimizer):
>>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
>>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
>>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
- >>> {'params': no_conv_params, 'lr': 0.01},
- >>> {'order_params': net.trainable_params()}]
+ ... {'params': no_conv_params, 'lr': 0.01},
+ ... {'order_params': net.trainable_params()}]
>>> optim = nn.LazyAdam(group_params, learning_rate=0.1, weight_decay=0.0)
>>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
>>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0.


mindspore/nn/optim/momentum.py (+2 / -2)

@@ -113,8 +113,8 @@ class Momentum(Optimizer):
>>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
>>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
>>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
- >>> {'params': no_conv_params, 'lr': 0.01},
- >>> {'order_params': net.trainable_params()}]
+ ... {'params': no_conv_params, 'lr': 0.01},
+ ... {'order_params': net.trainable_params()}]
>>> optim = nn.Momentum(group_params, learning_rate=0.1, momentum=0.9, weight_decay=0.0)
>>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
>>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.


mindspore/nn/optim/proximal_ada_grad.py (+2 / -2)

@@ -116,8 +116,8 @@ class ProximalAdagrad(Optimizer):
>>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
>>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
>>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
- >>> {'params': no_conv_params, 'lr': 0.01},
- >>> {'order_params': net.trainable_params()}]
+ ... {'params': no_conv_params, 'lr': 0.01},
+ ... {'order_params': net.trainable_params()}]
>>> optim = nn.ProximalAdagrad(group_params, learning_rate=0.1, weight_decay=0.0)
>>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
>>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0.


mindspore/nn/optim/rmsprop.py (+2 / -2)

@@ -139,8 +139,8 @@ class RMSProp(Optimizer):
>>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
>>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
>>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
- >>> {'params': no_conv_params, 'lr': 0.01},
- >>> {'order_params': net.trainable_params()}]
+ ... {'params': no_conv_params, 'lr': 0.01},
+ ... {'order_params': net.trainable_params()}]
>>> optim = nn.RMSProp(group_params, learning_rate=0.1, weight_decay=0.0)
>>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
>>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.


mindspore/nn/optim/sgd.py (+2 / -2)

@@ -112,8 +112,8 @@ class SGD(Optimizer):
>>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
>>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
>>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
- >>> {'params': no_conv_params, 'lr': 0.01},
- >>> {'order_params': net.trainable_params()}]
+ ... {'params': no_conv_params, 'lr': 0.01},
+ ... {'order_params': net.trainable_params()}]
>>> optim = nn.SGD(group_params, learning_rate=0.1, weight_decay=0.0)
>>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
>>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.


mindspore/nn/probability/bnn_layers/conv_variational.py (+2 / -1)

@@ -252,7 +252,8 @@ class ConvReparam(_ConvVariational):
Examples:
>>> net = ConvReparam(120, 240, 4, has_bias=False)
>>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
- >>> net(input).shape
+ >>> output = net(input).shape
+ >>> print(output)
(1, 240, 1024, 640)
"""


mindspore/nn/probability/bnn_layers/dense_variational.py (+2 / -1)

@@ -190,7 +190,8 @@ class DenseReparam(_DenseVariational):
Examples:
>>> net = DenseReparam(3, 4)
>>> input = Tensor(np.random.randint(0, 255, [2, 3]), mindspore.float32)
- >>> net(input).shape
+ >>> output = net(input).shape
+ >>> print(output)
(2, 4)
"""



mindspore/nn/probability/toolbox/uncertainty_evaluation.py (+11 / -9)

@@ -59,18 +59,20 @@ class UncertaintyEvaluation:
>>> load_param_into_net(network, param_dict)
>>> ds_train = create_dataset('workspace/mnist/train')
>>> evaluation = UncertaintyEvaluation(model=network,
- >>> train_dataset=ds_train,
- >>> task_type='classification',
- >>> num_classes=10,
- >>> epochs=1,
- >>> epi_uncer_model_path=None,
- >>> ale_uncer_model_path=None,
- >>> save_model=False)
+ ... train_dataset=ds_train,
+ ... task_type='classification',
+ ... num_classes=10,
+ ... epochs=1,
+ ... epi_uncer_model_path=None,
+ ... ale_uncer_model_path=None,
+ ... save_model=False)
>>> epistemic_uncertainty = evaluation.eval_epistemic_uncertainty(eval_data)
>>> aleatoric_uncertainty = evaluation.eval_aleatoric_uncertainty(eval_data)
- >>> epistemic_uncertainty.shape
+ >>> output = epistemic_uncertainty.shape
+ >>> print(output)
(32, 10)
- >>> aleatoric_uncertainty.shape
+ >>> output = aleatoric_uncertainty.shape
+ >>> print(output)
(32,)
"""

