@@ -136,7 +136,8 @@ class MSELoss(_Loss):
 >>> loss = nn.MSELoss()
 >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
 >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
->>> loss(input_data, target_data)
+>>> output = loss(input_data, target_data)
+>>> print(output)
 0.33333334
 """
 def construct(self, base, target):
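The 0.33333334 printed by the updated MSELoss example is easy to sanity-check: with the default 'mean' reduction, the loss is the mean of the element-wise squared differences. A minimal NumPy sketch, outside MindSpore and for verification only:

    import numpy as np

    # Squared error between the example input and target, reduced with
    # the 'mean' reduction that nn.MSELoss uses by default.
    input_data = np.array([1, 2, 3], dtype=np.float32)
    target_data = np.array([1, 2, 2], dtype=np.float32)
    print(np.mean((input_data - target_data) ** 2))  # 0.33333334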
@@ -495,7 +496,8 @@ class BCELoss(_Loss):
 >>> loss = nn.BCELoss(weight=weight, reduction='mean')
 >>> inputs = Tensor(np.array([[0.1, 0.2, 0.3], [0.5, 0.7, 0.9]]), mindspore.float32)
 >>> labels = Tensor(np.array([[0, 1, 0], [0, 0, 1]]), mindspore.float32)
->>> loss(inputs, labels)
+>>> output = loss(inputs, labels)
+>>> print(output)
 1.8952923
 """
@@ -553,7 +555,8 @@ class CosineEmbeddingLoss(_Loss):
 >>> x2 = Tensor(np.array([[0.4, 1.2], [-0.4, -0.9]]), mindspore.float32)
 >>> y = Tensor(np.array([1,-1]), mindspore.int32)
 >>> cosine_embedding_loss = nn.CosineEmbeddingLoss()
->>> cosine_embedding_loss(x1, x2, y)
+>>> output = cosine_embedding_loss(x1, x2, y)
+>>> print(output)
 [0.0003426671]
 """
 def __init__(self, margin=0.0, reduction="mean"):
@@ -34,7 +34,7 @@ class TopKCategoricalAccuracy(Metric):
 Examples:
 >>> x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.],
->>> [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
+... [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
 >>> y = Tensor(np.array([2, 0, 1]), mindspore.float32)
 >>> topk = nn.TopKCategoricalAccuracy(3)
 >>> topk.clear()
@@ -98,7 +98,7 @@ class Top1CategoricalAccuracy(TopKCategoricalAccuracy):
 Examples:
 >>> x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.],
->>> [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
+... [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
 >>> y = Tensor(np.array([2, 0, 1]), mindspore.float32)
 >>> topk = nn.Top1CategoricalAccuracy()
 >>> topk.clear()
@@ -116,7 +116,7 @@ class Top5CategoricalAccuracy(TopKCategoricalAccuracy):
 Examples:
 >>> x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.],
->>> [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
+... [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32)
 >>> y = Tensor(np.array([2, 0, 1]), mindspore.float32)
 >>> topk = nn.Top5CategoricalAccuracy()
 >>> topk.clear()
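The ">>>"-to-"..." changes in these accuracy examples follow the standard doctest convention: only the first physical line of a statement gets the ">>>" prompt, and continuation lines of the same statement use "...". A minimal illustration, not taken from the diff:

    >>> scores = [0.2, 0.5, 0.3,    # first line of the statement
    ...           0.6, 0.2]         # continuation line uses "..."
    >>> len(scores)
    5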
@@ -270,8 +270,8 @@ class Adam(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params, 'lr': 0.01},
->>> {'order_params': net.trainable_params()}]
+... {'params': no_conv_params, 'lr': 0.01},
+... {'order_params': net.trainable_params()}]
 >>> optim = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
 >>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0.
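For context, a grouped-parameter optimizer like the one built in this example (and in the optimizer examples that follow) is normally handed to a training wrapper together with a loss function. A minimal sketch of that usage; the loss function, Model wrapper wiring, and train_dataset here are assumptions for illustration, not part of the diff:

    import mindspore.nn as nn
    from mindspore.train import Model

    # Hypothetical wiring of the grouped-parameter optimizer into training;
    # net and optim are the objects defined in the docstring example above,
    # and train_dataset is assumed to be an existing MindSpore dataset.
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    model = Model(net, loss_fn=loss_fn, optimizer=optim)
    model.train(1, train_dataset)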
@@ -405,8 +405,8 @@ class AdamWeightDecay(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params, 'lr': 0.01},
->>> {'order_params': net.trainable_params()}]
+... {'params': no_conv_params, 'lr': 0.01},
+... {'order_params': net.trainable_params()}]
 >>> optim = nn.AdamWeightDecay(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
 >>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0.
@@ -135,8 +135,8 @@ class FTRL(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params},
->>> {'order_params': net.trainable_params()}]
+... {'params': no_conv_params},
+... {'order_params': net.trainable_params()}]
 >>> optim = nn.FTRL(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use weight decay of 0.01.
 >>> # The no_conv_params's parameters will use default weight decay of 0.0.
@@ -245,8 +245,8 @@ class Lamb(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params, 'lr': poly_decay_lr},
->>> {'order_params': net.trainable_params(0.01, 0.0001, 10, 0.5)}]
+... {'params': no_conv_params, 'lr': poly_decay_lr},
+... {'order_params': net.trainable_params(0.01, 0.0001, 10, 0.5)}]
 >>> optim = nn.Lamb(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
 >>> # The no_conv_params's parameters will use dynamic learning rate of poly decay learning rate and default
@@ -192,8 +192,8 @@ class LazyAdam(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params, 'lr': 0.01},
->>> {'order_params': net.trainable_params()}]
+... {'params': no_conv_params, 'lr': 0.01},
+... {'order_params': net.trainable_params()}]
 >>> optim = nn.LazyAdam(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
 >>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0.
@@ -113,8 +113,8 @@ class Momentum(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params, 'lr': 0.01},
->>> {'order_params': net.trainable_params()}]
+... {'params': no_conv_params, 'lr': 0.01},
+... {'order_params': net.trainable_params()}]
 >>> optim = nn.Momentum(group_params, learning_rate=0.1, momentum=0.9, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
@@ -116,8 +116,8 @@ class ProximalAdagrad(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params, 'lr': 0.01},
->>> {'order_params': net.trainable_params()}]
+... {'params': no_conv_params, 'lr': 0.01},
+... {'order_params': net.trainable_params()}]
 >>> optim = nn.ProximalAdagrad(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
 >>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0.
@@ -139,8 +139,8 @@ class RMSProp(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params, 'lr': 0.01},
->>> {'order_params': net.trainable_params()}]
+... {'params': no_conv_params, 'lr': 0.01},
+... {'order_params': net.trainable_params()}]
 >>> optim = nn.RMSProp(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
@@ -112,8 +112,8 @@ class SGD(Optimizer):
 >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
 >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
 >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
->>> {'params': no_conv_params, 'lr': 0.01},
->>> {'order_params': net.trainable_params()}]
+... {'params': no_conv_params, 'lr': 0.01},
+... {'order_params': net.trainable_params()}]
 >>> optim = nn.SGD(group_params, learning_rate=0.1, weight_decay=0.0)
 >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
 >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
@@ -252,7 +252,8 @@ class ConvReparam(_ConvVariational):
 Examples:
 >>> net = ConvReparam(120, 240, 4, has_bias=False)
 >>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
->>> net(input).shape
+>>> output = net(input).shape
+>>> print(output)
 (1, 240, 1024, 640)
 """
@@ -190,7 +190,8 @@ class DenseReparam(_DenseVariational):
 Examples:
 >>> net = DenseReparam(3, 4)
 >>> input = Tensor(np.random.randint(0, 255, [2, 3]), mindspore.float32)
->>> net(input).shape
+>>> output = net(input).shape
+>>> print(output)
 (2, 4)
 """
@@ -59,18 +59,20 @@ class UncertaintyEvaluation:
 >>> load_param_into_net(network, param_dict)
 >>> ds_train = create_dataset('workspace/mnist/train')
 >>> evaluation = UncertaintyEvaluation(model=network,
->>> train_dataset=ds_train,
->>> task_type='classification',
->>> num_classes=10,
->>> epochs=1,
->>> epi_uncer_model_path=None,
->>> ale_uncer_model_path=None,
->>> save_model=False)
+... train_dataset=ds_train,
+... task_type='classification',
+... num_classes=10,
+... epochs=1,
+... epi_uncer_model_path=None,
+... ale_uncer_model_path=None,
+... save_model=False)
 >>> epistemic_uncertainty = evaluation.eval_epistemic_uncertainty(eval_data)
 >>> aleatoric_uncertainty = evaluation.eval_aleatoric_uncertainty(eval_data)
->>> epistemic_uncertainty.shape
+>>> output = epistemic_uncertainty.shape
+>>> print(output)
 (32, 10)
->>> aleatoric_uncertainty.shape
+>>> output = aleatoric_uncertainty.shape
+>>> print(output)
 (32,)
 """