
!8890 Add labels to python files

From: @JunYuLiu
Reviewed-by: @gemini524
Signed-off-by:
tags/v1.1.0
Committed by mindspore-ci-bot (commit d79a454564)
28 changed files with 285 additions and 0 deletions
  1. mindspore/nn/cell.py (+3, -0)
  2. mindspore/nn/layer/activation.py (+36, -0)
  3. mindspore/nn/layer/basic.py (+39, -0)
  4. mindspore/nn/layer/combined.py (+6, -0)
  5. mindspore/nn/layer/container.py (+6, -0)
  6. mindspore/nn/layer/conv.py (+12, -0)
  7. mindspore/nn/layer/embedding.py (+6, -0)
  8. mindspore/nn/layer/image.py (+15, -0)
  9. mindspore/nn/layer/lstm.py (+6, -0)
  10. mindspore/nn/layer/math.py (+18, -0)
  11. mindspore/nn/layer/normalization.py (+15, -0)
  12. mindspore/nn/layer/pooling.py (+12, -0)
  13. mindspore/nn/layer/quant.py (+21, -0)
  14. mindspore/nn/loss/loss.py (+18, -0)
  15. mindspore/nn/optim/adam.py (+9, -0)
  16. mindspore/nn/optim/ftrl.py (+3, -0)
  17. mindspore/nn/optim/lamb.py (+3, -0)
  18. mindspore/nn/optim/lars.py (+3, -0)
  19. mindspore/nn/optim/lazyadam.py (+3, -0)
  20. mindspore/nn/optim/momentum.py (+3, -0)
  21. mindspore/nn/optim/optimizer.py (+3, -0)
  22. mindspore/nn/optim/proximal_ada_grad.py (+3, -0)
  23. mindspore/nn/optim/rmsprop.py (+3, -0)
  24. mindspore/nn/optim/sgd.py (+3, -0)
  25. mindspore/nn/wrap/cell_wrapper.py (+18, -0)
  26. mindspore/nn/wrap/grad_reducer.py (+3, -0)
  27. mindspore/nn/wrap/loss_scale.py (+9, -0)
  28. mindspore/ops/operations/array_ops.py (+6, -0)

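Every hunk in this commit follows the same pattern: a "Supported Platforms:" label is inserted into a class docstring, between the Outputs/Args block and the Examples block. As a minimal sketch of the resulting docstring layout (the stub Cell base class below is only there so the snippet stands alone; it is not the real mindspore.nn.Cell):

    class Cell:  # stand-in for mindspore.nn.Cell, for illustration only
        pass

    class Softmax(Cell):
        r"""
        Softmax activation function.

        Outputs:
            Tensor, which has the same type and shape as `x` with values in the range [0, 1].

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
            >>> softmax = nn.Softmax()
        """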
mindspore/nn/cell.py (+3, -0)

@@ -51,6 +51,9 @@ class Cell(Cell_):
Args:
auto_prefix (bool): Recursively generate namespaces. Default: True.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> class MyCell(Cell):
>>> def __init__(self):


mindspore/nn/layer/activation.py (+36, -0)

@@ -67,6 +67,9 @@ class Softmax(Cell):
Outputs:
Tensor, which has the same type and shape as `x` with values in the range[0,1].

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> softmax = nn.Softmax()
@@ -104,6 +107,9 @@ class LogSoftmax(Cell):
Outputs:
Tensor, which has the same type and shape as the input `x`, with values in the range [-inf, 0).

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> log_softmax = nn.LogSoftmax()
@@ -174,6 +180,9 @@ class ReLU(Cell):
Outputs:
Tensor, with the same type and shape as the `input_data`.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> input_x = Tensor(np.array([-1, 2, -3, 2, -1]), mindspore.float16)
>>> relu = nn.ReLU()
@@ -204,6 +213,9 @@ class ReLU6(Cell):
Outputs:
Tensor, which has the same type as `input_data`.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> relu6 = nn.ReLU6()
@@ -242,6 +254,9 @@ class LeakyReLU(Cell):
Outputs:
Tensor, has the same type and shape as the `input_x`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> leaky_relu = nn.LeakyReLU()
@@ -287,6 +302,9 @@ class Tanh(Cell):
Outputs:
Tensor, with the same type and shape as the `input_data`.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 2, 1]), mindspore.float16)
>>> tanh = nn.Tanh()
@@ -319,6 +337,9 @@ class GELU(Cell):
Outputs:
Tensor, with the same type and shape as the `input_data`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> gelu = nn.GELU()
@@ -351,6 +372,9 @@ class Sigmoid(Cell):
Outputs:
Tensor, with the same type and shape as the `input_data`.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> sigmoid = nn.Sigmoid()
@@ -390,6 +414,9 @@ class PReLU(Cell):
Outputs:
Tensor, with the same type and shape as the `input_data`.

Supported Platforms:
``Ascend``

Examples:
>>> input_x = Tensor(np.array([[[[0.1, 0.6], [0.9, 0.9]]]]), mindspore.float32)
>>> prelu = nn.PReLU()
@@ -444,6 +471,9 @@ class HSwish(Cell):
Outputs:
Tensor, with the same type and shape as the `input_data`.

Supported Platforms:
``GPU``

Examples:
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> hswish = nn.HSwish()
@@ -479,6 +509,9 @@ class HSigmoid(Cell):
Outputs:
Tensor, with the same type and shape as the `input_data`.

Supported Platforms:
``GPU``

Examples:
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> hsigmoid = nn.HSigmoid()
@@ -514,6 +547,9 @@ class LogSigmoid(Cell):
Outputs:
Tensor, with the same type and shape as the `input_data`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = nn.LogSigmoid()
>>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
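For reference, the nn.Softmax doctest near the top of this file's hunks can be run end to end roughly as follows (a standalone sketch; it assumes a MindSpore build for one of the labelled platforms, and the printed values are indicative only):

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn

    # Same tensor and layer as the Softmax example above
    input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
    softmax = nn.Softmax()
    output = softmax(input_x)
    print(output)  # five probabilities summing to 1, the largest at the position of value 2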


mindspore/nn/layer/basic.py (+39, -0)

@@ -72,6 +72,9 @@ class Dropout(Cell):
Outputs:
Tensor, output tensor with the same shape as the input.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> x = Tensor(np.ones([2, 2, 3]), mindspore.float32)
>>> net = nn.Dropout(keep_prob=0.8)
@@ -139,6 +142,9 @@ class Flatten(Cell):
Tensor, the shape of the output tensor is :math:`(N, X)`, where :math:`X` is
the product of the remaining dimensions.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> input = Tensor(np.array([[[1.2, 1.2], [2.1, 2.1]], [[2.2, 2.2], [3.2, 3.2]]]), mindspore.float32)
>>> net = nn.Flatten()
@@ -195,6 +201,9 @@ class Dense(Cell):
Outputs:
Tensor of shape :math:`(*, out\_channels)`.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> input = Tensor(np.random.randint(0, 255, [2, 3]), mindspore.float32)
>>> net = nn.Dense(3, 4)
@@ -316,6 +325,9 @@ class ClipByNorm(Cell):
Outputs:
Tensor, clipped tensor with the same shape as the input, whose type is float32.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = nn.ClipByNorm()
>>> input = Tensor(np.random.randint(0, 10, [4, 16]), mindspore.float32)
@@ -387,6 +399,9 @@ class Norm(Cell):
Tensor, output tensor with dimensions in 'axis' reduced to 1 will be returned if 'keep_dims' is True;
otherwise a Tensor with dimensions in 'axis' removed is returned.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = nn.Norm(axis=0)
>>> input = Tensor(np.random.randint(0, 10, [2, 4]), mindspore.float32)
@@ -444,6 +459,9 @@ class OneHot(Cell):
Tensor, the one-hot tensor of data type 'dtype' with dimension at 'axis' expanded to 'depth' and filled with
on_value and off_value.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> net = nn.OneHot(depth=4, axis=1)
>>> indices = Tensor([[1, 3], [0, 2]], dtype=mindspore.int32)
@@ -501,6 +519,9 @@ class Pad(Cell):
is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the Outputs is
[[2,1,1,2,3,3,2],[2,1,1,2,3,3,2],[5,4,4,5,6,6,5],[8,7,7,8,9,9,8],[8,7,7,8,9,9,8]].

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
@@ -587,6 +608,9 @@ class Interpolate(Cell):
If scale is set, the result is a 4-D tensor with shape :math:`(batch, channels, scale_factor * height,
scale_factor * width)` in float32.

Supported Platforms:
``Ascend``

Examples:
>>> from mindspore.ops import operations as P
>>> tensor = Tensor([[[[1, 2, 3, 4], [5, 6, 7, 8]]]], mindspore.float32)
@@ -630,6 +654,9 @@ class Unfold(Cell):
Tensor, a 4-D tensor whose data type is the same as 'input_x',
and whose shape is [out_batch, out_depth, out_row, out_col], where out_batch is the same as in_batch.

Supported Platforms:
``Ascend``

Examples:
>>> net = Unfold(ksizes=[1, 2, 2, 1], strides=[1, 2, 2, 1], rates=[1, 2, 2, 1])
>>> image = Tensor(np.ones([2, 3, 6, 6]), dtype=mstype.float16)
@@ -675,6 +702,9 @@ class Tril(Cell):
Outputs:
Tensor, has the same type as input `x`.

Supported Platforms:
``Ascend``

Examples:
>>> x = Tensor(np.array([[1, 2], [3, 4]]))
>>> tril = nn.Tril()
@@ -757,6 +787,9 @@ class MatrixDiag(Cell):
Outputs:
Tensor, has the same type as input `x`. The shape must be x.shape + (x.shape[-1], ).

Supported Platforms:
``Ascend``

Examples:
>>> x = Tensor(np.array([1, -1]), mstype.float32)
>>> matrix_diag = nn.MatrixDiag()
@@ -789,6 +822,9 @@ class MatrixDiagPart(Cell):
Outputs:
Tensor, has the same type as input `x`. The shape must be x.shape[:-2] + [min(x.shape[-2:])].

Supported Platforms:
``Ascend``

Examples:
>>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
>>> matrix_diag_part = nn.MatrixDiagPart()
@@ -823,6 +859,9 @@ class MatrixSetDiag(Cell):
Outputs:
Tensor, has the same type and shape as input `x`.

Supported Platforms:
``Ascend``

Examples:
>>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
>>> diagonal = Tensor([[-1., 2.], [-1., 1.], [-1., 1.]], mindspore.float32)


mindspore/nn/layer/combined.py (+6, -0)

@@ -76,6 +76,9 @@ class Conv2dBnAct(Cell):
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = nn.Conv2dBnAct(120, 240, 4, has_bn=True, activation='relu')
>>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
@@ -168,6 +171,9 @@ class DenseBnAct(Cell):
Outputs:
Tensor of shape :math:`(N, out\_channels)`.

Supported Platforms:
``Ascend``

Examples:
>>> net = nn.DenseBnAct(3, 4)
>>> input = Tensor(np.random.randint(0, 255, [2, 3]), mindspore.float32)


mindspore/nn/layer/container.py (+6, -0)

@@ -80,6 +80,9 @@ class SequentialCell(Cell):
Outputs:
Tensor, the output Tensor with shape depending on the input and defined sequence of Cells.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> conv = nn.Conv2d(3, 2, 3, pad_mode='valid')
>>> bn = nn.BatchNorm2d(2)
@@ -184,6 +187,9 @@ class CellList(_CellListBase, Cell):
Args:
args (list, optional): List of subclass of Cell.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> conv = nn.Conv2d(100, 20, 3)
>>> bn = nn.BatchNorm2d(20)


mindspore/nn/layer/conv.py (+12, -0)

@@ -196,6 +196,9 @@ class Conv2d(_Conv):
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})` or `(N, H_{out}, W_{out}, C_{out})`.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> net = nn.Conv2d(120, 240, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
@@ -372,6 +375,9 @@ class Conv1d(_Conv):
Outputs:
Tensor of shape :math:`(N, C_{out}, W_{out})`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = nn.Conv1d(120, 240, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 120, 640]), mindspore.float32)
@@ -543,6 +549,9 @@ class Conv2dTranspose(_Conv):
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = nn.Conv2dTranspose(3, 64, 4, has_bias=False, weight_init='normal', pad_mode='pad')
>>> input = Tensor(np.ones([1, 3, 16, 50]), mindspore.float32)
@@ -719,6 +728,9 @@ class Conv1dTranspose(_Conv):
Outputs:
Tensor of shape :math:`(N, C_{out}, W_{out})`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = nn.Conv1dTranspose(3, 64, 4, has_bias=False, weight_init='normal', pad_mode='pad')
>>> input = Tensor(np.ones([1, 3, 50]), mindspore.float32)


mindspore/nn/layer/embedding.py (+6, -0)

@@ -60,6 +60,9 @@ class Embedding(Cell):
Outputs:
Tensor of shape :math:`(\text{batch_size}, \text{input_length}, \text{embedding_size})`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = nn.Embedding(20000, 768, True)
>>> input_data = Tensor(np.ones([8, 128]), mindspore.int32)
@@ -160,6 +163,9 @@ class EmbeddingLookup(Cell):
Outputs:
Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> input_indices = Tensor(np.array([[1, 0], [3, 2]]), mindspore.int32)
>>> result = nn.EmbeddingLookup(4,2)(input_indices)


mindspore/nn/layer/image.py (+15, -0)

@@ -50,6 +50,9 @@ class ImageGradients(Cell):
- **dy** (Tensor) - vertical image gradients, the same type and shape as input.
- **dx** (Tensor) - horizontal image gradients, the same type and shape as input.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = nn.ImageGradients()
>>> image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mindspore.int32)
@@ -211,6 +214,9 @@ class SSIM(Cell):
Outputs:
Tensor, has the same dtype as img1. It is a 1-D tensor with shape N, where N is the batch num of img1.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = nn.SSIM()
>>> img1 = Tensor(np.random.random((1,3,16,16)), mindspore.float32)
@@ -289,6 +295,9 @@ class MSSSIM(Cell):
Outputs:
Tensor, the value is in range [0, 1]. It is a 1-D tensor with shape N, where N is the batch num of img1.

Supported Platforms:
``Ascend``

Examples:
>>> net = nn.MSSSIM(power_factors=(0.033, 0.033, 0.033))
>>> np.random.seed(0)
@@ -380,6 +389,9 @@ class PSNR(Cell):
Outputs:
Tensor, with dtype mindspore.float32. It is a 1-D tensor with shape N, where N is the batch num of img1.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = nn.PSNR()
>>> img1 = Tensor(np.random.random((1,3,16,16)))
@@ -450,6 +462,9 @@ class CentralCrop(Cell):
Outputs:
Tensor, 3-D or 4-D float tensor, according to the input.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> net = nn.CentralCrop(central_fraction=0.5)
>>> image = Tensor(np.random.random((4, 3, 4, 4)), mindspore.float32)


mindspore/nn/layer/lstm.py (+6, -0)

@@ -95,6 +95,9 @@ class LSTM(Cell):
- **hx_n** (tuple) - A tuple of two Tensor (h_n, c_n) both of shape
(num_directions * `num_layers`, batch_size, `hidden_size`).

Supported Platforms:
``GPU``

Examples:
>>> net = nn.LSTM(10, 12, 2, has_bias=True, batch_first=True, bidirectional=False)
>>> input = Tensor(np.ones([3, 5, 10]).astype(np.float32))
@@ -303,6 +306,9 @@ class LSTMCell(Cell):
- **reserve** - reserved
- **state** - reserved

Supported Platforms:
``GPU`` ``CPU``

Examples:
>>> net = nn.LSTMCell(10, 12, has_bias=True, batch_first=True, bidirectional=False)
>>> input = Tensor(np.ones([3, 5, 10]).astype(np.float32))


mindspore/nn/layer/math.py (+18, -0)

@@ -60,6 +60,9 @@ class ReduceLogSumExp(Cell):
- If axis is tuple(int), set as (2, 3), and keep_dims is False,
the shape of output is :math:`(x_1, x_4, ..., x_R)`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = nn.ReduceLogSumExp(1, keep_dims=True)
@@ -98,6 +101,9 @@ class Range(Cell):
Outputs:
Tensor, the dtype is int if the dtype of `start`, `limit` and `delta` all are int. Otherwise, dtype is float.

Supported Platforms:
``Ascend``

Examples:
>>> net = nn.Range(1, 8, 2)
>>> output = net()
@@ -152,6 +158,9 @@ class LinSpace(Cell):
Outputs:
Tensor, with the same type as `start`. The shape is 1-D with length `num`.

Supported Platforms:
``Ascend``

Examples:
>>> linspace = nn.LinSpace(1, 10, 5)
>>> output = linspace()
@@ -225,6 +234,9 @@ class LGamma(Cell):
Outputs:
Tensor, has the same shape and dtype as the `input_x`.

Supported Platforms:
``Ascend``

Examples:
>>> input_x = Tensor(np.array([2, 3, 4]).astype(np.float32))
>>> op = nn.LGamma()
@@ -758,6 +770,9 @@ class MatMul(Cell):
Outputs:
Tensor, the shape of the output tensor depends on the dimension of input tensors.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> net = nn.MatMul()
>>> input_x1 = Tensor(np.ones(shape=[3, 2, 3]), mindspore.float32)
@@ -830,6 +845,9 @@ class Moments(Cell):
- **mean** (Tensor) - The mean of input x, with the same data type as input x.
- **variance** (Tensor) - The variance of input x, with the same data type as input x.

Supported Platforms:
``Ascend``

Examples:
>>> net = nn.Moments(axis=3, keep_dims=True)
>>> input_x = Tensor(np.array([[[[1, 2, 3, 4], [3, 4, 5, 6]]]]), mindspore.float32)


mindspore/nn/layer/normalization.py (+15, -0)

@@ -283,6 +283,9 @@ class BatchNorm1d(_BatchNorm):
Outputs:
Tensor, the normalized, scaled, offset tensor, of shape :math:`(N, C_{out})`.

Supported Platforms:
``Ascend``

Examples:
>>> net = nn.BatchNorm1d(num_features=4)
>>> np.random.seed(0)
@@ -369,6 +372,9 @@ class BatchNorm2d(_BatchNorm):
Outputs:
Tensor, the normalized, scaled, offset tensor, of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> net = nn.BatchNorm2d(num_features=3)
>>> np.random.seed(0)
@@ -458,6 +464,9 @@ class GlobalBatchNorm(_BatchNorm):
Outputs:
Tensor, the normalized, scaled, offset tensor, of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

Supported Platforms:
``Ascend``

Examples:
>>> # This example should be run with multiple processes. Refer to the run_distribute_train.sh
>>> import os
@@ -557,6 +566,9 @@ class LayerNorm(Cell):
Outputs:
Tensor, the normalized and scaled offset tensor, has the same shape and data type as the `input_x`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> x = Tensor(np.ones([20, 5, 10, 10]), mindspore.float32)
>>> shape1 = x.shape[1:]
@@ -630,6 +642,9 @@ class GroupNorm(Cell):
Outputs:
Tensor, the normalized and scaled offset tensor, has the same shape and data type as the `input_x`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> goup_norm_op = nn.GroupNorm(2, 2)
>>> x = Tensor(np.ones([1, 2, 4, 4], np.float32))


mindspore/nn/layer/pooling.py (+12, -0)

@@ -104,6 +104,9 @@ class MaxPool2d(_PoolNd):
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> pool = nn.MaxPool2d(kernel_size=3, stride=1)
>>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
@@ -171,6 +174,9 @@ class MaxPool1d(_PoolNd):
Outputs:
Tensor of shape :math:`(N, C, L_{out})`.

Supported Platforms:
``Ascend``

Examples:
>>> max_pool = nn.MaxPool1d(kernel_size=3, stride=1)
>>> x = Tensor(np.random.randint(0, 10, [1, 2, 4]), mindspore.float32)
@@ -257,6 +263,9 @@ class AvgPool2d(_PoolNd):
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> pool = nn.AvgPool2d(kernel_size=3, stride=1)
>>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
@@ -322,6 +331,9 @@ class AvgPool1d(_PoolNd):
Outputs:
Tensor of shape :math:`(N, C_{out}, L_{out})`.

Supported Platforms:
``Ascend``

Examples:
>>> pool = nn.AvgPool1d(kernel_size=6, stride=1)
>>> x = Tensor(np.random.randint(0, 10, [1, 3, 6]), mindspore.float32)


mindspore/nn/layer/quant.py (+21, -0)

@@ -587,6 +587,9 @@ class Conv2dBnFoldQuant(Cell):
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> qconfig = compression.quant.create_quant_config()
>>> conv2d_bnfold = nn.Conv2dBnFoldQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid",
@@ -772,6 +775,9 @@ class Conv2dBnWithoutFoldQuant(Cell):
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> qconfig = compression.quant.create_quant_config()
>>> conv2d_no_bnfold = nn.Conv2dBnWithoutFoldQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid",
@@ -893,6 +899,9 @@ class Conv2dQuant(Cell):
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> qconfig = compression.quant.create_quant_config()
>>> conv2d_quant = nn.Conv2dQuant(1, 6, kernel_size= (2, 2), stride=(1, 1), pad_mode="valid",
@@ -994,6 +1003,9 @@ class DenseQuant(Cell):
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> qconfig = compression.quant.create_quant_config()
>>> dense_quant = nn.DenseQuant(3, 6, quant_config=qconfig)
@@ -1100,6 +1112,9 @@ class ActQuant(_QuantActivation):
Outputs:
Tensor, with the same type and shape as the `input`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> qconfig = compression.quant.create_quant_config()
>>> act_quant = nn.ActQuant(nn.ReLU(), quant_config=qconfig)
@@ -1162,6 +1177,9 @@ class TensorAddQuant(Cell):
Outputs:
Tensor, with the same type and shape as the `input_x1`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> qconfig = compression.quant.create_quant_config()
>>> add_quant = nn.TensorAddQuant(quant_config=qconfig)
@@ -1210,6 +1228,9 @@ class MulQuant(Cell):
Outputs:
Tensor, with the same type and shape as the `input_x1`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> qconfig = compression.quant.create_quant_config()
>>> mul_quant = nn.MulQuant(quant_config=qconfig)


mindspore/nn/loss/loss.py (+18, -0)

@@ -91,6 +91,9 @@ class L1Loss(_Loss):
Outputs:
Tensor, loss float tensor.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> loss = nn.L1Loss()
>>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
@@ -133,6 +136,9 @@ class MSELoss(_Loss):
Outputs:
Tensor, weighted loss float tensor.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> loss = nn.MSELoss()
>>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
@@ -180,6 +186,9 @@ class SmoothL1Loss(_Loss):
Outputs:
Tensor, loss float tensor.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> loss = nn.SmoothL1Loss()
>>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
@@ -234,6 +243,9 @@ class SoftmaxCrossEntropyWithLogits(_Loss):
Tensor, a tensor of the same shape as logits with the component-wise
logistic losses.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
>>> logits = Tensor(np.random.randint(0, 9, [1, 10]), mindspore.float32)
@@ -511,6 +523,9 @@ class BCELoss(_Loss):
Tensor or Scalar, if `reduction` is 'none', then output is a tensor and has the same shape as `inputs`.
Otherwise, the output is a scalar.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> weight = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 3.3, 2.2]]), mindspore.float32)
>>> loss = nn.BCELoss(weight=weight, reduction='mean')
@@ -570,6 +585,9 @@ class CosineEmbeddingLoss(_Loss):
- **loss** (Tensor) - If `reduction` is "none", its shape is the same as `y`'s shape, otherwise a scalar value
will be returned.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> x1 = Tensor(np.array([[0.3, 0.8], [0.4, 0.3]]), mindspore.float32)
>>> x2 = Tensor(np.array([[0.4, 1.2], [-0.4, -0.9]]), mindspore.float32)


mindspore/nn/optim/adam.py (+9, -0)

@@ -272,6 +272,9 @@ class Adam(Optimizer):
Outputs:
Tensor[bool], the value is True.

Supported Platforms:
``Ascend``

Examples:
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay
@@ -407,6 +410,9 @@ class AdamWeightDecay(Optimizer):
Outputs:
tuple[bool], all elements are True.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay
@@ -535,6 +541,9 @@ class AdamOffload(Optimizer):
Outputs:
Tensor[bool], the value is True.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay


mindspore/nn/optim/ftrl.py (+3, -0)

@@ -126,6 +126,9 @@ class FTRL(Optimizer):
Outputs:
tuple[Parameter], the updated parameters, the shape is the same as `params`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay


mindspore/nn/optim/lamb.py (+3, -0)

@@ -235,6 +235,9 @@ class Lamb(Optimizer):
Outputs:
tuple[bool], all elements are True.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay


mindspore/nn/optim/lars.py (+3, -0)

@@ -68,6 +68,9 @@ class LARS(Optimizer):
Outputs:
Union[Tensor[bool], tuple[Parameter]], it depends on the output of `optimizer`.

Supported Platforms:
``Ascend``

Examples:
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()


mindspore/nn/optim/lazyadam.py (+3, -0)

@@ -183,6 +183,9 @@ class LazyAdam(Optimizer):
Outputs:
Tensor[bool], the value is True.

Supported Platforms:
``Ascend``

Examples:
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay


mindspore/nn/optim/momentum.py (+3, -0)

@@ -104,6 +104,9 @@ class Momentum(Optimizer):
Raises:
ValueError: If the momentum is less than 0.0.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay


mindspore/nn/optim/optimizer.py (+3, -0)

@@ -83,6 +83,9 @@ class Optimizer(Cell):
Raises:
ValueError: If the learning_rate is a Tensor, but the dimension of the tensor is greater than 1.
TypeError: If the learning_rate is not one of the three types: float, Tensor, or Iterable.

Supported Platforms:
``Ascend`` ``GPU``
"""

def __init__(self, learning_rate, parameters, weight_decay=0.0, loss_scale=1.0):


mindspore/nn/optim/proximal_ada_grad.py (+3, -0)

@@ -107,6 +107,9 @@ class ProximalAdagrad(Optimizer):
Outputs:
Tensor[bool], the value is True.

Supported Platforms:
``Ascend``

Examples:
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay


mindspore/nn/optim/rmsprop.py (+3, -0)

@@ -130,6 +130,9 @@ class RMSProp(Optimizer):
Outputs:
Tensor[bool], the value is True.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay


mindspore/nn/optim/sgd.py (+3, -0)

@@ -103,6 +103,9 @@ class SGD(Optimizer):
Raises:
ValueError: If the momentum, dampening or weight_decay value is less than 0.0.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay


mindspore/nn/wrap/cell_wrapper.py (+18, -0)

@@ -78,6 +78,9 @@ class WithLossCell(Cell):
Outputs:
Tensor, a scalar tensor with shape :math:`()`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = Net()
>>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
@@ -134,6 +137,9 @@ class WithGradCell(Cell):
Outputs:
list, a list of Tensors with identical shapes as trainable weights.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> # For a defined network Net without loss function
>>> net = Net()
@@ -187,6 +193,9 @@ class TrainOneStepCell(Cell):
Outputs:
Tensor, a scalar Tensor with shape :math:`()`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> net = Net()
>>> loss_fn = nn.SoftmaxCrossEntropyWithLogits()
@@ -249,6 +258,9 @@ class GetNextSingleOp(Cell):

For detailed information, refer to `ops.operations.GetNext`.

Supported Platforms:
``GPU``

Examples:
>>> # Refer to dataset_helper.py for detail usage.
>>> data_set = get_dataset()
@@ -346,6 +358,9 @@ class WithEvalCell(Cell):
Tuple, containing a scalar loss Tensor, a network output Tensor of shape :math:`(N, \ldots)`
and a label Tensor of shape :math:`(N, \ldots)`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> # For a defined network Net without loss function
>>> net = Net()
@@ -380,6 +395,9 @@ class ParameterUpdate(Cell):
Raises:
KeyError: If parameter with the specified name does not exist.

Supported Platforms:
``Ascend``

Examples:
>>> network = Net()
>>> param = network.parameters_dict()['learning_rate']


mindspore/nn/wrap/grad_reducer.py (+3, -0)

@@ -245,6 +245,9 @@ class DistributedGradReducer(Cell):
Raises:
ValueError: If degree is not an int or is less than 0.

Supported Platforms:
``Ascend``

Examples:
>>> # This example should be run with multiple processes. Refer to the run_distribute_train.sh
>>> import os


mindspore/nn/wrap/loss_scale.py (+9, -0)

@@ -75,6 +75,9 @@ class DynamicLossScaleUpdateCell(Cell):
Outputs:
Tensor, a scalar Tensor with shape :math:`()`.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> import numpy as np
>>> from mindspore import Tensor, Parameter, nn
@@ -155,6 +158,9 @@ class FixedLossScaleUpdateCell(Cell):
Args:
loss_scale_value (float): Initializes loss scale.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> import numpy as np
>>> from mindspore import Tensor, Parameter, nn
@@ -220,6 +226,9 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
- **overflow** (Tensor) - Tensor with shape :math:`()`, type is bool.
- **loss scaling value** (Tensor) - Tensor with shape :math:`()`

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> import numpy as np
>>> from mindspore import Tensor, Parameter, nn


mindspore/ops/operations/array_ops.py (+6, -0)

@@ -1135,6 +1135,9 @@ class Ones(PrimitiveWithInfer):
Outputs:
Tensor, has the same type and shape as input shape value.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> from mindspore.ops import operations as P
>>> ones = P.Ones()
@@ -1185,6 +1188,9 @@ class Zeros(PrimitiveWithInfer):
Outputs:
Tensor, has the same type and shape as input shape value.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> from mindspore.ops import operations as P
>>> zeros = P.Zeros()


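Because the label is plain docstring text, it is visible through the ordinary Python docstring machinery once this commit is included. A quick check (a sketch, assuming a MindSpore installation that contains this change):

    from mindspore import nn

    doc = nn.Softmax.__doc__
    assert "Supported Platforms:" in doc
    # Print the line after the label, e.g. ``Ascend`` ``GPU`` ``CPU``
    print(doc.split("Supported Platforms:")[1].strip().splitlines()[0])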