From: @JunYuLiu
Reviewed-by: @gemini524
Tag: tags/v1.1.0
@@ -51,6 +51,9 @@ class Cell(Cell_):
     Args:
         auto_prefix (bool): Recursively generate namespaces. Default: True.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> class MyCell(Cell):
         >>>     def __init__(self):
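[Note, illustration only — not part of the patch] A minimal sketch of what this docstring describes: subclass `Cell`, then pick any of the back ends tagged above via `context.set_context`. The `MyNet` name, layer sizes, and input shape are assumptions for the example.

```python
import numpy as np
import mindspore
import mindspore.nn as nn
from mindspore import Tensor, context

# Any tag listed above (``Ascend`` ``GPU`` ``CPU``) is a valid device_target here.
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

class MyNet(nn.Cell):
    """A two-layer Cell; auto_prefix=True (the default) namespaces its parameters."""
    def __init__(self):
        super(MyNet, self).__init__()
        self.dense = nn.Dense(3, 4)
        self.relu = nn.ReLU()

    def construct(self, x):
        return self.relu(self.dense(x))

net = MyNet()
out = net(Tensor(np.ones([2, 3]), mindspore.float32))
print(out.shape)  # (2, 4)
```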
@@ -67,6 +67,9 @@ class Softmax(Cell):
     Outputs:
         Tensor, which has the same type and shape as `x` with values in the range [0, 1].

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
         >>> softmax = nn.Softmax()
@@ -104,6 +107,9 @@ class LogSoftmax(Cell):
     Outputs:
         Tensor, which has the same type and shape as the input `x`, with values in the range [-inf, 0).

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
         >>> log_softmax = nn.LogSoftmax()
@@ -174,6 +180,9 @@ class ReLU(Cell):
     Outputs:
         Tensor, with the same type and shape as the `input_data`.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> input_x = Tensor(np.array([-1, 2, -3, 2, -1]), mindspore.float16)
         >>> relu = nn.ReLU()
@@ -204,6 +213,9 @@ class ReLU6(Cell):
     Outputs:
         Tensor, which has the same type as `input_data`.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
         >>> relu6 = nn.ReLU6()
@@ -242,6 +254,9 @@ class LeakyReLU(Cell):
     Outputs:
         Tensor, has the same type and shape as the `input_x`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
         >>> leaky_relu = nn.LeakyReLU()
@@ -287,6 +302,9 @@ class Tanh(Cell):
     Outputs:
         Tensor, with the same type and shape as the `input_data`.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> input_x = Tensor(np.array([1, 2, 3, 2, 1]), mindspore.float16)
         >>> tanh = nn.Tanh()
@@ -319,6 +337,9 @@ class GELU(Cell):
     Outputs:
         Tensor, with the same type and shape as the `input_data`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
         >>> gelu = nn.GELU()
@@ -351,6 +372,9 @@ class Sigmoid(Cell):
     Outputs:
         Tensor, with the same type and shape as the `input_data`.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
         >>> sigmoid = nn.Sigmoid()
@@ -390,6 +414,9 @@ class PReLU(Cell):
     Outputs:
         Tensor, with the same type and shape as the `input_data`.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> input_x = Tensor(np.array([[[[0.1, 0.6], [0.9, 0.9]]]]), mindspore.float32)
         >>> prelu = nn.PReLU()
@@ -444,6 +471,9 @@ class HSwish(Cell):
     Outputs:
         Tensor, with the same type and shape as the `input_data`.

+    Supported Platforms:
+        ``GPU``
+
     Examples:
         >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
         >>> hswish = nn.HSwish()
@@ -479,6 +509,9 @@ class HSigmoid(Cell):
     Outputs:
         Tensor, with the same type and shape as the `input_data`.

+    Supported Platforms:
+        ``GPU``
+
     Examples:
         >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
         >>> hsigmoid = nn.HSigmoid()
@@ -514,6 +547,9 @@ class LogSigmoid(Cell):
     Outputs:
         Tensor, with the same type and shape as the `input_data`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = nn.LogSigmoid()
         >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
@@ -72,6 +72,9 @@ class Dropout(Cell):
     Outputs:
         Tensor, output tensor with the same shape as the input.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> x = Tensor(np.ones([2, 2, 3]), mindspore.float32)
         >>> net = nn.Dropout(keep_prob=0.8)
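[Note, illustration only — not part of the patch] A quick sketch of the train/eval split that `Dropout` hinges on, assuming a standard MindSpore install:

```python
import numpy as np
import mindspore
import mindspore.nn as nn
from mindspore import Tensor

x = Tensor(np.ones([2, 2, 3]), mindspore.float32)
net = nn.Dropout(keep_prob=0.8)

net.set_train()        # training mode: ~20% of elements zeroed, the rest scaled by 1/0.8
print(net(x))
net.set_train(False)   # inference mode: Dropout is the identity, output equals x
print(net(x))
```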
@@ -139,6 +142,9 @@ class Flatten(Cell):
         Tensor, the shape of the output tensor is :math:`(N, X)`, where :math:`X` is
         the product of the remaining dimensions.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> input = Tensor(np.array([[[1.2, 1.2], [2.1, 2.1]], [[2.2, 2.2], [3.2, 3.2]]]), mindspore.float32)
         >>> net = nn.Flatten()
@@ -195,6 +201,9 @@ class Dense(Cell):
     Outputs:
         Tensor of shape :math:`(*, out\_channels)`.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> input = Tensor(np.random.randint(0, 255, [2, 3]), mindspore.float32)
         >>> net = nn.Dense(3, 4)
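[Note, illustration only — not part of the patch] A short sketch of the shape contract stated in the `Dense` docstring; the input values are arbitrary.

```python
import numpy as np
import mindspore
import mindspore.nn as nn
from mindspore import Tensor

net = nn.Dense(3, 4)   # weight: (4, 3), bias: (4,)
x = Tensor(np.random.randint(0, 255, [2, 3]), mindspore.float32)
y = net(x)             # y = x @ W^T + b; no activation by default
print(y.shape)         # (2, 4)
```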
@@ -316,6 +325,9 @@ class ClipByNorm(Cell):
     Outputs:
         Tensor, clipped tensor with the same shape as the input, whose type is float32.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = nn.ClipByNorm()
         >>> input = Tensor(np.random.randint(0, 10, [4, 16]), mindspore.float32)
@@ -387,6 +399,9 @@ class Norm(Cell):
         Tensor, output tensor with dimensions in 'axis' reduced to 1 will be returned if 'keep_dims' is True;
         otherwise a Tensor with dimensions in 'axis' removed is returned.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = nn.Norm(axis=0)
         >>> input = Tensor(np.random.randint(0, 10, [2, 4]), mindspore.float32)
@@ -444,6 +459,9 @@ class OneHot(Cell):
         Tensor, the one-hot tensor of data type 'dtype' with dimension at 'axis' expanded to 'depth' and filled with
         on_value and off_value.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> net = nn.OneHot(depth=4, axis=1)
         >>> indices = Tensor([[1, 3], [0, 2]], dtype=mindspore.int32)
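[Note, illustration only — not part of the patch] Sketch of the axis semantics described above for `OneHot`; the indices are arbitrary.

```python
import mindspore
import mindspore.nn as nn
from mindspore import Tensor

net = nn.OneHot(depth=4, axis=1)
indices = Tensor([[1, 3], [0, 2]], dtype=mindspore.int32)
output = net(indices)
# indices has shape (2, 2); a depth-4 axis is inserted at axis=1,
# so the result has shape (2, 4, 2), filled with on_value 1.0 / off_value 0.0.
print(output.shape)
```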
@@ -501,6 +519,9 @@ class Pad(Cell):
         is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the Outputs is
         [[2,1,1,2,3,3,2],[2,1,1,2,3,3,2],[5,4,4,5,6,6,5],[8,7,7,8,9,9,8],[8,7,7,8,9,9,8]].

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> from mindspore import Tensor
         >>> from mindspore.ops import operations as P
@@ -587,6 +608,9 @@ class Interpolate(Cell):
         If scale is set, the result is a 4-D tensor with shape :math:`(batch, channels, scale_factor * height,
         scale_factor * width)` in float32.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> from mindspore.ops import operations as P
         >>> tensor = Tensor([[[[1, 2, 3, 4], [5, 6, 7, 8]]]], mindspore.float32)
@@ -630,6 +654,9 @@ class Unfold(Cell):
         Tensor, a 4-D tensor whose data type is the same as 'input_x',
         and the shape is [out_batch, out_depth, out_row, out_col], where out_batch is the same as the in_batch.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> net = Unfold(ksizes=[1, 2, 2, 1], strides=[1, 2, 2, 1], rates=[1, 2, 2, 1])
         >>> image = Tensor(np.ones([2, 3, 6, 6]), dtype=mstype.float16)
@@ -675,6 +702,9 @@ class Tril(Cell):
     Outputs:
         Tensor, has the same type as input `x`.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> x = Tensor(np.array([[1, 2], [3, 4]]))
         >>> tril = nn.Tril()
@@ -757,6 +787,9 @@ class MatrixDiag(Cell):
     Outputs:
         Tensor, has the same type as input `x`. The shape must be x.shape + (x.shape[-1], ).

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> x = Tensor(np.array([1, -1]), mstype.float32)
         >>> matrix_diag = nn.MatrixDiag()
@@ -789,6 +822,9 @@ class MatrixDiagPart(Cell):
     Outputs:
         Tensor, has the same type as input `x`. The shape must be x.shape[:-2] + [min(x.shape[-2:])].

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
         >>> matrix_diag_part = nn.MatrixDiagPart()
@@ -823,6 +859,9 @@ class MatrixSetDiag(Cell):
     Outputs:
         Tensor, has the same type and shape as input `x`.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
         >>> diagonal = Tensor([[-1., 2.], [-1., 1.], [-1., 1.]], mindspore.float32)
@@ -76,6 +76,9 @@ class Conv2dBnAct(Cell):
     Outputs:
         Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = nn.Conv2dBnAct(120, 240, 4, has_bn=True, activation='relu')
         >>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
@@ -168,6 +171,9 @@ class DenseBnAct(Cell):
     Outputs:
         Tensor of shape :math:`(N, out\_channels)`.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> net = nn.DenseBnAct(3, 4)
         >>> input = Tensor(np.random.randint(0, 255, [2, 3]), mindspore.float32)
@@ -80,6 +80,9 @@ class SequentialCell(Cell):
     Outputs:
         Tensor, the output Tensor with shape depending on the input and defined sequence of Cells.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> conv = nn.Conv2d(3, 2, 3, pad_mode='valid')
         >>> bn = nn.BatchNorm2d(2)
@@ -184,6 +187,9 @@ class CellList(_CellListBase, Cell):
     Args:
         args (list, optional): List of subclass of Cell.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> conv = nn.Conv2d(100, 20, 3)
         >>> bn = nn.BatchNorm2d(20)
@@ -196,6 +196,9 @@ class Conv2d(_Conv):
     Outputs:
         Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})` or `(N, H_{out}, W_{out}, C_{out})`.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> net = nn.Conv2d(120, 240, 4, has_bias=False, weight_init='normal')
         >>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
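[Note, illustration only — not part of the patch] Sketch matching the `Conv2d` example context above; the shape comment assumes the default `pad_mode='same'` and stride 1.

```python
import numpy as np
import mindspore
import mindspore.nn as nn
from mindspore import Tensor

net = nn.Conv2d(120, 240, 4, has_bias=False, weight_init='normal')
x = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
# With the default pad_mode='same' and stride 1, spatial size is preserved.
print(net(x).shape)  # (1, 240, 1024, 640)
```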
@@ -372,6 +375,9 @@ class Conv1d(_Conv):
     Outputs:
         Tensor of shape :math:`(N, C_{out}, W_{out})`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = nn.Conv1d(120, 240, 4, has_bias=False, weight_init='normal')
         >>> input = Tensor(np.ones([1, 120, 640]), mindspore.float32)
@@ -543,6 +549,9 @@ class Conv2dTranspose(_Conv):
     Outputs:
         Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = nn.Conv2dTranspose(3, 64, 4, has_bias=False, weight_init='normal', pad_mode='pad')
         >>> input = Tensor(np.ones([1, 3, 16, 50]), mindspore.float32)
@@ -719,6 +728,9 @@ class Conv1dTranspose(_Conv):
     Outputs:
         Tensor of shape :math:`(N, C_{out}, W_{out})`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = nn.Conv1dTranspose(3, 64, 4, has_bias=False, weight_init='normal', pad_mode='pad')
         >>> input = Tensor(np.ones([1, 3, 50]), mindspore.float32)
@@ -60,6 +60,9 @@ class Embedding(Cell):
     Outputs:
         Tensor of shape :math:`(\text{batch_size}, \text{input_length}, \text{embedding_size})`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = nn.Embedding(20000, 768, True)
         >>> input_data = Tensor(np.ones([8, 128]), mindspore.int32)
@@ -160,6 +163,9 @@ class EmbeddingLookup(Cell):
     Outputs:
         Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> input_indices = Tensor(np.array([[1, 0], [3, 2]]), mindspore.int32)
         >>> result = nn.EmbeddingLookup(4,2)(input_indices)
@@ -50,6 +50,9 @@ class ImageGradients(Cell):
         - **dy** (Tensor) - vertical image gradients, the same type and shape as input.
         - **dx** (Tensor) - horizontal image gradients, the same type and shape as input.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = nn.ImageGradients()
         >>> image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mindspore.int32)
@@ -211,6 +214,9 @@ class SSIM(Cell):
     Outputs:
         Tensor, has the same dtype as img1. It is a 1-D tensor with shape N, where N is the batch num of img1.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = nn.SSIM()
         >>> img1 = Tensor(np.random.random((1,3,16,16)), mindspore.float32)
@@ -289,6 +295,9 @@ class MSSSIM(Cell):
     Outputs:
         Tensor, the value is in range [0, 1]. It is a 1-D tensor with shape N, where N is the batch num of img1.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> net = nn.MSSSIM(power_factors=(0.033, 0.033, 0.033))
         >>> np.random.seed(0)
@@ -380,6 +389,9 @@ class PSNR(Cell):
     Outputs:
         Tensor, with dtype mindspore.float32. It is a 1-D tensor with shape N, where N is the batch num of img1.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = nn.PSNR()
         >>> img1 = Tensor(np.random.random((1,3,16,16)))
@@ -450,6 +462,9 @@ class CentralCrop(Cell):
     Outputs:
         Tensor, 3-D or 4-D float tensor, according to the input.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> net = nn.CentralCrop(central_fraction=0.5)
         >>> image = Tensor(np.random.random((4, 3, 4, 4)), mindspore.float32)
@@ -95,6 +95,9 @@ class LSTM(Cell):
         - **hx_n** (tuple) - A tuple of two Tensor (h_n, c_n) both of shape
           (num_directions * `num_layers`, batch_size, `hidden_size`).

+    Supported Platforms:
+        ``GPU``
+
     Examples:
         >>> net = nn.LSTM(10, 12, 2, has_bias=True, batch_first=True, bidirectional=False)
         >>> input = Tensor(np.ones([3, 5, 10]).astype(np.float32))
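[Note, illustration only — not part of the patch] Since `LSTM` is tagged ``GPU`` only, a sketch that pins the device target accordingly (assuming a GPU build of MindSpore); the zero initial states follow the (num_directions * num_layers, batch_size, hidden_size) shape quoted above.

```python
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, context

context.set_context(device_target="GPU")  # LSTM is tagged ``GPU`` only here

net = nn.LSTM(10, 12, 2, has_bias=True, batch_first=True, bidirectional=False)
x = Tensor(np.ones([3, 5, 10]).astype(np.float32))
# h0/c0: (num_directions * num_layers, batch_size, hidden_size) = (2, 3, 12)
h0 = Tensor(np.zeros([2, 3, 12]).astype(np.float32))
c0 = Tensor(np.zeros([2, 3, 12]).astype(np.float32))
output, (hn, cn) = net(x, (h0, c0))
print(output.shape)  # (3, 5, 12) since batch_first=True
```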
@@ -303,6 +306,9 @@ class LSTMCell(Cell):
         - **reserve** - reserved
         - **state** - reserved

+    Supported Platforms:
+        ``GPU`` ``CPU``
+
     Examples:
         >>> net = nn.LSTMCell(10, 12, has_bias=True, batch_first=True, bidirectional=False)
         >>> input = Tensor(np.ones([3, 5, 10]).astype(np.float32))
@@ -60,6 +60,9 @@ class ReduceLogSumExp(Cell):
         - If axis is tuple(int), set as (2, 3), and keep_dims is False,
           the shape of output is :math:`(x_1, x_4, ..., x_R)`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
         >>> op = nn.ReduceLogSumExp(1, keep_dims=True)
@@ -98,6 +101,9 @@ class Range(Cell):
     Outputs:
         Tensor, the dtype is int if the dtype of `start`, `limit` and `delta` are all int. Otherwise, dtype is float.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> net = nn.Range(1, 8, 2)
         >>> output = net()
@@ -152,6 +158,9 @@ class LinSpace(Cell):
     Outputs:
         Tensor, with the same type as `start`. The shape is 1-D with length of `num`.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> linspace = nn.LinSpace(1, 10, 5)
         >>> output = linspace()
@@ -225,6 +234,9 @@ class LGamma(Cell):
     Outputs:
         Tensor, has the same shape and dtype as the `input_x`.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> input_x = Tensor(np.array([2, 3, 4]).astype(np.float32))
         >>> op = nn.LGamma()
@@ -758,6 +770,9 @@ class MatMul(Cell):
     Outputs:
         Tensor, the shape of the output tensor depends on the dimension of input tensors.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> net = nn.MatMul()
         >>> input_x1 = Tensor(np.ones(shape=[3, 2, 3]), mindspore.float32)
@@ -830,6 +845,9 @@ class Moments(Cell):
         - **mean** (Tensor) - The mean of input x, with the same data type as input x.
         - **variance** (Tensor) - The variance of input x, with the same data type as input x.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> net = nn.Moments(axis=3, keep_dims=True)
         >>> input_x = Tensor(np.array([[[[1, 2, 3, 4], [3, 4, 5, 6]]]]), mindspore.float32)
@@ -283,6 +283,9 @@ class BatchNorm1d(_BatchNorm):
     Outputs:
         Tensor, the normalized, scaled, offset tensor, of shape :math:`(N, C_{out})`.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> net = nn.BatchNorm1d(num_features=4)
         >>> np.random.seed(0)
@@ -369,6 +372,9 @@ class BatchNorm2d(_BatchNorm):
     Outputs:
         Tensor, the normalized, scaled, offset tensor, of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> net = nn.BatchNorm2d(num_features=3)
         >>> np.random.seed(0)
@@ -458,6 +464,9 @@ class GlobalBatchNorm(_BatchNorm):
     Outputs:
         Tensor, the normalized, scaled, offset tensor, of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> # This example should be run with multiple processes. Refer to the run_distribute_train.sh
         >>> import os
@@ -557,6 +566,9 @@ class LayerNorm(Cell):
     Outputs:
         Tensor, the normalized and scaled offset tensor, has the same shape and data type as the `input_x`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> x = Tensor(np.ones([20, 5, 10, 10]), mindspore.float32)
         >>> shape1 = x.shape[1:]
@@ -630,6 +642,9 @@ class GroupNorm(Cell):
     Outputs:
         Tensor, the normalized and scaled offset tensor, has the same shape and data type as the `input_x`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> goup_norm_op = nn.GroupNorm(2, 2)
         >>> x = Tensor(np.ones([1, 2, 4, 4], np.float32))
@@ -104,6 +104,9 @@ class MaxPool2d(_PoolNd):
     Outputs:
         Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> pool = nn.MaxPool2d(kernel_size=3, stride=1)
         >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
@@ -171,6 +174,9 @@ class MaxPool1d(_PoolNd):
     Outputs:
         Tensor of shape :math:`(N, C, L_{out})`.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> max_pool = nn.MaxPool1d(kernel_size=3, stride=1)
         >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4]), mindspore.float32)
@@ -257,6 +263,9 @@ class AvgPool2d(_PoolNd):
     Outputs:
         Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> pool = nn.AvgPool2d(kernel_size=3, stride=1)
         >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
@@ -322,6 +331,9 @@ class AvgPool1d(_PoolNd):
     Outputs:
         Tensor of shape :math:`(N, C_{out}, L_{out})`.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> pool = nn.AvgPool1d(kernel_size=6, stride=1)
         >>> x = Tensor(np.random.randint(0, 10, [1, 3, 6]), mindspore.float32)
@@ -587,6 +587,9 @@ class Conv2dBnFoldQuant(Cell):
     Outputs:
         Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> qconfig = compression.quant.create_quant_config()
         >>> conv2d_bnfold = nn.Conv2dBnFoldQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid",
@@ -772,6 +775,9 @@ class Conv2dBnWithoutFoldQuant(Cell):
     Outputs:
         Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> qconfig = compression.quant.create_quant_config()
         >>> conv2d_no_bnfold = nn.Conv2dBnWithoutFoldQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid",
@@ -893,6 +899,9 @@ class Conv2dQuant(Cell):
     Outputs:
         Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> qconfig = compression.quant.create_quant_config()
         >>> conv2d_quant = nn.Conv2dQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid",
@@ -994,6 +1003,9 @@ class DenseQuant(Cell):
     Outputs:
         Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> qconfig = compression.quant.create_quant_config()
         >>> dense_quant = nn.DenseQuant(3, 6, quant_config=qconfig)
@@ -1100,6 +1112,9 @@ class ActQuant(_QuantActivation):
     Outputs:
         Tensor, with the same type and shape as the `input`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> qconfig = compression.quant.create_quant_config()
         >>> act_quant = nn.ActQuant(nn.ReLU(), quant_config=qconfig)
@@ -1162,6 +1177,9 @@ class TensorAddQuant(Cell):
     Outputs:
         Tensor, with the same type and shape as the `input_x1`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> qconfig = compression.quant.create_quant_config()
         >>> add_quant = nn.TensorAddQuant(quant_config=qconfig)
@@ -1210,6 +1228,9 @@ class MulQuant(Cell):
     Outputs:
         Tensor, with the same type and shape as the `input_x1`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> qconfig = compression.quant.create_quant_config()
         >>> mul_quant = nn.MulQuant(quant_config=qconfig)
@@ -91,6 +91,9 @@ class L1Loss(_Loss):
     Outputs:
         Tensor, loss float tensor.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> loss = nn.L1Loss()
         >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
@@ -133,6 +136,9 @@ class MSELoss(_Loss):
     Outputs:
         Tensor, weighted loss float tensor.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> loss = nn.MSELoss()
         >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
@@ -180,6 +186,9 @@ class SmoothL1Loss(_Loss):
     Outputs:
         Tensor, loss float tensor.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> loss = nn.SmoothL1Loss()
         >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
@@ -234,6 +243,9 @@ class SoftmaxCrossEntropyWithLogits(_Loss):
         Tensor, a tensor of the same shape as logits with the component-wise
         logistic losses.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> logits = Tensor(np.random.randint(0, 9, [1, 10]), mindspore.float32)
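[Note, illustration only — not part of the patch] A sketch of the sparse-label path for `SoftmaxCrossEntropyWithLogits`; `reduction='mean'` is assumed here to get a scalar loss.

```python
import numpy as np
import mindspore
import mindspore.nn as nn
from mindspore import Tensor

loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
logits = Tensor(np.random.randn(3, 10).astype(np.float32))
labels = Tensor(np.array([1, 0, 4]), mindspore.int32)  # class indices, since sparse=True
print(loss(logits, labels))  # scalar: mean cross-entropy over the batch
```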
@@ -511,6 +523,9 @@ class BCELoss(_Loss):
         Tensor or Scalar, if `reduction` is 'none', then output is a tensor and has the same shape as `inputs`.
         Otherwise, the output is a scalar.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> weight = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 3.3, 2.2]]), mindspore.float32)
         >>> loss = nn.BCELoss(weight=weight, reduction='mean')
@@ -570,6 +585,9 @@ class CosineEmbeddingLoss(_Loss):
         - **loss** (Tensor) - If `reduction` is "none", its shape is the same as `y`'s shape, otherwise a scalar value
           will be returned.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> x1 = Tensor(np.array([[0.3, 0.8], [0.4, 0.3]]), mindspore.float32)
         >>> x2 = Tensor(np.array([[0.4, 1.2], [-0.4, -0.9]]), mindspore.float32)
@@ -272,6 +272,9 @@ class Adam(Optimizer):
     Outputs:
         Tensor[bool], the value is True.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> net = Net()
         >>> #1) All parameters use the same learning rate and weight decay
@@ -407,6 +410,9 @@ class AdamWeightDecay(Optimizer):
     Outputs:
         tuple[bool], all elements are True.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = Net()
         >>> #1) All parameters use the same learning rate and weight decay
@@ -535,6 +541,9 @@ class AdamOffload(Optimizer):
     Outputs:
         Tensor[bool], the value is True.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> net = Net()
         >>> #1) All parameters use the same learning rate and weight decay
@@ -126,6 +126,9 @@ class FTRL(Optimizer):
     Outputs:
         tuple[Parameter], the updated parameters, the shape is the same as `params`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = Net()
         >>> #1) All parameters use the same learning rate and weight decay
@@ -235,6 +235,9 @@ class Lamb(Optimizer):
     Outputs:
         tuple[bool], all elements are True.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = Net()
         >>> #1) All parameters use the same learning rate and weight decay
@@ -68,6 +68,9 @@ class LARS(Optimizer):
     Outputs:
         Union[Tensor[bool], tuple[Parameter]], it depends on the output of `optimizer`.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> net = Net()
         >>> loss = nn.SoftmaxCrossEntropyWithLogits()
@@ -183,6 +183,9 @@ class LazyAdam(Optimizer):
     Outputs:
         Tensor[bool], the value is True.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> net = Net()
         >>> #1) All parameters use the same learning rate and weight decay
@@ -104,6 +104,9 @@ class Momentum(Optimizer):
     Raises:
         ValueError: If the momentum is less than 0.0.

+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
     Examples:
         >>> net = Net()
         >>> #1) All parameters use the same learning rate and weight decay
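[Note, illustration only — not part of the patch] The `Momentum` docstring example alludes to two ways of passing parameters; a condensed sketch of both, with `ConvNet` and the grouping rule made up for illustration.

```python
import mindspore.nn as nn

class ConvNet(nn.Cell):
    """Stand-in network; auto_prefix gives parameters names like 'conv.weight'."""
    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv = nn.Conv2d(3, 8, 3)
        self.flatten = nn.Flatten()
        self.fc = nn.Dense(8 * 32 * 32, 10)

    def construct(self, x):
        return self.fc(self.flatten(self.conv(x)))

net = ConvNet()

# 1) One flat group: every parameter shares the learning rate.
opt = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)

# 2) Grouped parameters: conv weights get weight decay, the rest do not
#    (the 'conv' name filter is an assumed convention, not a MindSpore rule).
conv_params = [p for p in net.trainable_params() if 'conv' in p.name]
other_params = [p for p in net.trainable_params() if 'conv' not in p.name]
group_params = [{'params': conv_params, 'weight_decay': 0.01},
                {'params': other_params},
                {'order_params': net.trainable_params()}]
opt = nn.Momentum(group_params, learning_rate=0.1, momentum=0.9)
```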
@@ -83,6 +83,9 @@ class Optimizer(Cell):
     Raises:
         ValueError: If the learning_rate is a Tensor, but the dimension of the tensor is greater than 1.
         TypeError: If the learning_rate is not any of the three types: float, Tensor, or Iterable.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
     """

     def __init__(self, learning_rate, parameters, weight_decay=0.0, loss_scale=1.0):
@@ -107,6 +107,9 @@ class ProximalAdagrad(Optimizer):
     Outputs:
         Tensor[bool], the value is True.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> net = Net()
         >>> #1) All parameters use the same learning rate and weight decay
@@ -130,6 +130,9 @@ class RMSProp(Optimizer):
     Outputs:
         Tensor[bool], the value is True.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = Net()
         >>> #1) All parameters use the same learning rate and weight decay
@@ -103,6 +103,9 @@ class SGD(Optimizer):
     Raises:
         ValueError: If the momentum, dampening or weight_decay value is less than 0.0.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = Net()
         >>> #1) All parameters use the same learning rate and weight decay
@@ -78,6 +78,9 @@ class WithLossCell(Cell):
     Outputs:
         Tensor, a scalar tensor with shape :math:`()`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = Net()
         >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
@@ -134,6 +137,9 @@ class WithGradCell(Cell):
     Outputs:
         list, a list of Tensors with identical shapes as trainable weights.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> # For a defined network Net without loss function
         >>> net = Net()
@@ -187,6 +193,9 @@ class TrainOneStepCell(Cell):
     Outputs:
         Tensor, a scalar Tensor with shape :math:`()`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> net = Net()
         >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits()
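[Note, illustration only — not part of the patch] A self-contained sketch of one training step through `TrainOneStepCell`; the backbone, data, and labels are stand-ins.

```python
import numpy as np
import mindspore
import mindspore.nn as nn
from mindspore import Tensor

net = nn.Dense(16, 3)                       # stand-in backbone
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)

train_net = nn.TrainOneStepCell(nn.WithLossCell(net, loss_fn), opt)
train_net.set_train()

data = Tensor(np.random.randn(4, 16).astype(np.float32))
label = Tensor(np.array([0, 2, 1, 0]), mindspore.int32)
loss = train_net(data, label)  # one forward pass, backward pass, and parameter update
print(loss)
```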
@@ -249,6 +258,9 @@ class GetNextSingleOp(Cell):

     For detailed information, refer to `ops.operations.GetNext`.

+    Supported Platforms:
+        ``GPU``
+
     Examples:
         >>> # Refer to dataset_helper.py for detail usage.
         >>> data_set = get_dataset()
@@ -346,6 +358,9 @@ class WithEvalCell(Cell):
         Tuple, containing a scalar loss Tensor, a network output Tensor of shape :math:`(N, \ldots)`
         and a label Tensor of shape :math:`(N, \ldots)`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> # For a defined network Net without loss function
         >>> net = Net()
@@ -380,6 +395,9 @@ class ParameterUpdate(Cell):
     Raises:
         KeyError: If parameter with the specified name does not exist.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> network = Net()
         >>> param = network.parameters_dict()['learning_rate']
@@ -245,6 +245,9 @@ class DistributedGradReducer(Cell):
     Raises:
         ValueError: If degree is not an int or is less than 0.

+    Supported Platforms:
+        ``Ascend``
+
     Examples:
         >>> # This example should be run with multiple processes. Refer to the run_distribute_train.sh
         >>> import os
@@ -75,6 +75,9 @@ class DynamicLossScaleUpdateCell(Cell):
     Outputs:
         Tensor, a scalar Tensor with shape :math:`()`.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> import numpy as np
         >>> from mindspore import Tensor, Parameter, nn
@@ -155,6 +158,9 @@ class FixedLossScaleUpdateCell(Cell):
     Args:
         loss_scale_value (float): Initializes loss scale.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> import numpy as np
         >>> from mindspore import Tensor, Parameter, nn
@@ -220,6 +226,9 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
         - **overflow** (Tensor) - Tensor with shape :math:`()`, type is bool.
         - **loss scaling value** (Tensor) - Tensor with shape :math:`()`

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> import numpy as np
         >>> from mindspore import Tensor, Parameter, nn
@@ -1135,6 +1135,9 @@ class Ones(PrimitiveWithInfer):
     Outputs:
         Tensor, has the same type and shape as input shape value.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> from mindspore.ops import operations as P
         >>> ones = P.Ones()
@@ -1185,6 +1188,9 @@ class Zeros(PrimitiveWithInfer):
     Outputs:
         Tensor, has the same type and shape as input shape value.

+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
     Examples:
         >>> from mindspore.ops import operations as P
         >>> zeros = P.Zeros()
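[Note, illustration only — not part of the patch] Since this whole patch adds `Supported Platforms:` sections, a small sketch that reads them back out of the docstrings; `supported_platforms` is a hypothetical helper, not a MindSpore API.

```python
import mindspore.nn as nn

def supported_platforms(cls):
    """Hypothetical helper: pull the platform tags added by this patch out of a docstring."""
    lines = iter((cls.__doc__ or "").splitlines())
    for line in lines:
        if line.strip() == "Supported Platforms:":
            return next(lines, "").strip()
    return None  # section absent (e.g., older MindSpore release)

for cell in (nn.Softmax, nn.ReLU, nn.LSTM):
    print(cell.__name__, "->", supported_platforms(cell))
# e.g. Softmax -> ``Ascend`` ``GPU`` ``CPU``
```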