From: @JunYuLiu (tags/v1.1.0)
@@ -143,6 +143,9 @@ class ExpandDims(PrimitiveWithInfer):
Tensor, the shape of tensor is :math:`(1, x_1, x_2, ..., x_R)` if the
value of `axis` is 0.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> expand_dims = P.ExpandDims()
@@ -192,6 +195,9 @@ class DType(PrimitiveWithInfer):
Outputs:
mindspore.dtype, the data type of a tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> output = P.DType()(input_tensor)
@@ -227,6 +233,9 @@ class SameTypeShape(PrimitiveWithInfer):
Tensor, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`,
if data type and shape of `input_x` and `input_y` are the same.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> input_y = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
@@ -268,6 +277,9 @@ class Cast(PrimitiveWithInfer):
Outputs:
Tensor, the shape of tensor is the same as `input_x`, :math:`(x_1, x_2, ..., x_R)`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
>>> input_x = Tensor(input_np)
@@ -341,6 +353,9 @@ class IsSubClass(PrimitiveWithInfer):
Outputs:
bool, the check result.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> output = P.IsSubClass()(mindspore.int32, mindspore.intc)
>>> print(output)
@@ -377,6 +392,9 @@ class IsInstance(PrimitiveWithInfer):
Outputs:
bool, the check result.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> a = 1
>>> output = P.IsInstance()(a, mindspore.int32)
@@ -424,6 +442,9 @@ class Reshape(PrimitiveWithInfer):
Outputs:
Tensor, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> reshape = P.Reshape()
@@ -513,6 +534,9 @@ class Shape(PrimitiveWithInfer):
tuple[int], the output tuple is constructed by multiple integers,
:math:`(x_1, x_2, ..., x_R)`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
>>> shape = P.Shape()
@@ -543,6 +567,9 @@ class DynamicShape(Primitive):
Outputs:
Tensor[int], 1-D Tensor of type int32.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
>>> shape = P.DynamicShape()
@@ -578,6 +605,9 @@ class Squeeze(PrimitiveWithInfer):
Outputs:
Tensor, the shape of tensor is :math:`(x_1, x_2, ..., x_S)`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
>>> squeeze = P.Squeeze(2)
@@ -633,6 +663,9 @@ class Transpose(PrimitiveWithCheck):
Tensor, the type of output tensor is the same as `input_x` and the shape of output tensor is decided by the
shape of `input_x` and the value of `input_perm`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_tensor = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
>>> perm = (0, 2, 1)
@@ -673,6 +706,9 @@ class Unique(Primitive):
Tuple, containing Tensor objects `(y, idx)`. `y` is a tensor with the same type as `x`; `idx` is a tensor
containing the indices of the elements in the input corresponding to the output tensor.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
>>> output = P.Unique()(x)
@@ -700,6 +736,9 @@ class GatherV2(PrimitiveWithCheck):
Outputs:
Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32)
>>> input_indices = Tensor(np.array([1, 2]), mindspore.int32)
@@ -775,6 +814,9 @@ class Padding(PrimitiveWithInfer):
Outputs:
Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.
Supported Platforms:
``Ascend``
Examples:
>>> x = Tensor(np.array([[8], [10]]), mindspore.float32)
>>> pad_dim_size = 4
@@ -817,6 +859,9 @@ class UniqueWithPad(PrimitiveWithInfer):
- y (Tensor) - The unique elements filled with pad_num, the shape and type same as x.
- idx (Tensor) - The index of each value of x in the unique output y, the shape and type same as x.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> x = Tensor(np.array([1, 1, 5, 5, 4, 4, 3, 3, 2, 2,]), mindspore.int32)
>>> pad_num = 8
@@ -862,6 +907,9 @@ class Split(PrimitiveWithInfer):
tuple[Tensor], the shape of each output tensor is the same, which is
:math:`(y_1, y_2, ..., y_S)`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> split = P.Split(1, 2)
>>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32)
@@ -920,6 +968,9 @@ class Rank(PrimitiveWithInfer):
Outputs:
Tensor. 0-D int32 Tensor representing the rank of input, i.e., :math:`R`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> rank = P.Rank()
@@ -992,6 +1043,9 @@ class Size(PrimitiveWithInfer):
int, a scalar representing the number of elements in `input_x`,
:math:`size = x_1 * x_2 * ... * x_R`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> size = P.Size()
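For reference, the size formula above can be checked with a minimal sketch, assuming the standard imports these examples rely on; the values are illustrative, not part of the patch:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.ops.operations as P
>>> x = Tensor(np.ones((2, 3, 4)), mindspore.float32)
>>> # size = x_1 * x_2 * x_3 = 2 * 3 * 4
>>> print(P.Size()(x))
24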
@@ -1032,6 +1086,9 @@ class Fill(PrimitiveWithInfer):
Outputs:
Tensor, has the same type and shape as input value.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> fill = P.Fill()
>>> output = fill(mindspore.float32, (2, 2), 1)
@@ -1162,6 +1219,9 @@ class OnesLike(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape and type as `input_x` but filled with ones.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> oneslike = P.OnesLike()
>>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
@@ -1195,6 +1255,9 @@ class ZerosLike(PrimitiveWithCheck):
Outputs:
Tensor, has the same shape and data type as `input_x` but filled with zeros.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> zeroslike = P.ZerosLike()
>>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
@@ -1226,6 +1289,9 @@ class TupleToArray(PrimitiveWithInfer):
Outputs:
Tensor, if the input tuple contains `N` numbers, then the shape of the output tensor is (N,).
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> type = P.TupleToArray()((1,2,3))
>>> print(type)
@@ -1269,6 +1335,9 @@ class ScalarToArray(PrimitiveWithInfer):
Outputs:
Tensor. 0-D Tensor and the content is the input.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> op = P.ScalarToArray()
>>> data = 1.0
@@ -1302,6 +1371,9 @@ class ScalarToTensor(PrimitiveWithInfer):
Outputs:
Tensor. 0-D Tensor and the content is the input.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> op = P.ScalarToTensor()
>>> data = 1
@@ -1345,6 +1417,9 @@ class InvertPermutation(PrimitiveWithInfer):
Outputs:
tuple[int]. It has the same length as the input.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> invert = P.InvertPermutation()
>>> input_data = (3, 4, 0, 2, 1)
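The inverse permutation satisfies output[input_data[i]] == i for every position i; a minimal sketch under the same assumptions:
>>> import mindspore.ops.operations as P
>>> input_data = (3, 4, 0, 2, 1)
>>> output = P.InvertPermutation()(input_data)
>>> # e.g. input_data[0] == 3, so output[3] == 0
>>> print(output)
(2, 4, 3, 0, 1)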
@@ -1410,6 +1485,9 @@ class Argmax(PrimitiveWithInfer):
Outputs:
Tensor, indices of the max value of input tensor across the axis.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)
>>> output = P.Argmax(output_type=mindspore.int32)(input_x)
@@ -1459,6 +1537,9 @@ class Argmin(PrimitiveWithInfer):
Outputs:
Tensor, indices of the min value of input tensor across the axis.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)
>>> index = P.Argmin()(input_x)
@@ -1517,6 +1598,9 @@ class ArgMaxWithValue(PrimitiveWithInfer):
:math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
- output_x (Tensor) - The maximum value of input tensor, with the same shape as index.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.random.rand(5), mindspore.float32)
>>> index, output = P.ArgMaxWithValue()(input_x)
@@ -1569,6 +1653,9 @@ class ArgMinWithValue(PrimitiveWithInfer):
:math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
- output_x (Tensor) - The minimum value of input tensor, with the same shape as index.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.random.rand(5), mindspore.float32)
>>> output = P.ArgMinWithValue()(input_x)
@@ -1624,6 +1711,9 @@ class Tile(PrimitiveWithInfer):
then the shape of their corresponding positions can be multiplied, and
the shape of Outputs is :math:`(1*y_1, ..., x_S*y_R)`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> tile = P.Tile()
>>> input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
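A minimal sketch of the elementwise shape product described above (illustrative values, standard imports assumed):
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.ops.operations as P
>>> input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
>>> # (2, 2) tiled by multiples (2, 3) -> (2*2, 2*3)
>>> print(P.Tile()(input_x, (2, 3)).shape)
(4, 6)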
@@ -1699,6 +1789,9 @@ class UnsortedSegmentSum(PrimitiveWithInfer):
Outputs:
Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor([1, 2, 3, 4], mindspore.float32)
>>> segment_ids = Tensor([0, 0, 1, 2], mindspore.int32)
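Continuing the values above, a hedged sketch of the expected result (segment 3 receives no elements, so it sums to zero):
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.ops.operations as P
>>> input_x = Tensor([1, 2, 3, 4], mindspore.float32)
>>> segment_ids = Tensor([0, 0, 1, 2], mindspore.int32)
>>> # z = num_segments = 4; ids 0 and 0 pool the first two elements
>>> print(P.UnsortedSegmentSum()(input_x, segment_ids, 4))
[3. 3. 4. 0.]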
@@ -1774,6 +1867,9 @@ class UnsortedSegmentMin(PrimitiveWithInfer):
Outputs:
Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
>>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))
@@ -1830,6 +1926,9 @@ class UnsortedSegmentMax(PrimitiveWithInfer):
Outputs:
Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
>>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))
@@ -1883,6 +1982,9 @@ class UnsortedSegmentProd(PrimitiveWithInfer):
Outputs:
Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
>>> segment_ids = Tensor(np.array([0, 1, 0]).astype(np.int32))
@@ -1947,6 +2049,9 @@ class Concat(PrimitiveWithInfer):
Outputs:
Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> data1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
>>> data2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
@@ -1995,6 +2100,9 @@ class ParallelConcat(PrimitiveWithInfer):
Outputs:
Tensor, data type is the same as `values`.
Supported Platforms:
``Ascend``
Examples:
>>> data1 = Tensor(np.array([[0, 1]]).astype(np.int32))
>>> data2 = Tensor(np.array([[2, 1]]).astype(np.int32))
@@ -2079,6 +2187,9 @@ class Pack(PrimitiveWithInfer):
or if axis is out of the range [-(R+1), R+1);
or if the shapes of elements in input_x are not the same.
Supported Platforms:
``Ascend``
Examples:
>>> data1 = Tensor(np.array([0, 1]).astype(np.float32))
>>> data2 = Tensor(np.array([2, 3]).astype(np.float32))
@@ -2131,6 +2242,9 @@ class Unpack(PrimitiveWithInfer):
Raises:
ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)).
Supported Platforms:
``Ascend``
Examples:
>>> unpack = P.Unpack()
>>> input_x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))
@@ -2186,6 +2300,9 @@ class Slice(PrimitiveWithInfer):
Outputs:
Tensor, the shape is the input `size`, and the data type is the same as that of input `x`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> data = Tensor(np.array([[[1, 1, 1], [2, 2, 2]],
... [[3, 3, 3], [4, 4, 4]],
@@ -2240,6 +2357,9 @@ class ReverseV2(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape and type as `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
>>> op = P.ReverseV2(axis=[1])
@@ -2279,6 +2399,9 @@ class Rint(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape and type as `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([-1.6, -0.1, 1.5, 2.0]), mindspore.float32)
>>> op = P.Rint()
@@ -2339,6 +2462,9 @@ class Select(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape as `input_x`. The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> select = P.Select()
>>> input_cond = Tensor([True, False])
@@ -2473,6 +2599,9 @@ class StridedSlice(PrimitiveWithInfer):
i.e., [3, 3, 3].
- Finally, the output is [3, 3, 3].
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
... [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
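A minimal sketch of the begin/end/strides walk described above, with illustrative values (standard imports assumed):
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.ops.operations as P
>>> x = Tensor(np.arange(18).reshape(3, 2, 3), mindspore.float32)
>>> # begin (1, 0, 0), end (2, 1, 3), strides (1, 1, 1) -> one 1x1x3 block
>>> print(P.StridedSlice()(x, (1, 0, 0), (2, 1, 3), (1, 1, 1)).shape)
(1, 1, 3)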
@@ -2722,6 +2851,9 @@ class Eye(PrimitiveWithInfer):
Outputs:
Tensor, a tensor with ones on the diagonal and the rest of elements are zero.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> eye = P.Eye()
>>> output = eye(2, 2, mindspore.int32)
@@ -2758,6 +2890,9 @@ class ScatterNd(PrimitiveWithInfer):
Outputs:
Tensor, the new tensor, has the same type as `update` and the same shape as `shape`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> op = P.ScatterNd()
>>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
@@ -2811,6 +2946,9 @@ class ResizeNearestNeighbor(PrimitiveWithInfer):
Outputs:
Tensor, the shape of the output tensor is :math:`(N, C, NEW\_H, NEW\_W)`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_tensor = Tensor(np.array([[[[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]]]), mindspore.float32)
>>> resize = P.ResizeNearestNeighbor((2, 2))
@@ -2852,6 +2990,9 @@ class GatherNd(PrimitiveWithInfer):
Outputs:
Tensor, has the same type as `input_x` and the shape is indices_shape[:-1] + x_shape[indices_shape[-1]:].
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
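The output-shape rule indices_shape[:-1] + x_shape[indices_shape[-1]:] can be checked with a small sketch (illustrative values):
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.ops.operations as P
>>> x = Tensor(np.arange(24).reshape(2, 3, 4), mindspore.float32)
>>> indices = Tensor(np.array([[0, 1], [1, 2]]), mindspore.int32)
>>> # (2, 2)[:-1] + (2, 3, 4)[2:] -> (2,) + (4,)
>>> print(P.GatherNd()(x, indices).shape)
(2, 4)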
@@ -2889,6 +3030,9 @@ class TensorScatterUpdate(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape and type as `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
@@ -2942,6 +3086,9 @@ class ScatterUpdate(_ScatterOp_Dynamic):
Outputs:
Tensor, has the same shape and type as `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> np_x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
>>> input_x = mindspore.Parameter(Tensor(np_x, mindspore.float32), name="x")
@@ -2984,6 +3131,9 @@ class ScatterNdUpdate(_ScatterNdOp):
Outputs:
Tensor, has the same shape and type as `input_x`.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> np_x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
>>> input_x = mindspore.Parameter(Tensor(np_x, mindspore.float32), name="x")
@@ -3033,6 +3183,9 @@ class ScatterMax(_ScatterOp):
Outputs:
Parameter, the updated `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Parameter(Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32), name="input_x")
>>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
@@ -3075,6 +3228,9 @@ class ScatterMin(_ScatterOp):
Outputs:
Parameter, the updated `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Parameter(Tensor(np.array([[0.0, 1.0, 2.0], [0.0, 0.0, 0.0]]), mindspore.float32), name="input_x")
>>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
@@ -3111,6 +3267,9 @@ class ScatterAdd(_ScatterOp_Dynamic):
Outputs:
Parameter, the updated `input_x`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name="x")
>>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
@@ -3154,6 +3313,9 @@ class ScatterSub(_ScatterOp):
Outputs:
Parameter, the updated `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]), mindspore.float32), name="x")
>>> indices = Tensor(np.array([[0, 1]]), mindspore.int32)
@@ -3189,6 +3351,9 @@ class ScatterMul(_ScatterOp):
Outputs:
Parameter, the updated `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mindspore.float32), name="x")
>>> indices = Tensor(np.array([0, 1]), mindspore.int32)
@@ -3225,6 +3390,9 @@ class ScatterDiv(_ScatterOp):
Outputs:
Parameter, the updated `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Parameter(Tensor(np.array([[6.0, 6.0, 6.0], [2.0, 2.0, 2.0]]), mindspore.float32), name="x")
>>> indices = Tensor(np.array([0, 1]), mindspore.int32)
@@ -3261,6 +3429,9 @@ class ScatterNdAdd(_ScatterNdOp):
Outputs:
Parameter, the updated `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
>>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
@@ -3296,6 +3467,9 @@ class ScatterNdSub(_ScatterNdOp):
Outputs:
Parameter, the updated `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
>>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
@@ -3328,6 +3502,9 @@ class ScatterNonAliasingAdd(_ScatterNdOp):
Outputs:
Parameter, the updated `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
>>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
@@ -3372,6 +3549,9 @@ class SpaceToDepth(PrimitiveWithInfer):
Outputs:
Tensor, the same data type as `x`. It must be a 4-D tensor.
Supported Platforms:
``Ascend``
Examples:
>>> x = Tensor(np.random.rand(1,3,2,2), mindspore.float32)
>>> block_size = 2
@@ -3430,6 +3610,9 @@ class DepthToSpace(PrimitiveWithInfer):
Outputs:
Tensor of shape :math:`(N, C_{in} / \text{block_size}^2, H_{in} * \text{block_size}, W_{in} * \text{block_size})`.
Supported Platforms:
``Ascend``
Examples:
>>> x = Tensor(np.random.rand(1,12,1,1), mindspore.float32)
>>> block_size = 2
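A minimal shape check for the formula above (the channel dimension shrinks by block_size squared while H and W grow by block_size):
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.ops.operations as P
>>> x = Tensor(np.random.rand(1, 12, 1, 1), mindspore.float32)
>>> # C: 12 / 2**2 = 3; H, W: 1 * 2 = 2
>>> print(P.DepthToSpace(2)(x).shape)
(1, 3, 2, 2)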
@@ -3496,6 +3679,9 @@ class SpaceToBatch(PrimitiveWithInfer):
:math:`w' = (w+paddings[1][0]+paddings[1][1])//block\_size`
Supported Platforms:
``Ascend``
Examples:
>>> block_size = 2
>>> paddings = [[0, 0], [0, 0]]
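A hedged sketch of the shape arithmetic above with zero paddings (illustrative values, standard imports assumed):
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.ops.operations as P
>>> x = Tensor(np.ones((1, 1, 2, 2)), mindspore.float32)
>>> # n' = 1*2*2 = 4, h' = (2+0+0)//2 = 1, w' = (2+0+0)//2 = 1
>>> print(P.SpaceToBatch(2, [[0, 0], [0, 0]])(x).shape)
(4, 1, 1, 1)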
@@ -3570,6 +3756,9 @@ class BatchToSpace(PrimitiveWithInfer):
:math:`w' = w*block\_size-crops[1][0]-crops[1][1]`
Supported Platforms:
``Ascend``
Examples:
>>> block_size = 2
>>> crops = [[0, 0], [0, 0]]
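The inverse shape arithmetic, sketched with zero crops (illustrative values):
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.ops.operations as P
>>> x = Tensor(np.ones((4, 1, 1, 1)), mindspore.float32)
>>> # n' = 4/(2*2) = 1, h' = 1*2-0-0 = 2, w' = 1*2-0-0 = 2
>>> print(P.BatchToSpace(2, [[0, 0], [0, 0]])(x).shape)
(1, 1, 2, 2)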
@@ -3648,6 +3837,9 @@ class SpaceToBatchND(PrimitiveWithInfer):
:math:`w' = (w+paddings[1][0]+paddings[1][1])//block\_shape[1]`
Supported Platforms:
``Ascend``
Examples:
>>> block_shape = [2, 2]
>>> paddings = [[0, 0], [0, 0]]
@@ -3744,6 +3936,9 @@ class BatchToSpaceND(PrimitiveWithInfer):
:math:`w' = w*block\_shape[1]-crops[1][0]-crops[1][1]`
Supported Platforms:
``Ascend``
Examples:
>>> block_shape = [2, 2]
>>> crops = [[0, 0], [0, 0]]
@@ -3824,6 +4019,9 @@ class BroadcastTo(PrimitiveWithInfer):
Outputs:
Tensor, with the given `shape` and the same data type as `input_x`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> shape = (2, 3)
>>> input_x = Tensor(np.array([1, 2, 3]).astype(np.float32))
@@ -3973,6 +4171,9 @@ class InplaceUpdate(PrimitiveWithInfer):
Outputs:
Tensor, with the same type and shape as the input `x`.
Supported Platforms:
``Ascend``
Examples:
>>> indices = (0, 1)
>>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
@@ -4030,6 +4231,9 @@ class ReverseSequence(PrimitiveWithInfer):
Outputs:
Reversed tensor with the same shape and data type as input.
Supported Platforms:
``Ascend``
Examples:
>>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
>>> seq_lengths = Tensor(np.array([1, 2, 3]))
@@ -4203,6 +4407,9 @@ class Sort(PrimitiveWithInfer):
- **y1** (Tensor) - A tensor whose values are the sorted values, with the same shape and data type as input.
- **y2** (Tensor) - The indices of the elements in the original input tensor. Data type is int32.
Supported Platforms:
``Ascend``
Examples:
>>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
>>> sort = P.Sort()
@@ -4250,6 +4457,9 @@ class EmbeddingLookup(PrimitiveWithInfer):
Outputs:
Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> input_params = Tensor(np.array([[8, 9], [10, 11], [12, 13], [14, 15]]), mindspore.float32)
>>> input_indices = Tensor(np.array([[5, 2], [8, 5]]), mindspore.int32)
@@ -4354,6 +4564,9 @@ class Identity(PrimitiveWithInfer):
Outputs:
Tensor, the shape of tensor is the same as `input_x`, :math:`(x_1, x_2, ..., x_R)`.
Supported Platforms:
``Ascend``
Examples:
>>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
>>> output = P.Identity()(x)
@@ -33,6 +33,9 @@ class ReduceOp:
- MAX: Take the maximum.
- MIN: Take the minimum.
- PROD: Take the product.
Supported Platforms:
``Ascend`` ``GPU``
"""
SUM = "sum"
MAX = "max"
@@ -67,6 +70,9 @@ class AllReduce(PrimitiveWithInfer):
Tensor, has the same shape of the input, i.e., :math:`(x_1, x_2, ..., x_R)`.
The contents depend on the specified operation.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> from mindspore.communication import init
>>> from mindspore import Tensor
@@ -243,6 +249,9 @@ class AllGather(PrimitiveWithInfer):
Tensor. If the number of devices in the group is N,
then the shape of output is :math:`(N, x_1, x_2, ..., x_R)`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> import mindspore.ops.operations as P
>>> import mindspore.nn as nn
@@ -356,6 +365,9 @@ class ReduceScatter(PrimitiveWithInfer):
TypeError: If any of operation and group is not a string.
ValueError: If the first dimension of the input cannot be divided by the rank size.
Supported Platforms:
``GPU``
Examples:
>>> from mindspore import Tensor
>>> from mindspore.communication import init
@@ -474,6 +486,9 @@ class Broadcast(PrimitiveWithInfer):
Raises:
TypeError: If root_rank is not an integer or group is not a string.
Supported Platforms:
``Ascend``
Examples:
>>> from mindspore import Tensor
>>> from mindspore.communication import init
@@ -49,6 +49,9 @@ class ControlDepend(Primitive):
Outputs:
This operation has no actual data output; it is used to set up the execution order of related operations.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> class Net(nn.Cell):
... def __init__(self):
@@ -48,6 +48,9 @@ class ScalarSummary(PrimitiveWithInfer):
- **name** (str) - The name of the input variable, it must not be an empty string.
- **value** (Tensor) - The value of scalar, and the shape of value must be [] or [1].
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> class SummaryDemo(nn.Cell):
... def __init__(self,):
@@ -87,6 +90,9 @@ class ImageSummary(PrimitiveWithInfer):
- **name** (str) - The name of the input variable, it must not be an empty string.
- **value** (Tensor) - The value of image, the rank of tensor must be 4.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> class Net(nn.Cell):
... def __init__(self):
@@ -125,6 +131,9 @@ class TensorSummary(PrimitiveWithInfer):
- **name** (str) - The name of the input variable.
- **value** (Tensor) - The value of tensor, and the rank of tensor must be greater than 0.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> class SummaryDemo(nn.Cell):
... def __init__(self,):
@@ -164,6 +173,9 @@ class HistogramSummary(PrimitiveWithInfer):
- **name** (str) - The name of the input variable.
- **value** (Tensor) - The value of tensor, and the rank of tensor must be greater than 0.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> class SummaryDemo(nn.Cell):
... def __init__(self,):
@@ -208,6 +220,9 @@ class InsertGradientOf(PrimitiveWithInfer):
Outputs:
Tensor, returns `input_x` directly. `InsertGradientOf` does not affect the forward result.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> def clip_gradient(dx):
... ret = dx
@@ -319,6 +334,9 @@ class Print(PrimitiveWithInfer):
- **input_x** (Union[Tensor, str]) - The graph node to attach to. The input supports
multiple strings and tensors which are separated by ','.
Supported Platforms:
``Ascend``
Examples:
>>> class PrintDemo(nn.Cell):
... def __init__(self):
@@ -53,6 +53,9 @@ class CropAndResize(PrimitiveWithInfer):
Outputs:
A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth] with type: float32.
Supported Platforms:
``Ascend``
Examples:
>>> class CropAndResizeNet(nn.Cell):
... def __init__(self, crop_size):
@@ -32,6 +32,9 @@ class ScalarCast(PrimitiveWithInfer):
Outputs:
Scalar. The type is the same as the python type corresponding to `input_y`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> scalar_cast = P.ScalarCast()
>>> output = scalar_cast(255.0, mindspore.int32)
@@ -135,6 +135,9 @@ class TensorAdd(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> add = P.TensorAdd()
>>> input_x = Tensor(np.array([1,2,3]).astype(np.float32))
@@ -170,6 +173,9 @@ class AssignAdd(PrimitiveWithInfer):
- **value** (Union[numbers.Number, Tensor]) - The value to be added to the `variable`.
It must have the same shape as `variable` if it is a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> class Net(nn.Cell):
... def __init__(self):
@@ -222,6 +228,9 @@ class AssignSub(PrimitiveWithInfer):
- **value** (Union[numbers.Number, Tensor]) - The value to be subtracted from the `variable`.
It must have the same shape as `variable` if it is a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> class Net(nn.Cell):
... def __init__(self):
@@ -344,6 +353,9 @@ class ReduceMean(_Reduce):
- If axis is tuple(int), set as (2, 3), and keep_dims is False,
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = P.ReduceMean(keep_dims=True)
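The keep_dims cases listed above, sketched on the same 4-D input (reduced axes keep size 1 or disappear; standard imports assumed):
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.ops.operations as P
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> print(P.ReduceMean(keep_dims=True)(x, (2, 3)).shape)
(3, 4, 1, 1)
>>> print(P.ReduceMean(keep_dims=False)(x, (2, 3)).shape)
(3, 4)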
@@ -378,6 +390,9 @@ class ReduceSum(_Reduce):
- If axis is tuple(int), set as (2, 3), and keep_dims is False,
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = P.ReduceSum(keep_dims=True)
@@ -419,6 +434,9 @@ class ReduceAll(_Reduce):
- If axis is tuple(int), set as (2, 3), and keep_dims is False,
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([[True, False], [True, True]]))
>>> op = P.ReduceAll(keep_dims=True)
@@ -458,6 +476,9 @@ class ReduceAny(_Reduce):
- If axis is tuple(int), set as (2, 3), and keep_dims is False,
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([[True, False], [True, True]]))
>>> op = P.ReduceAny(keep_dims=True)
@@ -497,6 +518,9 @@ class ReduceMax(_Reduce):
- If axis is tuple(int), set as (2, 3), and keep_dims is False,
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = P.ReduceMax(keep_dims=True)
@@ -541,6 +565,9 @@ class ReduceMin(_Reduce):
- If axis is tuple(int), set as (2, 3), and keep_dims is False,
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = P.ReduceMin(keep_dims=True)
@@ -576,6 +603,9 @@ class ReduceProd(_Reduce):
- If axis is tuple(int), set as (2, 3), and keep_dims is False,
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = P.ReduceProd(keep_dims=True)
@@ -601,6 +631,9 @@ class CumProd(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape and dtype as the `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> a, b, c, = 1, 2, 3
>>> input_x = Tensor(np.array([a, b, c]).astype(np.float32))
@@ -662,6 +695,9 @@ class MatMul(PrimitiveWithInfer):
Outputs:
Tensor, the shape of the output tensor is :math:`(N, M)`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x1 = Tensor(np.ones(shape=[1, 3]), mindspore.float32)
>>> input_x2 = Tensor(np.ones(shape=[3, 4]), mindspore.float32)
@@ -741,6 +777,9 @@ class BatchMatMul(MatMul):
Outputs:
Tensor, the shape of the output tensor is :math:`(*B, N, M)`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.ones(shape=[2, 4, 1, 3]), mindspore.float32)
>>> input_y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
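Continuing the shapes above, a minimal check that the batch dimensions :math:`*B` are carried through (illustrative values):
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.ops.operations as P
>>> a = Tensor(np.ones((2, 4, 1, 3)), mindspore.float32)
>>> b = Tensor(np.ones((2, 4, 3, 4)), mindspore.float32)
>>> # (*B, N, K) x (*B, K, M) -> (*B, N, M) with *B = (2, 4)
>>> print(P.BatchMatMul()(a, b).shape)
(2, 4, 1, 4)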
@@ -800,6 +839,9 @@ class CumSum(PrimitiveWithInfer):
Outputs:
Tensor, the shape of the output tensor is consistent with the input tensor's.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input = Tensor(np.array([[3, 4, 6, 10],[1, 6, 7, 9],[4, 3, 8, 7],[1, 3, 7, 9]]).astype(np.float32))
>>> cumsum = P.CumSum()
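A one-dimensional sketch of the running sum along axis 0 (illustrative values; the printed formatting is numpy-style):
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.ops.operations as P
>>> x = Tensor(np.array([1, 2, 3, 4]).astype(np.float32))
>>> # partial sums: 1, 1+2, 1+2+3, 1+2+3+4
>>> print(P.CumSum()(x, 0))
[ 1.  3.  6. 10.]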
@@ -845,6 +887,9 @@ class AddN(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape and dtype as each entry of the `input_x`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> class NetAddN(nn.Cell):
... def __init__(self):
@@ -928,6 +973,9 @@ class AccumulateNV2(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape and dtype as each entry of the `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> class NetAccumulateNV2(nn.Cell):
... def __init__(self):
@@ -987,6 +1035,9 @@ class Neg(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape and dtype as input.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> neg = P.Neg()
>>> input_x = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32)
@@ -1032,6 +1083,9 @@ class InplaceAdd(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape and dtype as input_x.
Supported Platforms:
``Ascend``
Examples:
>>> indices = (0, 1)
>>> input_x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
@@ -1091,6 +1145,9 @@ class InplaceSub(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape and dtype as input_x.
Supported Platforms:
``Ascend``
Examples:
>>> indices = (0, 1)
>>> input_x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
@@ -1155,6 +1212,9 @@ class Sub(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([4, 5, 6]), mindspore.int32)
@@ -1195,6 +1255,9 @@ class Mul(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
@@ -1235,6 +1298,9 @@ class SquaredDifference(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> input_y = Tensor(np.array([2.0, 4.0, 6.0]), mindspore.float32)
@@ -1259,6 +1325,9 @@ class Square(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape and dtype as the `input_x`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> square = P.Square()
@@ -1298,6 +1367,9 @@ class Rsqrt(PrimitiveWithInfer):
Outputs:
Tensor, has the same type and shape as `input_x`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_tensor = Tensor([[4, 4], [9, 9]], mindspore.float32)
>>> rsqrt = P.Rsqrt()
@@ -1338,6 +1410,9 @@ class Sqrt(PrimitiveWithCheck):
Outputs:
Tensor, has the same shape as the `input_x`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.array([1.0, 4.0, 9.0]), mindspore.float32)
>>> sqrt = P.Sqrt()
@@ -1373,6 +1448,9 @@ class Reciprocal(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape as the `input_x`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> reciprocal = P.Reciprocal()
@@ -1427,6 +1505,9 @@ class Pow(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> input_y = 3.0
@@ -1463,6 +1544,9 @@ class Exp(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape and dtype as the `input_x`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> exp = P.Exp()
@@ -1502,6 +1586,9 @@ class Expm1(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape as the `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([0.0, 1.0, 2.0, 4.0]), mindspore.float32)
>>> expm1 = P.Expm1()
@@ -1540,6 +1627,9 @@ class HistogramFixedWidth(PrimitiveWithInfer):
Outputs:
Tensor, the type is int32.
Supported Platforms:
``Ascend``
Examples:
>>> x = Tensor([-1.0, 0.0, 1.5, 2.0, 5.0, 15], mindspore.float16)
>>> range = Tensor([0.0, 5.0], mindspore.float16)
@@ -1578,6 +1668,9 @@ class Log(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape as the `input_x`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> log = P.Log()
@@ -1616,6 +1709,9 @@ class Log1p(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape as the `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> log1p = P.Log1p()
@@ -1647,6 +1743,9 @@ class Erf(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape and dtype as the `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
>>> erf = P.Erf()
@@ -1678,6 +1777,9 @@ class Erfc(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape and dtype as the `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
>>> erfc = P.Erfc()
@@ -1720,6 +1822,9 @@ class Minimum(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
>>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
@@ -1760,6 +1865,9 @@ class Maximum(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
>>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
@@ -1800,6 +1908,9 @@ class RealDiv(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
@@ -1841,6 +1952,9 @@ class Div(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
>>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
@@ -1880,6 +1994,9 @@ class DivNoNan(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
>>> input_y = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32)
@@ -1926,6 +2043,9 @@ class FloorDiv(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
>>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
@@ -1958,6 +2078,9 @@ class TruncateDiv(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
>>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
@@ -1989,6 +2112,9 @@ class TruncateMod(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
>>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
@@ -2021,6 +2147,9 @@ class Mod(_MathBinaryOp):
Raises:
ValueError: When `input_x` and `input_y` are not the same dtype.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
>>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
@@ -2048,6 +2177,9 @@ class Floor(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape as `input_x`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
>>> floor = P.Floor()
@@ -2089,6 +2221,9 @@ class FloorMod(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
>>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
@@ -2109,6 +2244,9 @@ class Ceil(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape as `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
>>> ceil_op = P.Ceil()
@@ -2150,6 +2288,9 @@ class Xdivy(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([2, 4, -1]), mindspore.float32)
>>> input_y = Tensor(np.array([2, 2, 2]), mindspore.float32)
@@ -2186,6 +2327,9 @@ class Xlogy(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend``
Examples:
>>> input_x = Tensor(np.array([-5, 0, 4]), mindspore.float32)
>>> input_y = Tensor(np.array([2, 2, 2]), mindspore.float32)
@@ -2209,6 +2353,9 @@ class Acosh(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape as `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> acosh = P.Acosh()
>>> input_x = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
@@ -2237,6 +2384,9 @@ class Cosh(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape as `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> cosh = P.Cosh()
>>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
@@ -2267,6 +2417,9 @@ class Asinh(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape as `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> asinh = P.Asinh()
>>> input_x = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
@@ -2297,6 +2450,9 @@ class Sinh(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape as `input_x`.
Supported Platforms:
``Ascend``
Examples:
>>> sinh = P.Sinh()
>>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
@@ -2350,6 +2506,9 @@ class Equal(_LogicBinaryOp):
Outputs:
Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> equal = P.Equal()
@@ -2396,6 +2555,9 @@ class ApproximateEqual(_LogicBinaryOp):
Outputs:
Tensor, the shape is the same as the shape of 'x1', and the data type is bool.
Supported Platforms:
``Ascend``
Examples:
>>> x1 = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> x2 = Tensor(np.array([2, 4, 6]), mindspore.float32)
@@ -2434,6 +2596,9 @@ class EqualCount(PrimitiveWithInfer):
Outputs:
Tensor, with the type same as input tensor and size as (1,).
Supported Platforms:
``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
@@ -2477,6 +2642,9 @@ class NotEqual(_LogicBinaryOp):
Outputs:
Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> not_equal = P.NotEqual()
@@ -2516,6 +2684,9 @@ class Greater(_LogicBinaryOp):
Outputs:
Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
@@ -2554,6 +2725,9 @@ class GreaterEqual(_LogicBinaryOp):
Outputs:
Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
@@ -2592,6 +2766,9 @@ class Less(_LogicBinaryOp):
Outputs:
Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
@@ -2630,6 +2807,9 @@ class LessEqual(_LogicBinaryOp):
Outputs:
Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
@@ -2658,6 +2838,9 @@ class LogicalNot(PrimitiveWithInfer):
Outputs:
Tensor, the shape is the same as the `input_x`, and the dtype is bool.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
>>> logical_not = P.LogicalNot()
@@ -2698,6 +2881,9 @@ class LogicalAnd(_LogicBinaryOp):
Outputs:
Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
>>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_)
@@ -2730,6 +2916,9 @@ class LogicalOr(_LogicBinaryOp):
Outputs:
Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_) | |||
| >>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_) | |||
| @@ -2753,6 +2942,9 @@ class IsNan(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape of input, and the dtype is bool. | |||
| Supported Platforms: | |||
| ``GPU`` | |||
| Examples: | |||
| >>> is_nan = P.IsNan() | |||
| >>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32) | |||
| @@ -2781,6 +2973,9 @@ class IsInf(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape of input, and the dtype is bool. | |||
| Supported Platforms: | |||
| ``GPU`` | |||
| Examples: | |||
| >>> is_inf = P.IsInf() | |||
| >>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32) | |||
| @@ -2809,6 +3004,9 @@ class IsFinite(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape of input, and the dtype is bool. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> is_finite = P.IsFinite() | |||
| >>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32) | |||
| @@ -2841,6 +3039,9 @@ class FloatStatus(PrimitiveWithInfer): | |||
| Tensor, has the shape of `(1,)`, and has the same dtype of input `mindspore.dtype.float32` or | |||
| `mindspore.dtype.float16`. | |||
| Supported Platforms: | |||
| ``GPU`` | |||
| Examples: | |||
| >>> float_status = P.FloatStatus() | |||
| >>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32) | |||
| @@ -2874,6 +3075,9 @@ class NPUAllocFloatStatus(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the shape of `(8,)`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> alloc_status = P.NPUAllocFloatStatus() | |||
| >>> output = alloc_status() | |||
| @@ -2908,6 +3112,9 @@ class NPUGetFloatStatus(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape as `input_x`. All the elements in the tensor will be zero. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> alloc_status = P.NPUAllocFloatStatus() | |||
| >>> get_status = P.NPUGetFloatStatus() | |||
| @@ -2950,6 +3157,9 @@ class NPUClearFloatStatus(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape as `input_x`. All the elements in the tensor will be zero. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> alloc_status = P.NPUAllocFloatStatus() | |||
| >>> get_status = P.NPUGetFloatStatus() | |||
| @@ -2987,6 +3197,9 @@ class Cos(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape as `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> cos = P.Cos() | |||
| >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32) | |||
| @@ -3017,6 +3230,9 @@ class ACos(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape as `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> acos = P.ACos() | |||
| >>> input_x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32) | |||
| @@ -3045,6 +3261,9 @@ class Sin(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape as `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> sin = P.Sin() | |||
| >>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32) | |||
| @@ -3075,6 +3294,9 @@ class Asin(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape as `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> asin = P.Asin() | |||
| >>> input_x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32) | |||
| @@ -3125,6 +3347,9 @@ class NMSWithMask(PrimitiveWithInfer): | |||
| - **selected_mask** (Tensor) - The shape of tensor is :math:`(N,)`. A mask list of | |||
| valid output bounding boxes. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> bbox = np.random.rand(128, 5) | |||
| >>> bbox[:, 2] += bbox[:, 0] | |||
| @@ -3164,6 +3389,9 @@ class Abs(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape as the `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` ``CPU`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([-1.0, 1.0, 0.0]), mindspore.float32) | |||
| >>> abs = P.Abs() | |||
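| A minimal continuation with the expected element-wise result: | |||
| >>> output = abs(input_x) | |||
| >>> print(output) | |||
| [1. 1. 0.] | |||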
| @@ -3208,6 +3436,9 @@ class Sign(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape and type as the `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([[2.0, 0.0, -1.0]]), mindspore.float32) | |||
| >>> sign = P.Sign() | |||
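| A minimal continuation: sign maps positives to 1, zero to 0, and negatives to -1: | |||
| >>> output = sign(input_x) | |||
| >>> print(output) | |||
| [[ 1.  0. -1.]] | |||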
| @@ -3238,6 +3469,9 @@ class Round(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape and type as the `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32) | |||
| >>> round = P.Round() | |||
| @@ -3270,6 +3504,9 @@ class Tan(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape as `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> tan = P.Tan() | |||
| >>> input_x = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32) | |||
| @@ -3300,6 +3537,9 @@ class Atan(PrimitiveWithInfer): | |||
| Outputs: | |||
| A Tensor, has the same type as the input. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([1.047, 0.785]), mindspore.float32) | |||
| >>> tan = P.Tan() | |||
| @@ -3332,6 +3572,9 @@ class Atanh(PrimitiveWithInfer): | |||
| Outputs: | |||
| A Tensor, has the same type as the input. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([1.047, 0.785]), mindspore.float32) | |||
| >>> atanh = P.Atanh() | |||
| @@ -3371,6 +3614,9 @@ class Atan2(_MathBinaryOp): | |||
| Outputs: | |||
| Tensor, the shape is the same as the one after broadcasting, and the data type is the same as `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([0, 1]), mindspore.float32) | |||
| >>> input_y = Tensor(np.array([1, 1]), mindspore.float32) | |||
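| A hedged continuation: atan2(0, 1) = 0 and atan2(1, 1) = pi/4: | |||
| >>> atan2 = P.Atan2() | |||
| >>> output = atan2(input_x, input_y) | |||
| >>> print(output) | |||
| [0.        0.7853982] | |||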
| @@ -3396,6 +3642,9 @@ class SquareSumAll(PrimitiveWithInfer): | |||
| - **output_y1** (Tensor) - The same type as the `input_x1`. | |||
| - **output_y2** (Tensor) - The same type as the `input_x1`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> input_x1 = Tensor(np.array([0, 0, 2, 0]), mindspore.float32) | |||
| >>> input_x2 = Tensor(np.array([0, 0, 2, 4]), mindspore.float32) | |||
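| A hedged continuation: the sums of squares are 0+0+4+0 = 4 and 0+0+4+16 = 20 (the exact scalar print format may differ): | |||
| >>> square_sum_all = P.SquareSumAll() | |||
| >>> output_y1, output_y2 = square_sum_all(input_x1, input_x2) | |||
| >>> print(output_y1) | |||
| 4.0 | |||
| >>> print(output_y2) | |||
| 20.0 | |||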
| @@ -3438,6 +3687,9 @@ class BitwiseAnd(_BitwiseBinaryOp): | |||
| Outputs: | |||
| Tensor, has the same type as the `input_x1`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16) | |||
| >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16) | |||
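| A hedged continuation with the element-wise two's-complement AND results: | |||
| >>> bitwise_and = P.BitwiseAnd() | |||
| >>> output = bitwise_and(input_x1, input_x2) | |||
| >>> print(output) | |||
| [ 0  0  1 -1  1  0  1] | |||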
| @@ -3465,6 +3717,9 @@ class BitwiseOr(_BitwiseBinaryOp): | |||
| Outputs: | |||
| Tensor, has the same type as the `input_x1`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16) | |||
| >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16) | |||
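| A hedged continuation; note 1 OR -1 = -1 and 1 OR 2 = 3 in two's complement: | |||
| >>> bitwise_or = P.BitwiseOr() | |||
| >>> output = bitwise_or(input_x1, input_x2) | |||
| >>> print(output) | |||
| [ 0  1  1 -1 -1  3  3] | |||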
| @@ -3492,6 +3747,9 @@ class BitwiseXor(_BitwiseBinaryOp): | |||
| Outputs: | |||
| Tensor, has the same type as the `input_x1`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16) | |||
| >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16) | |||
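| A hedged continuation; note 1 XOR -1 = -2 in two's complement: | |||
| >>> bitwise_xor = P.BitwiseXor() | |||
| >>> output = bitwise_xor(input_x1, input_x2) | |||
| >>> print(output) | |||
| [ 0  1  0  0 -2  3  2] | |||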
| @@ -3513,6 +3771,9 @@ class BesselI0e(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape as `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> bessel_i0e = P.BesselI0e() | |||
| >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32) | |||
| @@ -3544,6 +3805,9 @@ class BesselI1e(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape as `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> bessel_i1e = P.BesselI1e() | |||
| >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32) | |||
| @@ -3575,6 +3839,9 @@ class Inv(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape and data type as `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> inv = P.Inv() | |||
| >>> input_x = Tensor(np.array([0.25, 0.4, 0.31, 0.52]), mindspore.float32) | |||
| @@ -3606,6 +3873,9 @@ class Invert(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape as `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> invert = P.Invert() | |||
| >>> input_x = Tensor(np.array([25, 4, 13, 9]), mindspore.int16) | |||
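| A hedged continuation: bitwise NOT of x is -(x + 1) in two's complement: | |||
| >>> output = invert(input_x) | |||
| >>> print(output) | |||
| [-26  -5 -14 -10] | |||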
| @@ -3636,6 +3906,9 @@ class Eps(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same type and shape as `input_x`, but filled with the minimum representable value of the `input_x` dtype. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> input_x = Tensor([4, 1, 2, 3], mindspore.float32) | |||
| >>> output = P.Eps()(input_x) | |||
| @@ -86,6 +86,9 @@ class Flatten(PrimitiveWithInfer): | |||
| Tensor, the shape of the output tensor is :math:`(N, X)`, where :math:`X` is | |||
| the product of the remaining dimensions. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` ``CPU`` | |||
| Examples: | |||
| >>> input_tensor = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32) | |||
| >>> flatten = P.Flatten() | |||
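| A minimal continuation: the (1, 2, 3, 4) input flattens to shape (1, 2*3*4) = (1, 24): | |||
| >>> output = flatten(input_tensor) | |||
| >>> print(output.shape) | |||
| (1, 24) | |||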
| @@ -130,6 +133,9 @@ class Softmax(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with the same type and shape as the logits. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` ``CPU`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32) | |||
| >>> softmax = P.Softmax() | |||
| @@ -181,6 +187,9 @@ class LogSoftmax(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with the same type and shape as the logits. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32) | |||
| >>> log_softmax = P.LogSoftmax() | |||
| @@ -219,6 +228,9 @@ class Softplus(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with the same type and shape as the `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32) | |||
| >>> softplus = P.Softplus() | |||
| @@ -255,6 +267,9 @@ class Softsign(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with the same type and shape as the `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([0, -1, 2, 30, -30]), mindspore.float32) | |||
| >>> softsign = P.Softsign() | |||
| @@ -288,6 +303,9 @@ class ReLU(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with the same type and shape as the `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` ``CPU`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32) | |||
| >>> relu = P.ReLU() | |||
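| A minimal continuation: negative entries clamp to zero: | |||
| >>> output = relu(input_x) | |||
| >>> print(output) | |||
| [[0. 4. 0.] | |||
|  [2. 0. 9.]] | |||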
| @@ -322,6 +340,9 @@ class ReLU6(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with the same type and shape as the `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` ``CPU`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32) | |||
| >>> relu6 = P.ReLU6() | |||
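| A minimal continuation: values clamp to [0, 6], so 9.0 becomes 6.0: | |||
| >>> output = relu6(input_x) | |||
| >>> print(output) | |||
| [[0. 4. 0.] | |||
|  [2. 0. 6.]] | |||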
| @@ -357,6 +378,9 @@ class ReLUV2(PrimitiveWithInfer): | |||
| - **output** (Tensor) - Has the same type and shape as the `input_x`. | |||
| - **mask** (Tensor) - A tensor whose data type must be uint8. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([[[[1, -2], [-3, 4]], [[-5, 6], [7, -8]]]]), mindspore.float32) | |||
| >>> relu_v2 = P.ReLUV2() | |||
| @@ -424,6 +448,9 @@ class Elu(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same shape and data type as `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32) | |||
| >>> elu = P.Elu() | |||
| @@ -466,6 +493,9 @@ class HSwish(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with the same type and shape as the `input_data`. | |||
| Supported Platforms: | |||
| ``GPU`` | |||
| Examples: | |||
| >>> hswish = P.HSwish() | |||
| >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16) | |||
| @@ -503,6 +533,9 @@ class Sigmoid(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with the same type and shape as the input_x. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32) | |||
| >>> sigmoid = P.Sigmoid() | |||
| @@ -542,6 +575,9 @@ class HSigmoid(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with the same type and shape as the `input_data`. | |||
| Supported Platforms: | |||
| ``GPU`` | |||
| Examples: | |||
| >>> hsigmoid = P.HSigmoid() | |||
| >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16) | |||
| @@ -579,6 +615,9 @@ class Tanh(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with the same type and shape as the input_x. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32) | |||
| >>> tanh = P.Tanh() | |||
| @@ -638,6 +677,9 @@ class FusedBatchNorm(Primitive): | |||
| - **updated_moving_mean** (Tensor) - Tensor of shape :math:`(C,)`. | |||
| - **updated_moving_variance** (Tensor) - Tensor of shape :math:`(C,)`. | |||
| Supported Platforms: | |||
| ``CPU`` | |||
| Examples: | |||
| >>> import mindspore | |||
| >>> import mindspore.nn as nn | |||
| @@ -731,6 +773,9 @@ class FusedBatchNormEx(PrimitiveWithInfer): | |||
| data type: float32. | |||
| - **reserve** (Tensor) - reserve space, Tensor of shape :math:`(C,)`, data type: float32. | |||
| Supported Platforms: | |||
| ``GPU`` | |||
| Examples: | |||
| >>> import mindspore | |||
| >>> import mindspore.nn as nn | |||
| @@ -811,6 +856,9 @@ class BNTrainingReduce(PrimitiveWithInfer): | |||
| - **sum** (Tensor) - A 1-D Tensor with float32 data type. Tensor of shape :math:`(C,)`. | |||
| - **square_sum** (Tensor) - A 1-D Tensor with float32 data type. Tensor of shape :math:`(C,)`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.ones([128, 3, 32, 3]), mindspore.float32) | |||
| >>> bn_training_reduce = P.BNTrainingReduce() | |||
| @@ -868,6 +916,9 @@ class BNTrainingUpdate(PrimitiveWithInfer): | |||
| - **batch_variance** (Tensor) - Tensor for the mean of `variance`, with float32 data type. | |||
| Has the same shape as `variance`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.ones([1, 2, 2, 2]), mindspore.float32) | |||
| >>> sum = Tensor(np.ones([2]), mindspore.float32) | |||
| @@ -968,6 +1019,9 @@ class BatchNorm(PrimitiveWithInfer): | |||
| - **reserve_space_1** (Tensor) - Tensor of shape :math:`(C,)`. | |||
| - **reserve_space_2** (Tensor) - Tensor of shape :math:`(C,)`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.ones([2, 2]), mindspore.float32) | |||
| >>> scale = Tensor(np.ones([2]), mindspore.float32) | |||
| @@ -1074,6 +1128,9 @@ class Conv2D(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, the result of the 2D convolution. The shape is :math:`(N, C_{out}, H_{out}, W_{out})`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` ``CPU`` | |||
| Examples: | |||
| >>> input = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32) | |||
| >>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32) | |||
| @@ -1219,6 +1276,9 @@ class DepthwiseConv2dNative(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor of shape :math:`(N, C_{in} * \text{channel_multiplier}, H_{out}, W_{out})`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> input = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32) | |||
| >>> weight = Tensor(np.ones([1, 32, 3, 3]), mindspore.float32) | |||
| @@ -1430,6 +1490,9 @@ class MaxPool(_Pool): | |||
| Outputs: | |||
| Tensor, with shape :math:`(N, C_{out}, H_{out}, W_{out})`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` ``CPU`` | |||
| Examples: | |||
| >>> input_tensor = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mindspore.float32) | |||
| >>> maxpool_op = P.MaxPool(padding="VALID", ksize=2, strides=1) | |||
| @@ -1482,6 +1545,9 @@ class MaxPoolWithArgmax(_Pool): | |||
| - **output** (Tensor) - Maxpooling result, with shape :math:`(N, C_{out}, H_{out}, W_{out})`. | |||
| - **mask** (Tensor) - The indices of the max values, represented by the mask. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> input_tensor = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mindspore.float32) | |||
| >>> maxpool_arg_op = P.MaxPoolWithArgmax(padding="VALID", ksize=2, strides=1) | |||
| @@ -1562,6 +1628,9 @@ class AvgPool(_Pool): | |||
| Outputs: | |||
| Tensor, with shape :math:`(N, C_{out}, H_{out}, W_{out})`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> import mindspore | |||
| >>> import mindspore.nn as nn | |||
| @@ -1631,6 +1700,9 @@ class Conv2DBackpropInput(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, the gradients w.r.t. the input of the convolution. It has the same shape as the input. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> dout = Tensor(np.ones([10, 32, 30, 30]), mindspore.float32) | |||
| >>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32) | |||
| @@ -1749,6 +1821,9 @@ class BiasAdd(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with the same shape and type as `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` ``CPU`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.arange(6).reshape((2, 3)), mindspore.float32) | |||
| >>> bias = Tensor(np.random.random(3).reshape((3,)), mindspore.float32) | |||
| @@ -1798,6 +1873,9 @@ class TopK(PrimitiveWithInfer): | |||
| - **values** (Tensor) - The `k` largest elements in each slice of the last dimension. | |||
| - **indices** (Tensor) - The indices of values within the last dimension of the input. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> topk = P.TopK(sorted=True) | |||
| >>> input_x = Tensor([1, 2, 3, 4, 5], mindspore.float16) | |||
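| A hedged continuation; the value of k is not visible in this hunk, so k=3 below is illustrative only: | |||
| >>> values, indices = topk(input_x, 3) | |||
| >>> print(values) | |||
| [5. 4. 3.] | |||
| >>> print(indices) | |||
| [4 3 2] | |||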
| @@ -1846,6 +1924,9 @@ class SoftmaxCrossEntropyWithLogits(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tuple of 2 tensors: `loss` has shape `(N,)`, and `dlogits` has the same shape as `logits`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` ``CPU`` | |||
| Examples: | |||
| >>> logits = Tensor([[2, 4, 1, 4, 5], [2, 1, 2, 4, 3]], mindspore.float32) | |||
| >>> labels = Tensor([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0]], mindspore.float32) | |||
| @@ -1902,6 +1983,9 @@ class SparseSoftmaxCrossEntropyWithLogits(PrimitiveWithInfer): | |||
| Tensor, if `is_grad` is False, the output tensor is the value of loss which is a scalar tensor; | |||
| if `is_grad` is True, the output tensor is the gradient of input with the same shape as `logits`. | |||
| Supported Platforms: | |||
| ``GPU`` ``CPU`` | |||
| Examples: | |||
| Please refer to the usage in nn.SoftmaxCrossEntropyWithLogits source code. | |||
| """ | |||
| @@ -1959,6 +2043,9 @@ class ApplyMomentum(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, parameters to be updated. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` ``CPU`` | |||
| Examples: | |||
| Please refer to the usage in nn.ApplyMomentum. | |||
| """ | |||
| @@ -2020,6 +2107,9 @@ class SmoothL1Loss(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with the same type and shape as `prediction`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> loss = P.SmoothL1Loss() | |||
| >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32) | |||
| @@ -2063,6 +2153,9 @@ class L2Loss(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same dtype as `input_x`. The output tensor is the value of loss which is a scalar tensor. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float16) | |||
| >>> l2_loss = P.L2Loss() | |||
| @@ -2100,6 +2193,9 @@ class DataFormatDimMap(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, has the same type as the `input_x`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> x = Tensor([0, 1, 2, 3], mindspore.int32) | |||
| >>> dfdm = P.DataFormatDimMap() | |||
| @@ -2141,6 +2237,9 @@ class RNNTLoss(PrimitiveWithInfer): | |||
| - **costs** (Tensor[int32]) - Tensor of shape :math:`(B,)`. | |||
| - **grads** (Tensor[int32]) - Has the same shape as `acts`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> B, T, U, V = 1, 2, 3, 5 | |||
| >>> blank = 0 | |||
| @@ -2207,6 +2306,9 @@ class SGD(PrimitiveWithCheck): | |||
| Outputs: | |||
| Tensor, parameters to be updated. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> sgd = P.SGD() | |||
| >>> parameters = Tensor(np.array([2, -0.5, 1.7, 4]), mindspore.float32) | |||
| @@ -2286,6 +2388,9 @@ class ApplyRMSProp(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, parameters to be updated. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> apply_rms = P.ApplyRMSProp() | |||
| >>> input_x = Tensor(1., mindspore.float32) | |||
| @@ -2385,6 +2490,9 @@ class ApplyCenteredRMSProp(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, parameters to be updated. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> centered_rms_prop = P.ApplyCenteredRMSProp() | |||
| >>> input_x = Tensor(np.arange(-2, 2).astype(np.float32).reshape(2, 2), mindspore.float32) | |||
| @@ -2476,6 +2584,9 @@ class LayerNorm(Primitive): | |||
| - **mean** (Tensor) - Tensor of shape :math:`(C,)`. | |||
| - **variance** (Tensor) - Tensor of shape :math:`(C,)`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32) | |||
| >>> gamma = Tensor(np.ones([3]), mindspore.float32) | |||
| @@ -2521,6 +2632,9 @@ class L2Normalize(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with the same type and shape as the input. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> l2_normalize = P.L2Normalize() | |||
| >>> input_x = Tensor(np.random.randint(-256, 256, (2, 3, 4)), mindspore.float32) | |||
| @@ -2565,6 +2679,9 @@ class DropoutGenMask(Primitive): | |||
| Outputs: | |||
| Tensor, the value of generated mask for input shape. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> dropout_gen_mask = P.DropoutGenMask() | |||
| >>> shape = (2, 4, 5) | |||
| @@ -2600,6 +2717,9 @@ class DropoutDoMask(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, the value after dropout is applied. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> x = Tensor(np.ones([2, 2, 3]), mindspore.float32) | |||
| >>> shape = (2, 2, 3) | |||
| @@ -2671,6 +2791,9 @@ class ResizeBilinear(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, resized image. 4-D with shape [batch, channels, new_height, new_width] in `float32`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> tensor = Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mindspore.float32) | |||
| >>> resize_bilinear = P.ResizeBilinear((5, 5)) | |||
| @@ -2727,6 +2850,9 @@ class OneHot(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, one-hot tensor. Tensor of shape :math:`(X_0, \ldots, X_{axis}, \text{depth} ,X_{axis+1}, \ldots, X_n)`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` ``CPU`` | |||
| Examples: | |||
| >>> indices = Tensor(np.array([0, 1, 2]), mindspore.int32) | |||
| >>> depth, on_value, off_value = 3, Tensor(1.0, mindspore.float32), Tensor(0.0, mindspore.float32) | |||
| @@ -2784,6 +2910,9 @@ class Gelu(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with the same type and shape as input. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> tensor = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32) | |||
| >>> gelu = P.Gelu() | |||
| @@ -2827,6 +2956,9 @@ class GetNext(PrimitiveWithInfer): | |||
| tuple[Tensor], the output of Dataset. The shape is described in `shapes` | |||
| and the type is described in `types`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> get_next = P.GetNext([mindspore.float32, mindspore.int32], [[32, 1, 28, 28], [10]], 2, 'shared_name') | |||
| >>> feature, label = get_next() | |||
| @@ -2872,6 +3004,9 @@ class PReLU(PrimitiveWithInfer): | |||
| For detailed information, please refer to `nn.PReLU`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> import mindspore | |||
| >>> import mindspore.nn as nn | |||
| @@ -2932,6 +3067,9 @@ class LSTM(PrimitiveWithInfer): | |||
| Performs the long short-term memory (LSTM) operation on the input. | |||
| For detailed information, please refer to `nn.LSTM`. | |||
| Supported Platforms: | |||
| ``GPU`` ``CPU`` | |||
| """ | |||
| @prim_attr_register | |||
| @@ -3027,6 +3165,9 @@ class SigmoidCrossEntropyWithLogits(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with the same shape and type as input `logits`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` ``CPU`` | |||
| Examples: | |||
| >>> logits = Tensor(np.array([[-0.8, 1.2, 0.7], [-0.1, -0.4, 0.7]]).astype(np.float32)) | |||
| >>> labels = Tensor(np.array([[0.3, 0.8, 1.2], [-0.6, 0.1, 2.2]]).astype(np.float32)) | |||
| @@ -3068,6 +3209,9 @@ class Pad(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, the tensor after padding. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32) | |||
| >>> pad_op = P.Pad(((1, 2), (2, 1))) | |||
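| A hedged continuation: padding (1, 2) rows and (2, 1) columns around the 2x3 input should yield shape (5, 6): | |||
| >>> output = pad_op(input_tensor) | |||
| >>> print(output.shape) | |||
| (5, 6) | |||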
| @@ -3133,6 +3277,9 @@ class MirrorPad(PrimitiveWithInfer): | |||
| is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the output is | |||
| [[2,1,1,2,3,3,2],[2,1,1,2,3,3,2],[5,4,4,5,6,6,5],[8,7,7,8,9,9,8],[8,7,7,8,9,9,8]]. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> from mindspore import Tensor | |||
| >>> from mindspore.ops import operations as P | |||
| @@ -3215,6 +3362,9 @@ class ROIAlign(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, the shape is `(rois_n, C, pooled_height, pooled_width)`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> input_tensor = Tensor(np.array([[[[1., 2.], [3., 4.]]]]), mindspore.float32) | |||
| >>> rois = Tensor(np.array([[0, 0.2, 0.3, 0.2, 0.3]]), mindspore.float32) | |||
| @@ -3301,6 +3451,9 @@ class Adam(PrimitiveWithInfer): | |||
| - **m** (Tensor) - The same shape and data type as `m`. | |||
| - **v** (Tensor) - The same shape and data type as `v`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> import numpy as np | |||
| >>> import mindspore.nn as nn | |||
| @@ -3514,6 +3667,9 @@ class FusedSparseAdam(PrimitiveWithInfer): | |||
| - **m** (Tensor) - A Tensor with shape (1,). | |||
| - **v** (Tensor) - A Tensor with shape (1,). | |||
| Supported Platforms: | |||
| ``CPU`` | |||
| Examples: | |||
| >>> import numpy as np | |||
| >>> import mindspore.nn as nn | |||
| @@ -3649,6 +3805,9 @@ class FusedSparseLazyAdam(PrimitiveWithInfer): | |||
| - **m** (Tensor) - A Tensor with shape (1,). | |||
| - **v** (Tensor) - A Tensor with shape (1,). | |||
| Supported Platforms: | |||
| ``CPU`` | |||
| Examples: | |||
| >>> import numpy as np | |||
| >>> import mindspore.nn as nn | |||
| @@ -3758,6 +3917,9 @@ class FusedSparseFtrl(PrimitiveWithInfer): | |||
| - **accum** (Tensor) - A Tensor with shape (1,). | |||
| - **linear** (Tensor) - A Tensor with shape (1,). | |||
| Supported Platforms: | |||
| ``CPU`` | |||
| Examples: | |||
| >>> import mindspore | |||
| >>> import mindspore.nn as nn | |||
| @@ -3949,6 +4111,9 @@ class KLDivLoss(PrimitiveWithInfer): | |||
| Tensor or Scalar, if `reduction` is 'none', then output is a tensor and has the same shape as `input_x`. | |||
| Otherwise it is a scalar. | |||
| Supported Platforms: | |||
| ``GPU`` | |||
| Examples: | |||
| >>> import mindspore | |||
| >>> import mindspore.nn as nn | |||
| @@ -4025,6 +4190,9 @@ class BinaryCrossEntropy(PrimitiveWithInfer): | |||
| Tensor or Scalar, if `reduction` is 'none', then output is a tensor and has the same shape as `input_x`. | |||
| Otherwise, the output is a scalar. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> import mindspore | |||
| >>> import mindspore.nn as nn | |||
| @@ -4123,6 +4291,9 @@ class ApplyAdaMax(PrimitiveWithInfer): | |||
| - **m** (Tensor) - The same shape and data type as `m`. | |||
| - **v** (Tensor) - The same shape and data type as `v`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> import numpy as np | |||
| >>> import mindspore.nn as nn | |||
| @@ -4252,6 +4423,9 @@ class ApplyAdadelta(PrimitiveWithInfer): | |||
| - **accum** (Tensor) - The same shape and data type as `accum`. | |||
| - **accum_update** (Tensor) - The same shape and data type as `accum_update`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> import numpy as np | |||
| >>> import mindspore.nn as nn | |||
| @@ -4361,6 +4535,9 @@ class ApplyAdagrad(PrimitiveWithInfer): | |||
| - **var** (Tensor) - The same shape and data type as `var`. | |||
| - **accum** (Tensor) - The same shape and data type as `accum`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> import numpy as np | |||
| >>> import mindspore.nn as nn | |||
| @@ -4451,6 +4628,9 @@ class ApplyAdagradV2(PrimitiveWithInfer): | |||
| - **var** (Tensor) - The same shape and data type as `var`. | |||
| - **accum** (Tensor) - The same shape and data type as `m`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> import numpy as np | |||
| >>> import mindspore.nn as nn | |||
| @@ -4542,6 +4722,9 @@ class SparseApplyAdagrad(PrimitiveWithInfer): | |||
| - **var** (Tensor) - The same shape and data type as `var`. | |||
| - **accum** (Tensor) - The same shape and data type as `accum`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> import numpy as np | |||
| >>> import mindspore.nn as nn | |||
| @@ -4634,6 +4817,9 @@ class SparseApplyAdagradV2(PrimitiveWithInfer): | |||
| - **var** (Tensor) - The same shape and data type as `var`. | |||
| - **accum** (Tensor) - The same shape and data type as `accum`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> import numpy as np | |||
| >>> import mindspore.nn as nn | |||
| @@ -4840,6 +5026,9 @@ class SparseApplyProximalAdagrad(PrimitiveWithCheck): | |||
| - **var** (Tensor) - The same shape and data type as `var`. | |||
| - **accum** (Tensor) - The same shape and data type as `accum`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> import numpy as np | |||
| >>> import mindspore.nn as nn | |||
| @@ -4936,6 +5125,9 @@ class ApplyAddSign(PrimitiveWithInfer): | |||
| - **var** (Tensor) - The same shape and data type as `var`. | |||
| - **m** (Tensor) - The same shape and data type as `m`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> import numpy as np | |||
| >>> import mindspore.nn as nn | |||
| @@ -5051,6 +5243,9 @@ class ApplyPowerSign(PrimitiveWithInfer): | |||
| - **var** (Tensor) - The same shape and data type as `var`. | |||
| - **m** (Tensor) - The same shape and data type as `m`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> import numpy as np | |||
| >>> import mindspore.nn as nn | |||
| @@ -5148,6 +5343,9 @@ class ApplyGradientDescent(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, represents the updated `var`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> import numpy as np | |||
| >>> import mindspore.nn as nn | |||
| @@ -5223,6 +5421,9 @@ class ApplyProximalGradientDescent(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, represents the updated `var`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> import numpy as np | |||
| >>> import mindspore.nn as nn | |||
| @@ -5306,6 +5507,9 @@ class LARSUpdate(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, represents the new gradient. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> from mindspore import Tensor | |||
| >>> from mindspore.ops import operations as P | |||
| @@ -5393,6 +5597,9 @@ class ApplyFtrl(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, represents the updated `var`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> import mindspore | |||
| >>> import mindspore.nn as nn | |||
| @@ -5489,6 +5696,9 @@ class SparseApplyFtrl(PrimitiveWithCheck): | |||
| - **accum** (Tensor) - Tensor, has the same shape and data type as `accum`. | |||
| - **linear** (Tensor) - Tensor, has the same shape and data type as `linear`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> import mindspore | |||
| >>> import mindspore.nn as nn | |||
| @@ -5590,6 +5800,9 @@ class SparseApplyFtrlV2(PrimitiveWithInfer): | |||
| - **accum** (Tensor) - Tensor, has the same shape and data type as `accum`. | |||
| - **linear** (Tensor) - Tensor, has the same shape and data type as `linear`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> import mindspore | |||
| >>> import mindspore.nn as nn | |||
| @@ -5729,6 +5942,9 @@ class CTCLoss(PrimitiveWithInfer): | |||
| the same type as `inputs`. | |||
| - **gradient** (Tensor) - The gradient of `loss`, has the same type and shape as `inputs`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> inputs = Tensor(np.random.random((2, 2, 3)), mindspore.float32) | |||
| >>> labels_indices = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int64) | |||
| @@ -5906,6 +6122,9 @@ class BasicLSTMCell(PrimitiveWithInfer): | |||
| - **tanhct** (Tensor) - Forward :math:`tanh c_t` cache at moment `t`. | |||
| Tensor of shape (`batch_size`, `hidden_size`), has the same type as input `c`. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> x = Tensor(np.random.rand(1, 32).astype(np.float16)) | |||
| >>> h = Tensor(np.random.rand(1, 2).astype(np.float16)) | |||
| @@ -6126,6 +6345,9 @@ class InTopK(PrimitiveWithInfer): | |||
| Tensor with 1 dimension of type bool and the same shape as `x2`. For labeling sample `i` in `x2`, | |||
| if the label in the first `k` predictions for sample `i` is in `x1`, then the value is True, otherwise False. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> x1 = Tensor(np.array([[1, 8, 5, 2, 7], [4, 9, 1, 3, 5]]), mindspore.float32) | |||
| >>> x2 = Tensor(np.array([1, 3]), mindspore.int32) | |||
| @@ -6171,6 +6393,9 @@ class LRN(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with the same shape and data type as the input tensor. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> x = Tensor(np.random.rand(1, 2, 2, 2), mindspore.float32) | |||
| >>> lrn = P.LRN() | |||
| @@ -37,6 +37,9 @@ class Assign(PrimitiveWithCheck): | |||
| Outputs: | |||
| Tensor, has the same type as original `variable`. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> class Net(nn.Cell): | |||
| ... def __init__(self): | |||
| @@ -120,6 +123,9 @@ class BoundingBoxEncode(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, encoded bounding boxes. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> anchor_box = Tensor([[4,1,2,1],[2,2,2,3]],mindspore.float32) | |||
| >>> groundtruth_box = Tensor([[3,1,2,2],[1,2,1,4]],mindspore.float32) | |||
| @@ -173,6 +179,9 @@ class BoundingBoxDecode(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, decoded boxes. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> anchor_box = Tensor([[4,1,2,1],[2,2,2,3]],mindspore.float32) | |||
| >>> deltas = Tensor([[3,1,2,2],[1,2,1,4]],mindspore.float32) | |||
| @@ -228,6 +237,9 @@ class CheckValid(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with shape of (N,) and dtype of bool. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> import mindspore | |||
| >>> import mindspore.nn as nn | |||
| @@ -297,6 +309,9 @@ class IOU(PrimitiveWithInfer): | |||
| Raises: | |||
| KeyError: When `mode` is not 'iou' or 'iof'. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> iou = P.IOU() | |||
| >>> anchor_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]), mindspore.float16) | |||
| @@ -344,6 +359,9 @@ class MakeRefKey(Primitive): | |||
| Outputs: | |||
| RefKeyType, made from the Parameter name. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` ``CPU`` | |||
| Examples: | |||
| >>> from mindspore.ops import functional as F | |||
| >>> class Net(nn.Cell): | |||
| @@ -545,6 +563,9 @@ class PopulationCount(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor, with the same shape as the input. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> population_count = P.PopulationCount() | |||
| >>> x_input = Tensor([0, 1, 3], mindspore.int16) | |||
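| A hedged continuation: 0, 1, and 3 contain 0, 1, and 2 set bits respectively (the printed dtype is an assumption): | |||
| >>> output = population_count(x_input) | |||
| >>> print(output) | |||
| [0 1 2] | |||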
| @@ -34,6 +34,9 @@ class StandardNormal(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor. The shape is the same as the input `shape`. The dtype is float32. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> shape = (4, 16) | |||
| >>> stdnormal = P.StandardNormal(seed=2) | |||
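| A hedged continuation: the sampled values are random, but the output shape should match the requested `shape`: | |||
| >>> output = stdnormal(shape) | |||
| >>> print(output.shape) | |||
| (4, 16) | |||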
| @@ -81,6 +84,9 @@ class StandardLaplace(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor. The shape that the input 'shape' denotes. The dtype is float32. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> shape = (4, 16) | |||
| >>> stdlaplace = P.StandardLaplace(seed=2) | |||
| @@ -133,6 +139,9 @@ class Gamma(PrimitiveWithInfer): | |||
| Tensor. The shape must be the broadcasted shape of the input `shape` and the shapes of `alpha` and `beta`. | |||
| The dtype is float32. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> shape = (2, 2) | |||
| >>> alpha = Tensor(1.0, mstype.float32) | |||
| @@ -189,6 +198,9 @@ class Poisson(PrimitiveWithInfer): | |||
| Tensor. Its shape must be the broadcasted shape of `shape` and the shape of `mean`. | |||
| The dtype is int32. | |||
| Supported Platforms: | |||
| ``Ascend`` | |||
| Examples: | |||
| >>> shape = (4, 16) | |||
| >>> mean = Tensor(5.0, mstype.float32) | |||
| @@ -244,6 +256,9 @@ class UniformInt(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor. The shape is the same as the input 'shape', and the data type is int32. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> shape = (2, 4) | |||
| >>> minval = Tensor(1, mstype.int32) | |||
| @@ -296,6 +311,9 @@ class UniformReal(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor. The shape that the input 'shape' denotes. The dtype is float32. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> shape = (2, 2) | |||
| >>> uniformreal = P.UniformReal(seed=2) | |||
| @@ -350,6 +368,9 @@ class RandomChoiceWithMask(PrimitiveWithInfer): | |||
| - **index** (Tensor) - The output shape is 2-D. | |||
| - **mask** (Tensor) - The output shape is 1-D. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> rnd_choice_mask = P.RandomChoiceWithMask() | |||
| >>> input_x = Tensor(np.ones(shape=[240000, 4]).astype(np.bool)) | |||
| @@ -394,6 +415,9 @@ class RandomCategorical(PrimitiveWithInfer): | |||
| Outputs: | |||
| - **output** (Tensor) - The output Tensor with shape [batch_size, num_samples]. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| Examples: | |||
| >>> class Net(nn.Cell): | |||
| ... def __init__(self, num_sample): | |||
| @@ -469,6 +493,9 @@ class Multinomial(PrimitiveWithInfer): | |||
| Outputs: | |||
| Tensor with the same number of rows as the input; each row has num_samples sampled indices. | |||
| Supported Platforms: | |||
| ``GPU`` | |||
| Examples: | |||
| >>> input = Tensor([0., 9., 4., 0.], mstype.float32) | |||
| >>> multinomial = P.Multinomial(seed=10) | |||