
modifg_ops_note

tags/v1.1.0
lilei, 5 years ago
commit 43c0092d7f
17 changed files with 385 additions and 396 deletions
  1. +0 -10 mindspore/ops/__init__.py
  2. +1 -1 mindspore/ops/operations/_cache_ops.py
  3. +1 -1 mindspore/ops/operations/_grad_ops.py
  4. +9 -9 mindspore/ops/operations/_inner_ops.py
  5. +6 -6 mindspore/ops/operations/_quant_ops.py
  6. +13 -13 mindspore/ops/operations/_thor_ops.py
  7. +82 -82 mindspore/ops/operations/array_ops.py
  8. +13 -13 mindspore/ops/operations/comm_ops.py
  9. +7 -7 mindspore/ops/operations/control_ops.py
  10. +13 -13 mindspore/ops/operations/debug_ops.py
  11. +1 -1 mindspore/ops/operations/image_ops.py
  12. +1 -1 mindspore/ops/operations/inner_ops.py
  13. +102 -102 mindspore/ops/operations/math_ops.py
  14. +112 -113 mindspore/ops/operations/nn_ops.py
  15. +13 -13 mindspore/ops/operations/other_ops.py
  16. +10 -10 mindspore/ops/operations/random_ops.py
  17. +1 -1 mindspore/ops/operations/sparse_ops.py
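
Every hunk in this commit applies the same mechanical rename: the docstring examples drop the `P` alias of `mindspore.ops.operations` in favor of an `ops` alias. A minimal before/after sketch of the pattern, assuming a stock MindSpore install (ReduceSum is just a representative operator, not one singled out by the commit):

>>> # old spelling, removed from the examples
>>> from mindspore.ops import operations as P
>>> reduce_sum = P.ReduceSum(keep_dims=True)
>>> # new spelling, introduced by this commit
>>> import mindspore.ops as ops
>>> reduce_sum = ops.ReduceSum(keep_dims=True)

Both aliases resolve to the same Primitive classes, so the rename affects documentation only, not runtime behavior.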

+0 -10 mindspore/ops/__init__.py

@@ -18,17 +18,7 @@ Operators can be used in the construct function of Cell.

Examples:

>>> from mindspore.ops import operations as P
>>> from mindspore.ops import composite as C
>>> from mindspore.ops import functional as F
>>> import mindspore.ops as ops

Note:
- The Primitive operators in operations need to be instantiated before being used.
- The composite operators are the pre-defined combination of operators.
- The functional operators are the pre-instantiated Primitive operators, which can be used directly as a function.
- For functional operators usage, please refer to https://gitee.com/mindspore/mindspore/blob/master/mindspore/ops/functional.py
"""

from .primitive import Primitive, PrimitiveWithInfer, prim_attr_register
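
The Note block removed above distinguished Primitive operators (instantiate first, then call) from functional operators (pre-instantiated, callable directly). A short illustrative sketch of that distinction, assuming a working MindSpore install; the tensor values are arbitrary:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> import mindspore.ops as ops
>>> from mindspore.ops import functional as F
>>> x = Tensor(np.array([1.0, 2.0]), mindspore.float32)
>>> y = Tensor(np.array([3.0, 4.0]), mindspore.float32)
>>> add = ops.TensorAdd()     # Primitive: must be instantiated before use
>>> add(x, y)
>>> F.tensor_add(x, y)        # functional: pre-instantiated, used as a plain function

Composite operators, the third category the note named, live in mindspore.ops.composite (e.g. GradOperation, which appears later in this diff).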


+1 -1 mindspore/ops/operations/_cache_ops.py

@@ -94,7 +94,7 @@ class SearchCacheIdx(PrimitiveWithInfer):
[21, 9, -5, 1]], np.int32)), name="hashmap")
>>> indices = Tensor(np.array([10, 2, 25, 5, 3], np.int32))
>>> step, emb_max_num, cache_max_num = 0, 25, 10
>>> ops = P.SearchCacheIdx()
>>> search_cache_idx = ops.SearchCacheIdx()
>>> cache_idx, miss_idx, miss_emb_idx = search_cache_idx(hashmap, indices, step, emb_max_num, cache_max_num)
cache_idx : [5, 1, 10, -1, 3]
miss_idx : [-1, -1, -1, 3, -1]


+1 -1 mindspore/ops/operations/_grad_ops.py

@@ -496,7 +496,7 @@ class DropoutGrad(PrimitiveWithInfer):
Tensor, the value of generated mask for input shape.

Examples:
>>> dropout_grad = P.DropoutGrad(keep_prob=0.5)
>>> dropout_grad = ops.DropoutGrad(keep_prob=0.5)
>>> in_x = Tensor((20, 16, 50, 50))
>>> out = dropout_grad(in_x)
"""


+9 -9 mindspore/ops/operations/_inner_ops.py

@@ -130,7 +130,7 @@ class Range(PrimitiveWithInfer):
Tensor, has the same shape and dtype as `input_x`.

Examples:
>>> range = P.Range(1.0, 8.0, 2.0)
>>> range = ops.Range(1.0, 8.0, 2.0)
>>> x = Tensor(np.array([1, 2, 3, 2]), mindspore.int32)
>>> output = range(x)
>>> print(output)
@@ -199,7 +199,7 @@ class Quant(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor([100.0, 150.0], mstype.float32)
>>> quant = P.Quant(80.0, 0.0, False, "Round")
>>> quant = ops.Quant(80.0, 0.0, False, "Round")
>>> y = quant(input_x)
"""

@@ -253,7 +253,7 @@ class Dequant(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor([100.0, 150.0], mstype.float32)
>>> dequant = P.Dequant(False, False)
>>> dequant = ops.Dequant(False, False)
>>> y = dequant(input_x)
"""

@@ -289,7 +289,7 @@ class LinSpace(PrimitiveWithInfer):
Tensor, has the same shape as `assist`.

Examples:
>>> linspace = P.LinSpace()
>>> linspace = ops.LinSpace()
>>> assist = Tensor([5, 5.5], mindspore.float32)
>>> start = Tensor(1, mindspore.float32)
>>> stop = Tensor(10, mindspore.float32)
@@ -329,7 +329,7 @@ class MatrixDiag(PrimitiveWithInfer):
Examples:
>>> x = Tensor(np.array([1, -1]), mstype.float32)
>>> assist = Tensor(np.arange(-12, 0).reshape(3, 2, 2), mindspore.float32)
>>> matrix_diag = P.MatrixDiag()
>>> matrix_diag = ops.MatrixDiag()
>>> result = matrix_diag(x, assist)
>>> print(result)
[[[-12. 11.]
@@ -383,7 +383,7 @@ class MatrixDiagPart(PrimitiveWithInfer):
Examples:
>>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
>>> assist = Tensor(np.arange(-12, 0).reshape(3, 2, 2), mindspore.float32)
>>> matrix_diag_part = P.MatrixDiagPart()
>>> matrix_diag_part = ops.MatrixDiagPart()
>>> result = matrix_diag_part(x, assist)
>>> print(result)
[[12., -9.], [8., -5.], [4., -1.]]
@@ -426,7 +426,7 @@ class MatrixSetDiag(PrimitiveWithInfer):
Examples:
>>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
>>> diagonal = Tensor([[-1., 2.], [-1., 1.], [-1., 1.]], mindspore.float32)
>>> matrix_set_diag = P.MatrixSetDiag()
>>> matrix_set_diag = ops.MatrixSetDiag()
>>> result = matrix_set_diag(x, diagonal)
>>> print(result)
[[[-1, 0], [0, 2]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]]
@@ -523,7 +523,7 @@ class DynamicGRUV2(PrimitiveWithInfer):
>>> bias_i = Tensor(np.random.rand(48).astype(np.float16))
>>> bias_h = Tensor(np.random.rand(48).astype(np.float16))
>>> init_h = Tensor(np.random.rand(8, 16).astype(np.float16))
>>> dynamic_gru_v2 = P.DynamicGRUV2()
>>> dynamic_gru_v2 = ops.DynamicGRUV2()
>>> output = dynamic_gru_v2(x, weight_i, weight_h, bias_i, bias_h, None, init_h)
>>> result = output[0].shape
>>> print(result)
@@ -640,7 +640,7 @@ class ConfusionMulGrad(PrimitiveWithInfer):
the shape of output is :math:`(x_1,x_4,...x_R)`.

Examples:
>>> confusion_mul_grad = P.ConfusionMulGrad()
>>> confusion_mul_grad = ops.ConfusionMulGrad()
>>> input_0 = Tensor(np.random.randint(-2, 2, (2, 3)), mindspore.float32)
>>> input_1 = Tensor(np.random.randint(0, 4, (2, 3)), mindspore.float32)
>>> input_2 = Tensor(np.random.randint(-4, 0, (2, 3)), mindspore.float32)


+6 -6 mindspore/ops/operations/_quant_ops.py

@@ -752,7 +752,7 @@ class BatchNormFoldGrad(PrimitiveWithInfer):
Performs grad of BatchNormFold operation.

Examples:
>>> batch_norm_fold_grad = P.BatchNormFoldGrad()
>>> batch_norm_fold_grad = ops.BatchNormFoldGrad()
>>> d_batch_mean = Tensor(np.random.randint(-2., 2., (1, 2, 2, 3)), mindspore.float32)
>>> d_batch_std = Tensor(np.random.randn(1, 2, 2, 3), mindspore.float32)
>>> input_x = Tensor(np.random.randint(0, 256, (4, 1, 4, 6)), mindspore.float32)
@@ -809,7 +809,7 @@ class CorrectionMul(PrimitiveWithInfer):
- **out** (Tensor) - Tensor has the same shape as x.

Examples:
>>> correction_mul = P.CorrectionMul()
>>> correction_mul = ops.CorrectionMul()
>>> input_x = Tensor(np.random.randint(-8, 12, (3, 4)), mindspore.float32)
>>> batch_std = Tensor(np.array([1.5, 3, 2]), mindspore.float32)
>>> running_std = Tensor(np.array([2, 1.2, 0.5]), mindspore.float32)
@@ -842,7 +842,7 @@ class CorrectionMulGrad(PrimitiveWithInfer):
Performs grad of CorrectionMul operation.

Examples:
>>> correction_mul_grad = P.CorrectionMulGrad()
>>> correction_mul_grad = ops.CorrectionMulGrad()
>>> dout = Tensor(np.array([1.5, -2.2, 0.7, -3, 1.6, 2.8]).reshape(2, 1, 1, 3), mindspore.float32)
>>> input_x = Tensor(np.random.randint(0, 256, (2, 1, 1, 3)), mindspore.float32)
>>> gamma = Tensor(np.array([0.2, -0.2, 2.5, -1.]).reshape(2, 1, 2), mindspore.float32)
@@ -882,7 +882,7 @@ class CorrectionMulGradReduce(PrimitiveWithInfer):
Performs grad reduce of CorrectionMul operation.

Examples:
>>> correction_mul_grad_rd = P.CorrectionMulGradReduce()
>>> correction_mul_grad_rd = ops.CorrectionMulGradReduce()
>>> dout = Tensor(np.array([1.5, -2.2, 0.7, -3, 1.6, 2.8]).reshape(2, 1, 1, 3), mindspore.float32)
>>> input_x = Tensor(np.random.randint(0, 256, (2, 1, 1, 3)), mindspore.float32)
>>> gamma = Tensor(np.array([0.2, -0.2, 2.5, -1.]).reshape(2, 1, 2), mindspore.float32)
@@ -926,7 +926,7 @@ class BatchNormFold2(PrimitiveWithInfer):
- **y** (Tensor) - Tensor has the same shape as x.

Examples:
>>> batch_norm_fold2 = P.BatchNormFold2()
>>> batch_norm_fold2 = ops.BatchNormFold2()
>>> input_x = Tensor(np.random.randint(-6, 6, (4, 3)), mindspore.float32)
>>> beta = Tensor(np.array([0.2, -0.1, 0.25]), mindspore.float32)
>>> gamma = Tensor(np.array([-0.1, -0.25, 0.1]), mindspore.float32)
@@ -974,7 +974,7 @@ class BatchNormFold2Grad(PrimitiveWithInfer):
Performs grad of CorrectionAddGrad operation.

Examples:
>>> bnf2_grad = P.BatchNormFold2Grad()
>>> bnf2_grad = ops.BatchNormFold2Grad()
>>> input_x = Tensor(np.arange(3*3*12*12).reshape(6, 3, 6, 12), mindspore.float32)
>>> dout = Tensor(np.random.randint(-32, 32, (6, 3, 6, 12)), mindspore.float32)
>>> gamma = Tensor(np.random.randint(-4, 4, (3, 1, 1, 2)), mindspore.float32)


+13 -13 mindspore/ops/operations/_thor_ops.py

@@ -82,7 +82,7 @@ class CusBatchMatMul(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.ones(shape=[2, 128, 128]), mindspore.float32)
>>> input_y = Tensor(np.ones(shape=[2, 128, 128]), mindspore.float32)
>>> cus_batch_matmul = P.CusBatchMatMul()
>>> cus_batch_matmul = ops.CusBatchMatMul()
>>> output = cus_batch_matmul(input_x, input_y)
"""

@@ -115,7 +115,7 @@ class CusCholeskyTrsm(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.ones(shape=[256, 256]), mindspore.float32)
>>> cus_choleskytrsm = P.CusCholeskyTrsm()
>>> cus_choleskytrsm = ops.CusCholeskyTrsm()
>>> output = cus_choleskytrsm(input_x)
"""

@@ -151,7 +151,7 @@ class CusFusedAbsMax1(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.ones(shape=[1, 3]), mindspore.float32)
>>> cus_fused_abs_max1 = P.CusFusedAbsMax1()
>>> cus_fused_abs_max1 = ops.CusFusedAbsMax1()
>>> output = cus_fused_abs_max1(input_x)
"""

@@ -187,7 +187,7 @@ class CusImg2Col(PrimitiveWithInfer):
Tensor, the shape of the output tensor is :math:`(N * H_O * W_O, C1 * K_W * K_H * C0)`.
Examples:
>>> input_x = Tensor(np.ones(shape=[32, 3, 224, 224]), mindspore.float16)
>>> cusimg2col = P.CusImg2Col()
>>> cusimg2col = ops.CusImg2Col()
>>> output = cusimg2col(input_x)
"""

@@ -233,7 +233,7 @@ class CusMatMulCubeDenseLeft(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.ones(shape=[16, 16, 16, 16]), mindspore.float16)
>>> input_y = Tensor(np.ones(shape=[256, 256]), mindspore.float16)
>>> matmulcubedenseleft = P.CusMatMulCubeDenseLeft()
>>> matmulcubedenseleft = ops.CusMatMulCubeDenseLeft()
>>> output = matmulcubedenseleft(input_x, input_y)
"""

@@ -268,7 +268,7 @@ class CusMatMulCubeFraczRightMul(PrimitiveWithInfer):
>>> input_x1 = Tensor(np.ones(shape=[256, 256]), mindspore.float16)
>>> input_x2 = Tensor(np.ones(shape=[16, 16, 16, 16]), mindspore.float16)
>>> input_x3 = Tensor(np.ones(shape=[1, ]), mindspore.float16)
>>> cusmatmulfraczrightmul = P.CusMatMulCubeFraczRightMul()
>>> cusmatmulfraczrightmul = ops.CusMatMulCubeFraczRightMul()
>>> output = cusmatmulfraczrightmul(input_x1, input_x2, input_x3)
"""

@@ -307,7 +307,7 @@ class CusMatMulCube(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.ones(shape=[256, 256]), mindspore.float16)
>>> input_y = Tensor(np.ones(shape=[256, 256]), mindspore.float16)
>>> cusmatmulcube = P.CusMatMulCube()
>>> cusmatmulcube = ops.CusMatMulCube()
>>> output = cusmatmulcube(input_x, input_y)
"""

@@ -349,7 +349,7 @@ class CusMatrixCombine(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.ones(shape=[2, 128, 128]), mindspore.float32)
>>> cusmatrixcombine = P.CusMatrixCombine()
>>> cusmatrixcombine = ops.CusMatrixCombine()
>>> output = cusmatrixcombine(input_x)
"""

@@ -383,7 +383,7 @@ class CusTranspose02314(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.ones(shape=[32, 1, 224, 224, 16]), mindspore.float16)
>>> custranspose02314 = P.CusTranspose02314()
>>> custranspose02314 = ops.CusTranspose02314()
>>> output = custranspose02314(input_x)
"""

@@ -429,7 +429,7 @@ class CusMatMulCubeDenseRight(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.ones(shape=[256, 256]), mindspore.float16)
>>> input_y = Tensor(np.ones(shape=[16, 16, 16, 16]), mindspore.float16)
>>> cusmatmulcubedenseright = P.CusMatMulCubeDenseRight()
>>> cusmatmulcubedenseright = ops.CusMatMulCubeDenseRight()
>>> output = cusmatmulcubedenseright(input_x, input_y)
"""

@@ -464,7 +464,7 @@ class CusMatMulCubeFraczLeftCast(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.ones(shape=[16, 16, 16, 16]), mindspore.float16)
>>> input_y = Tensor(np.ones(shape=[256, 256]), mindspore.float16)
>>> cusmatmulcubefraczleftcast = P.CusMatMulCubeFraczLeftCast()
>>> cusmatmulcubefraczleftcast = ops.CusMatMulCubeFraczLeftCast()
>>> output = cusmatmulcubefraczleftcast(input_x, input_y)
"""

@@ -494,7 +494,7 @@ class Im2Col(PrimitiveWithInfer):
Tensor.
Examples:
>>> input_x = Tensor(np.random.rand(32, 3, 224, 224).astype(np.float16))
>>> img2col = P.CusMatMulCubeDenseLeft(kernel_size=7, pad=3, stride=2)
>>> img2col = ops.Im2Col(kernel_size=7, pad=3, stride=2)
>>> output = img2col(input_x)
"""

@@ -587,7 +587,7 @@ class UpdateThorGradient(PrimitiveWithInfer):
>>> for i in range(16):
... input_x3[i,:,:,:] = temp_x3
>>> input_x3 = Tensor(input_x3)
>>> update_thor_gradient = P.UpdateThorGradient(split_dim=128)
>>> update_thor_gradient = ops.UpdateThorGradient(split_dim=128)
>>> output = update_thor_gradient(input_x1, input_x2, input_x3)
"""



+82 -82 mindspore/ops/operations/array_ops.py

@@ -148,7 +148,7 @@ class ExpandDims(PrimitiveWithInfer):

Examples:
>>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> expand_dims = P.ExpandDims()
>>> expand_dims = ops.ExpandDims()
>>> output = expand_dims(input_tensor, 0)
>>> print(output)
[[[2. 2.]
@@ -200,7 +200,7 @@ class DType(PrimitiveWithInfer):

Examples:
>>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> output = P.DType()(input_tensor)
>>> output = ops.DType()(input_tensor)
>>> print(output)
Float32
"""
@@ -239,7 +239,7 @@ class SameTypeShape(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> input_y = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> output = P.SameTypeShape()(input_x, input_y)
>>> output = ops.SameTypeShape()(input_x, input_y)
>>> print(output)
[[2. 2.]
[2. 2.]]
@@ -284,7 +284,7 @@ class Cast(PrimitiveWithInfer):
>>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
>>> input_x = Tensor(input_np)
>>> type_dst = mindspore.float16
>>> cast = P.Cast()
>>> cast = ops.Cast()
>>> output = cast(input_x, type_dst)
>>> print(output.dtype)
Float16
@@ -357,7 +357,7 @@ class IsSubClass(PrimitiveWithInfer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> output = P.IsSubClass()(mindspore.int32, mindspore.intc)
>>> output = ops.IsSubClass()(mindspore.int32, mindspore.intc)
>>> print(output)
True
"""
@@ -397,7 +397,7 @@ class IsInstance(PrimitiveWithInfer):

Examples:
>>> a = 1
>>> output = P.IsInstance()(a, mindspore.int32)
>>> output = ops.IsInstance()(a, mindspore.int32)
>>> print(output)
False
"""
@@ -447,7 +447,7 @@ class Reshape(PrimitiveWithInfer):

Examples:
>>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> reshape = P.Reshape()
>>> reshape = ops.Reshape()
>>> output = reshape(input_tensor, (3, 2))
>>> print(output)
[[-0.1 0.3]
@@ -539,7 +539,7 @@ class Shape(PrimitiveWithInfer):

Examples:
>>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
>>> shape = P.Shape()
>>> shape = ops.Shape()
>>> output = shape(input_tensor)
>>> print(output)
(3, 2, 1)
@@ -572,7 +572,7 @@ class DynamicShape(Primitive):

Examples:
>>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
>>> shape = P.DynamicShape()
>>> shape = ops.DynamicShape()
>>> output = shape(input_tensor)
"""

@@ -610,7 +610,7 @@ class Squeeze(PrimitiveWithInfer):

Examples:
>>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
>>> squeeze = P.Squeeze(2)
>>> squeeze = ops.Squeeze(2)
>>> output = squeeze(input_tensor)
>>> print(output)
[[1. 1.]
@@ -669,7 +669,7 @@ class Transpose(PrimitiveWithCheck):
Examples:
>>> input_tensor = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
>>> perm = (0, 2, 1)
>>> transpose = P.Transpose()
>>> transpose = ops.Transpose()
>>> output = transpose(input_tensor, perm)
>>> print(output)
[[[ 1. 4.]
@@ -711,7 +711,7 @@ class Unique(Primitive):

Examples:
>>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
>>> output = P.Unique()(x)
>>> output = ops.Unique()(x)
>>> print(output)
(Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))
"""
@@ -743,7 +743,7 @@ class GatherV2(PrimitiveWithCheck):
>>> input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32)
>>> input_indices = Tensor(np.array([1, 2]), mindspore.int32)
>>> axis = 1
>>> output = P.GatherV2()(input_params, input_indices, axis)
>>> output = ops.GatherV2()(input_params, input_indices, axis)
>>> print(output)
[[ 2. 7.]
[ 4. 54.]
@@ -796,7 +796,7 @@ class SparseGatherV2(GatherV2):
>>> input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32)
>>> input_indices = Tensor(np.array([1, 2]), mindspore.int32)
>>> axis = 1
>>> out = P.SparseGatherV2()(input_params, input_indices, axis)
>>> out = ops.SparseGatherV2()(input_params, input_indices, axis)
"""


@@ -820,7 +820,7 @@ class Padding(PrimitiveWithInfer):
Examples:
>>> x = Tensor(np.array([[8], [10]]), mindspore.float32)
>>> pad_dim_size = 4
>>> output = P.Padding(pad_dim_size)(x)
>>> output = ops.Padding(pad_dim_size)(x)
>>> print(output)
[[ 8. 0. 0. 0.]
[10. 0. 0. 0.]]
@@ -865,7 +865,7 @@ class UniqueWithPad(PrimitiveWithInfer):
Examples:
>>> x = Tensor(np.array([1, 1, 5, 5, 4, 4, 3, 3, 2, 2,]), mindspore.int32)
>>> pad_num = 8
>>> output = P.UniqueWithPad()(x, pad_num)
>>> output = ops.UniqueWithPad()(x, pad_num)
>>> print(output)
(Tensor(shape=[10], dtype=Int32, value= [1, 5, 4, 3, 2, 8, 8, 8, 8, 8]),
Tensor(shape=[10], dtype=Int32, value= [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]))
@@ -911,7 +911,7 @@ class Split(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> split = P.Split(1, 2)
>>> split = ops.Split(1, 2)
>>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32)
>>> output = split(x)
>>> print(output)
@@ -973,7 +973,7 @@ class Rank(PrimitiveWithInfer):

Examples:
>>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> rank = P.Rank()
>>> rank = ops.Rank()
>>> output = rank(input_tensor)
>>> print(output)
2
@@ -1009,7 +1009,7 @@ class TruncatedNormal(PrimitiveWithInfer):

Examples:
>>> shape = (1, 2, 3)
>>> truncated_normal = P.TruncatedNormal()
>>> truncated_normal = ops.TruncatedNormal()
>>> output = truncated_normal(shape)
"""

@@ -1048,7 +1048,7 @@ class Size(PrimitiveWithInfer):

Examples:
>>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> size = P.Size()
>>> size = ops.Size()
>>> output = size(input_tensor)
>>> print(output)
4
@@ -1090,7 +1090,7 @@ class Fill(PrimitiveWithInfer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> fill = P.Fill()
>>> fill = ops.Fill()
>>> output = fill(mindspore.float32, (2, 2), 1)
>>> print(output)
[[1. 1.]
@@ -1139,8 +1139,8 @@ class Ones(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> from mindspore.ops import operations as P
>>> ones = P.Ones()
>>> from mindspore.ops import operations as ops
>>> ones = ops.Ones()
>>> output = ones((2, 2), mindspore.float32)
>>> print(output)
[[1.0, 1.0],
@@ -1192,8 +1192,8 @@ class Zeros(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> from mindspore.ops import operations as P
>>> zeros = P.Zeros()
>>> from mindspore.ops import operations as ops
>>> zeros = ops.Zeros()
>>> output = zeros((2, 2), mindspore.float32)
>>> print(output)
[[0.0, 0.0],
@@ -1243,7 +1243,7 @@ class OnesLike(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> oneslike = P.OnesLike()
>>> oneslike = ops.OnesLike()
>>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
>>> output = oneslike(x)
>>> print(output)
@@ -1279,7 +1279,7 @@ class ZerosLike(PrimitiveWithCheck):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> zeroslike = P.ZerosLike()
>>> zeroslike = ops.ZerosLike()
>>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
>>> output = zeroslike(x)
>>> print(output)
@@ -1313,7 +1313,7 @@ class TupleToArray(PrimitiveWithInfer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> type = P.TupleToArray()((1,2,3))
>>> type = ops.TupleToArray()((1,2,3))
>>> print(type)
[1 2 3]
"""
@@ -1359,7 +1359,7 @@ class ScalarToArray(PrimitiveWithInfer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> op = P.ScalarToArray()
>>> op = ops.ScalarToArray()
>>> data = 1.0
>>> output = op(data)
>>> print(output)
@@ -1395,7 +1395,7 @@ class ScalarToTensor(PrimitiveWithInfer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> op = P.ScalarToTensor()
>>> op = ops.ScalarToTensor()
>>> data = 1
>>> output = op(data, mindspore.float32)
>>> print(output)
@@ -1441,7 +1441,7 @@ class InvertPermutation(PrimitiveWithInfer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> invert = P.InvertPermutation()
>>> invert = ops.InvertPermutation()
>>> input_data = (3, 4, 0, 2, 1)
>>> output = invert(input_data)
>>> print(output)
@@ -1510,7 +1510,7 @@ class Argmax(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)
>>> output = P.Argmax(output_type=mindspore.int32)(input_x)
>>> output = ops.Argmax(output_type=mindspore.int32)(input_x)
>>> print(output)
1
"""
@@ -1562,7 +1562,7 @@ class Argmin(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)
>>> index = P.Argmin()(input_x)
>>> index = ops.Argmin()(input_x)
>>> print(index)
2
"""
@@ -1623,7 +1623,7 @@ class ArgMaxWithValue(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.random.rand(5), mindspore.float32)
>>> index, output = P.ArgMaxWithValue()(input_x)
>>> index, output = ops.ArgMaxWithValue()(input_x)
"""

@prim_attr_register
@@ -1678,7 +1678,7 @@ class ArgMinWithValue(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.random.rand(5), mindspore.float32)
>>> output = P.ArgMinWithValue()(input_x)
>>> output = ops.ArgMinWithValue()(input_x)
>>> print(output)
(Tensor(shape=[], dtype=Int32, value= 2), Tensor(shape=[], dtype=Float32, value= 0.0595638))
"""
@@ -1735,7 +1735,7 @@ class Tile(PrimitiveWithInfer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> tile = P.Tile()
>>> tile = ops.Tile()
>>> input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
>>> multiples = (2, 3)
>>> output = tile(input_x, multiples)
@@ -1816,7 +1816,7 @@ class UnsortedSegmentSum(PrimitiveWithInfer):
>>> input_x = Tensor([1, 2, 3, 4], mindspore.float32)
>>> segment_ids = Tensor([0, 0, 1, 2], mindspore.int32)
>>> num_segments = 4
>>> output = P.UnsortedSegmentSum()(input_x, segment_ids, num_segments)
>>> output = ops.UnsortedSegmentSum()(input_x, segment_ids, num_segments)
>>> print(output)
[3. 3. 4. 0.]
"""
@@ -1894,7 +1894,7 @@ class UnsortedSegmentMin(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
>>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))
>>> num_segments = 2
>>> unsorted_segment_min = P.UnsortedSegmentMin()
>>> unsorted_segment_min = ops.UnsortedSegmentMin()
>>> output = unsorted_segment_min(input_x, segment_ids, num_segments)
>>> print(output)
[[1. 2. 3.]
@@ -1953,7 +1953,7 @@ class UnsortedSegmentMax(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
>>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))
>>> num_segments = 2
>>> unsorted_segment_max = P.UnsortedSegmentMax()
>>> unsorted_segment_max = ops.UnsortedSegmentMax()
>>> output = unsorted_segment_max(input_x, segment_ids, num_segments)
>>> print(output)
[[1. 2. 3.]
@@ -2009,7 +2009,7 @@ class UnsortedSegmentProd(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
>>> segment_ids = Tensor(np.array([0, 1, 0]).astype(np.int32))
>>> num_segments = 2
>>> unsorted_segment_prod = P.UnsortedSegmentProd()
>>> unsorted_segment_prod = ops.UnsortedSegmentProd()
>>> output = unsorted_segment_prod(input_x, segment_ids, num_segments)
>>> print(output)
[[4. 4. 3.]
@@ -2075,7 +2075,7 @@ class Concat(PrimitiveWithInfer):
Examples:
>>> data1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
>>> data2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
>>> op = P.Concat()
>>> op = ops.Concat()
>>> output = op((data1, data2))
>>> print(output)
[[0 1]
@@ -2129,7 +2129,7 @@ class ParallelConcat(PrimitiveWithInfer):
Examples:
>>> data1 = Tensor(np.array([[0, 1]]).astype(np.int32))
>>> data2 = Tensor(np.array([[2, 1]]).astype(np.int32))
>>> op = P.ParallelConcat()
>>> op = ops.ParallelConcat()
>>> output = op((data1, data2))
>>> print(output)
[[0 1]
@@ -2216,7 +2216,7 @@ class Pack(PrimitiveWithInfer):
Examples:
>>> data1 = Tensor(np.array([0, 1]).astype(np.float32))
>>> data2 = Tensor(np.array([2, 3]).astype(np.float32))
>>> pack = P.Pack()
>>> pack = ops.Pack()
>>> output = pack([data1, data2])
>>> print(output)
[[0. 1.]
@@ -2269,7 +2269,7 @@ class Unpack(PrimitiveWithInfer):
``Ascend``

Examples:
>>> unpack = P.Unpack()
>>> unpack = ops.Unpack()
>>> input_x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))
>>> output = unpack(input_x)
>>> print(output)
@@ -2330,7 +2330,7 @@ class Slice(PrimitiveWithInfer):
>>> data = Tensor(np.array([[[1, 1, 1], [2, 2, 2]],
... [[3, 3, 3], [4, 4, 4]],
... [[5, 5, 5], [6, 6, 6]]]).astype(np.int32))
>>> slice = P.Slice()
>>> slice = ops.Slice()
>>> output = slice(data, (1, 0, 0), (1, 1, 3))
>>> print(output)
[[[3 3 3]]]
@@ -2385,7 +2385,7 @@ class ReverseV2(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
>>> op = P.ReverseV2(axis=[1])
>>> op = ops.ReverseV2(axis=[1])
>>> output = op(input_x)
>>> print(output)
[[4 3 2 1]
@@ -2427,7 +2427,7 @@ class Rint(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([-1.6, -0.1, 1.5, 2.0]), mindspore.float32)
>>> op = P.Rint()
>>> op = ops.Rint()
>>> output = op(input_x)
>>> print(output)
[-2. 0. 2. 2.]
@@ -2489,7 +2489,7 @@ class Select(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> select = P.Select()
>>> select = ops.Select()
>>> input_cond = Tensor([True, False])
>>> input_x = Tensor([2,3], mindspore.float32)
>>> input_y = Tensor([1,2], mindspore.float32)
@@ -2628,7 +2628,7 @@ class StridedSlice(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
... [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
>>> slice = P.StridedSlice()
>>> slice = ops.StridedSlice()
>>> output = slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))
>>> print(output)
[[[3. 3. 3.]]]
@@ -2773,7 +2773,7 @@ class Diag(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor([1, 2, 3, 4])
>>> diag = P.Diag()
>>> diag = ops.Diag()
>>> output = diag(input_x)
>>> print(output)
[[1, 0, 0, 0],
@@ -2826,7 +2826,7 @@ class DiagPart(PrimitiveWithInfer):
... [0, 2, 0, 0],
... [0, 0, 3, 0],
... [0, 0, 0, 4]])
>>> diag_part = P.DiagPart()
>>> diag_part = ops.DiagPart()
>>> output = diag_part(input_x)
>>> print(output)
[1 2 3 4]
@@ -2879,7 +2879,7 @@ class Eye(PrimitiveWithInfer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> eye = P.Eye()
>>> eye = ops.Eye()
>>> output = eye(2, 2, mindspore.int32)
>>> print(output)
[[1 0]
@@ -2918,7 +2918,7 @@ class ScatterNd(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> op = P.ScatterNd()
>>> op = ops.ScatterNd()
>>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
>>> update = Tensor(np.array([3.2, 1.1]), mindspore.float32)
>>> shape = (3, 3)
@@ -2975,7 +2975,7 @@ class ResizeNearestNeighbor(PrimitiveWithInfer):

Examples:
>>> input_tensor = Tensor(np.array([[[[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]]]), mindspore.float32)
>>> resize = P.ResizeNearestNeighbor((2, 2))
>>> resize = ops.ResizeNearestNeighbor((2, 2))
>>> output = resize(input_tensor)
>>> print(output)
[[[[-0.1 0.3]
@@ -3020,7 +3020,7 @@ class GatherNd(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
>>> op = P.GatherNd()
>>> op = ops.GatherNd()
>>> output = op(input_x, indices)
>>> print(output)
[-0.1 0.5]
@@ -3061,7 +3061,7 @@ class TensorScatterUpdate(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
>>> update = Tensor(np.array([1.0, 2.2]), mindspore.float32)
>>> op = P.TensorScatterUpdate()
>>> op = ops.TensorScatterUpdate()
>>> output = op(input_x, indices, update)
>>> print(output)
[[ 1. 0.3 3.6]
@@ -3120,7 +3120,7 @@ class ScatterUpdate(_ScatterOp_Dynamic):
>>> indices = Tensor(np.array([0, 1]), mindspore.int32)
>>> np_updates = np.array([[2.0, 1.2, 1.0], [3.0, 1.2, 1.0]])
>>> updates = Tensor(np_updates, mindspore.float32)
>>> op = P.ScatterUpdate()
>>> op = ops.ScatterUpdate()
>>> output = op(input_x, indices, updates)
>>> print(output)
[[2. 1.2 1. ]
@@ -3164,7 +3164,7 @@ class ScatterNdUpdate(_ScatterNdOp):
>>> input_x = mindspore.Parameter(Tensor(np_x, mindspore.float32), name="x")
>>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
>>> update = Tensor(np.array([1.0, 2.2]), mindspore.float32)
>>> op = P.ScatterNdUpdate()
>>> op = ops.ScatterNdUpdate()
>>> output = op(input_x, indices, update)
>>> print(output)
[[ 1. 0.3 3.6]
@@ -3215,7 +3215,7 @@ class ScatterMax(_ScatterOp):
>>> input_x = Parameter(Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32), name="input_x")
>>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
>>> update = Tensor(np.ones([2, 2, 3]) * 88, mindspore.float32)
>>> scatter_max = P.ScatterMax()
>>> scatter_max = ops.ScatterMax()
>>> output = scatter_max(input_x, indices, update)
>>> print(output)
[[88. 88. 88.]
@@ -3260,7 +3260,7 @@ class ScatterMin(_ScatterOp):
>>> input_x = Parameter(Tensor(np.array([[0.0, 1.0, 2.0], [0.0, 0.0, 0.0]]), mindspore.float32), name="input_x")
>>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
>>> update = Tensor(np.ones([2, 2, 3]), mindspore.float32)
>>> scatter_min = P.ScatterMin()
>>> scatter_min = ops.ScatterMin()
>>> output = scatter_min(input_x, indices, update)
>>> print(output)
[[0. 1. 1.]
@@ -3299,7 +3299,7 @@ class ScatterAdd(_ScatterOp_Dynamic):
>>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name="x")
>>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
>>> updates = Tensor(np.ones([2, 2, 3]), mindspore.float32)
>>> scatter_add = P.ScatterAdd()
>>> scatter_add = ops.ScatterAdd()
>>> output = scatter_add(input_x, indices, updates)
>>> print(output)
[[1. 1. 1.]
@@ -3345,7 +3345,7 @@ class ScatterSub(_ScatterOp):
>>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]), mindspore.float32), name="x")
>>> indices = Tensor(np.array([[0, 1]]), mindspore.int32)
>>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]]), mindspore.float32)
>>> scatter_sub = P.ScatterSub()
>>> scatter_sub = ops.ScatterSub()
>>> output = scatter_sub(input_x, indices, updates)
>>> print(output)
[[-1.0, -1.0, -1.0], [-1.0, -1.0, -1.0]]
@@ -3383,7 +3383,7 @@ class ScatterMul(_ScatterOp):
>>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mindspore.float32), name="x")
>>> indices = Tensor(np.array([0, 1]), mindspore.int32)
>>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mindspore.float32)
>>> scatter_mul = P.ScatterMul()
>>> scatter_mul = ops.ScatterMul()
>>> output = scatter_mul(input_x, indices, updates)
>>> print(output)
[[2. 2. 2.]
@@ -3422,7 +3422,7 @@ class ScatterDiv(_ScatterOp):
>>> input_x = Parameter(Tensor(np.array([[6.0, 6.0, 6.0], [2.0, 2.0, 2.0]]), mindspore.float32), name="x")
>>> indices = Tensor(np.array([0, 1]), mindspore.int32)
>>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mindspore.float32)
>>> scatter_div = P.ScatterDiv()
>>> scatter_div = ops.ScatterDiv()
>>> output = scatter_div(input_x, indices, updates)
>>> print(output)
[[3. 3. 3.]
@@ -3461,7 +3461,7 @@ class ScatterNdAdd(_ScatterNdOp):
>>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
>>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
>>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
>>> scatter_nd_add = P.ScatterNdAdd()
>>> scatter_nd_add = ops.ScatterNdAdd()
>>> output = scatter_nd_add(input_x, indices, updates)
>>> print(output)
[ 1. 10. 9. 4. 12. 6. 7. 17.]
@@ -3499,7 +3499,7 @@ class ScatterNdSub(_ScatterNdOp):
>>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
>>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
>>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
>>> scatter_nd_sub = P.ScatterNdSub()
>>> scatter_nd_sub = ops.ScatterNdSub()
>>> output = scatter_nd_sub(input_x, indices, updates)
>>> print(output)
[ 1. -6. -3. 4. -2. 6. 7. -1.]
@@ -3534,7 +3534,7 @@ class ScatterNonAliasingAdd(_ScatterNdOp):
>>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
>>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
>>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
>>> scatter_non_aliasing_add = P.ScatterNonAliasingAdd()
>>> scatter_non_aliasing_add = ops.ScatterNonAliasingAdd()
>>> output = scatter_non_aliasing_add(input_x, indices, updates)
>>> print(output)
[ 1. 10. 9. 4. 12. 6. 7. 17.]
@@ -3580,7 +3580,7 @@ class SpaceToDepth(PrimitiveWithInfer):
Examples:
>>> x = Tensor(np.random.rand(1,3,2,2), mindspore.float32)
>>> block_size = 2
>>> space_to_depth = P.SpaceToDepth(block_size)
>>> space_to_depth = ops.SpaceToDepth(block_size)
>>> output = space_to_depth(x)
>>> print(output)
(1, 12, 1, 1)
@@ -3641,7 +3641,7 @@ class DepthToSpace(PrimitiveWithInfer):
Examples:
>>> x = Tensor(np.random.rand(1,12,1,1), mindspore.float32)
>>> block_size = 2
>>> depth_to_space = P.DepthToSpace(block_size)
>>> depth_to_space = ops.DepthToSpace(block_size)
>>> output = depth_to_space(x)
>>> print(output.shape)
(1, 3, 2, 2)
@@ -3710,7 +3710,7 @@ class SpaceToBatch(PrimitiveWithInfer):
Examples:
>>> block_size = 2
>>> paddings = [[0, 0], [0, 0]]
>>> space_to_batch = P.SpaceToBatch(block_size, paddings)
>>> space_to_batch = ops.SpaceToBatch(block_size, paddings)
>>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32)
>>> output = space_to_batch(input_x)
>>> print(output)
@@ -3787,7 +3787,7 @@ class BatchToSpace(PrimitiveWithInfer):
Examples:
>>> block_size = 2
>>> crops = [[0, 0], [0, 0]]
>>> batch_to_space = P.BatchToSpace(block_size, crops)
>>> batch_to_space = ops.BatchToSpace(block_size, crops)
>>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
>>> output = batch_to_space(input_x)
>>> print(output)
@@ -3868,7 +3868,7 @@ class SpaceToBatchND(PrimitiveWithInfer):
Examples:
>>> block_shape = [2, 2]
>>> paddings = [[0, 0], [0, 0]]
>>> space_to_batch_nd = P.SpaceToBatchND(block_shape, paddings)
>>> space_to_batch_nd = ops.SpaceToBatchND(block_shape, paddings)
>>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32)
>>> output = space_to_batch_nd(input_x)
>>> print(output)
@@ -3967,7 +3967,7 @@ class BatchToSpaceND(PrimitiveWithInfer):
Examples:
>>> block_shape = [2, 2]
>>> crops = [[0, 0], [0, 0]]
>>> batch_to_space_nd = P.BatchToSpaceND(block_shape, crops)
>>> batch_to_space_nd = ops.BatchToSpaceND(block_shape, crops)
>>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
>>> output = batch_to_space_nd(input_x)
>>> print(output)
@@ -4050,7 +4050,7 @@ class BroadcastTo(PrimitiveWithInfer):
Examples:
>>> shape = (2, 3)
>>> input_x = Tensor(np.array([1, 2, 3]).astype(np.float32))
>>> broadcast_to = P.BroadcastTo(shape)
>>> broadcast_to = ops.BroadcastTo(shape)
>>> output = broadcast_to(input_x)
>>> print(output)
[[1. 2. 3.]
@@ -4107,7 +4107,7 @@ class Meshgrid(PrimitiveWithInfer):
>>> y = np.array([5, 6, 7]).astype(np.int32)
>>> z = np.array([8, 9, 0, 1, 2]).astype(np.int32)
>>> inputs = (x, y, z)
>>> meshgrid = P.Meshgrid(indexing="xy")
>>> meshgrid = ops.Meshgrid(indexing="xy")
>>> meshgrid(inputs)
(Tensor(shape=[3, 4, 6], dtype=UInt32, value=
[[[1, 1, 1, 1, 1],
@@ -4203,7 +4203,7 @@ class InplaceUpdate(PrimitiveWithInfer):
>>> indices = (0, 1)
>>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
>>> v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
>>> inplace_update = P.InplaceUpdate(indices)
>>> inplace_update = ops.InplaceUpdate(indices)
>>> output = inplace_update(x, v)
>>> print(output)
[[0.5 1. ]
@@ -4262,7 +4262,7 @@ class ReverseSequence(PrimitiveWithInfer):
Examples:
>>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
>>> seq_lengths = Tensor(np.array([1, 2, 3]))
>>> reverse_sequence = P.ReverseSequence(seq_dim=1)
>>> reverse_sequence = ops.ReverseSequence(seq_dim=1)
>>> output = reverse_sequence(x, seq_lengths)
>>> print(output)
[[1. 2. 3.]
@@ -4322,7 +4322,7 @@ class EditDistance(PrimitiveWithInfer):
>>> from mindspore import context
>>> from mindspore import Tensor
>>> import mindspore.nn as nn
>>> import mindspore.ops.operations as P
>>> import mindspore.ops.operations as ops
>>> context.set_context(mode=context.GRAPH_MODE)
>>> class EditDistance(nn.Cell):
... def __init__(self, hypothesis_shape, truth_shape, normalize=True):
@@ -4437,7 +4437,7 @@ class Sort(PrimitiveWithInfer):

Examples:
>>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
>>> sort = P.Sort()
>>> sort = ops.Sort()
>>> output = sort(x)
>>> print(output)
(Tensor(shape=[3, 3], dtype=Float16, value=
@@ -4489,7 +4489,7 @@ class EmbeddingLookup(PrimitiveWithInfer):
>>> input_params = Tensor(np.array([[8, 9], [10, 11], [12, 13], [14, 15]]), mindspore.float32)
>>> input_indices = Tensor(np.array([[5, 2], [8, 5]]), mindspore.int32)
>>> offset = 4
>>> output = P.EmbeddingLookup()(input_params, input_indices, offset)
>>> output = ops.EmbeddingLookup()(input_params, input_indices, offset)
>>> print(output)
[[[10. 11.]
[ 0. 0.]]
@@ -4545,7 +4545,7 @@ class GatherD(PrimitiveWithInfer):
>>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
>>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
>>> dim = 1
>>> output = P.GatherD()(x, dim, index)
>>> output = ops.GatherD()(x, dim, index)
>>> print(output)
[[1 1]
[4 3]]
@@ -4594,7 +4594,7 @@ class Identity(PrimitiveWithInfer):

Examples:
>>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
>>> output = P.Identity()(x)
>>> output = ops.Identity()(x)
>>> print(output)
[1 2 3 4]
"""


+13 -13 mindspore/ops/operations/comm_ops.py

@@ -78,13 +78,13 @@ class AllReduce(PrimitiveWithInfer):
>>> from mindspore import Tensor
>>> from mindspore.ops.operations.comm_ops import ReduceOp
>>> import mindspore.nn as nn
>>> import mindspore.ops.operations as P
>>> import mindspore.ops.operations as ops
>>>
>>> init()
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.allreduce_sum = P.AllReduce(ReduceOp.SUM, group="nccl_world_group")
... self.allreduce_sum = ops.AllReduce(ReduceOp.SUM, group="nccl_world_group")
...
... def construct(self, x):
... return self.allreduce_sum(x)
@@ -134,7 +134,7 @@ class Send(PrimitiveWithInfer):
- **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

Examples:
>>> import mindspore.ops.operations as P
>>> import mindspore.ops.operations as ops
>>> import mindspore.nn as nn
>>> from mindspore.communication import init
>>> from mindspore import Tensor
@@ -144,8 +144,8 @@ class Send(PrimitiveWithInfer):
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.depend = P.Depend()
>>> self.send = P.Send(st_tag=0, dest_rank=8, group="hccl_world_group")
>>> self.depend = ops.Depend()
>>> self.send = ops.Send(st_tag=0, dest_rank=8, group="hccl_world_group")
>>>
>>> def construct(self, x):
>>> out = self.depend(x, self.send(x))
@@ -191,7 +191,7 @@ class Receive(PrimitiveWithInfer):
- **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

Examples:
>>> import mindspore.ops.operations as P
>>> import mindspore.ops.operations as ops
>>> import mindspore.nn as nn
>>> from mindspore.communication import init
>>> from mindspore import Tensor
@@ -201,7 +201,7 @@ class Receive(PrimitiveWithInfer):
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.recv = P.Receive(st_tag=0, src_rank=0, shape=[2, 8], dtype=np.float32,
>>> self.recv = ops.Receive(st_tag=0, src_rank=0, shape=[2, 8], dtype=np.float32,
>>> group="hccl_world_group")
>>>
>>> def construct(self, x):
@@ -253,7 +253,7 @@ class AllGather(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> import mindspore.ops.operations as P
>>> import mindspore.ops.operations as ops
>>> import mindspore.nn as nn
>>> from mindspore.communication import init
>>> from mindspore import Tensor
@@ -262,7 +262,7 @@ class AllGather(PrimitiveWithInfer):
... class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.allgather = P.AllGather(group="nccl_world_group")
... self.allgather = ops.AllGather(group="nccl_world_group")
...
... def construct(self, x):
... return self.allgather(x)
@@ -373,14 +373,14 @@ class ReduceScatter(PrimitiveWithInfer):
>>> from mindspore.communication import init
>>> from mindspore.ops.operations.comm_ops import ReduceOp
>>> import mindspore.nn as nn
>>> import mindspore.ops.operations as P
>>> import mindspore.ops.operations as ops
>>> import numpy as np
>>>
>>> init()
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.reducescatter = P.ReduceScatter(ReduceOp.SUM)
... self.reducescatter = ops.ReduceScatter(ReduceOp.SUM)
...
... def construct(self, x):
... return self.reducescatter(x)
@@ -493,14 +493,14 @@ class Broadcast(PrimitiveWithInfer):
>>> from mindspore import Tensor
>>> from mindspore.communication import init
>>> import mindspore.nn as nn
>>> import mindspore.ops.operations as P
>>> import mindspore.ops.operations as ops
>>> import numpy as np
>>>
>>> init()
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.broadcast = P.Broadcast(1)
... self.broadcast = ops.Broadcast(1)
...
... def construct(self, x):
... return self.broadcast((x,))


+7 -7 mindspore/ops/operations/control_ops.py

@@ -57,7 +57,7 @@ class ControlDepend(Primitive):
... def __init__(self):
... super(Net, self).__init__()
... self.control_depend = P.ControlDepend()
... self.softmax = P.Softmax()
... self.softmax = ops.Softmax()
...
... def construct(self, x, y):
... mul = x * y
@@ -104,12 +104,12 @@ class GeSwitch(PrimitiveWithInfer):
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.square = P.Square()
... self.add = P.TensorAdd()
... self.square = ops.Square()
... self.add = ops.TensorAdd()
... self.value = Tensor(np.full((1), 3), mindspore.float32)
... self.switch = P.GeSwitch()
... self.merge = P.Merge()
... self.less = P.Less()
... self.switch = ops.GeSwitch()
... self.merge = ops.Merge()
... self.less = ops.Less()
...
... def construct(self, x, y):
... cond = self.less(x, y)
@@ -159,7 +159,7 @@ class Merge(PrimitiveWithInfer):
tuple. Output is tuple(`data`, `output_index`). The `data` has the same shape of `inputs` element.

Examples:
>>> merge = P.Merge()
>>> merge = ops.Merge()
>>> input_x = Tensor(np.linspace(0, 8, 8).reshape(2, 4), mindspore.float32)
>>> input_y = Tensor(np.random.randint(-4, 4, (2, 4)), mindspore.float32)
>>> result = merge((input_x, input_y))


+13 -13 mindspore/ops/operations/debug_ops.py

@@ -55,8 +55,8 @@ class ScalarSummary(PrimitiveWithInfer):
>>> class SummaryDemo(nn.Cell):
... def __init__(self,):
... super(SummaryDemo, self).__init__()
... self.summary = P.ScalarSummary()
... self.add = P.TensorAdd()
... self.summary = ops.ScalarSummary()
... self.add = ops.TensorAdd()
...
... def construct(self, x, y):
... name = "x"
@@ -97,7 +97,7 @@ class ImageSummary(PrimitiveWithInfer):
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.summary = P.ImageSummary()
... self.summary = ops.ImageSummary()
...
... def construct(self, x):
... name = "image"
@@ -138,8 +138,8 @@ class TensorSummary(PrimitiveWithInfer):
>>> class SummaryDemo(nn.Cell):
... def __init__(self,):
... super(SummaryDemo, self).__init__()
... self.summary = P.TensorSummary()
... self.add = P.TensorAdd()
... self.summary = ops.TensorSummary()
... self.add = ops.TensorAdd()
...
... def construct(self, x, y):
... x = self.add(x, y)
@@ -180,8 +180,8 @@ class HistogramSummary(PrimitiveWithInfer):
>>> class SummaryDemo(nn.Cell):
... def __init__(self,):
... super(SummaryDemo, self).__init__()
... self.summary = P.HistogramSummary()
... self.add = P.TensorAdd()
... self.summary = ops.HistogramSummary()
... self.add = ops.TensorAdd()
...
... def construct(self, x, y):
... x = self.add(x, y)
@@ -234,8 +234,8 @@ class InsertGradientOf(PrimitiveWithInfer):
...
... return ret
...
>>> clip = P.InsertGradientOf(clip_gradient)
>>> grad_all = C.GradOperation(get_all=True)
>>> clip = ops.InsertGradientOf(clip_gradient)
>>> grad_all = ops.GradOperation(get_all=True)
>>> def InsertGradientOfClipDemo():
... def clip_test(x, y):
... x = clip(x)
@@ -289,7 +289,7 @@ class HookBackward(PrimitiveWithInfer):
... print(grad_out)
...
>>> grad_all = GradOperation(get_all=True)
>>> hook = P.HookBackward(hook_fn)
>>> hook = ops.HookBackward(hook_fn)
>>> def hook_test(x, y):
... z = x * y
... z = hook(z)
@@ -341,7 +341,7 @@ class Print(PrimitiveWithInfer):
>>> class PrintDemo(nn.Cell):
... def __init__(self):
... super(PrintDemo, self).__init__()
... self.print = P.Print()
... self.print = ops.Print()
...
... def construct(self, x, y):
... self.print('Print Tensor x and Tensor y:', x, y)
@@ -382,8 +382,8 @@ class Assert(PrimitiveWithInfer):
>>> class AssertDemo(nn.Cell):
... def __init__(self):
... super(AssertDemo, self).__init__()
... self.assert1 = P.Assert(summarize=10)
... self.add = P.TensorAdd()
... self.assert1 = ops.Assert(summarize=10)
... self.add = ops.TensorAdd()
...
... def construct(self, x, y):
... data = self.add(x, y)


+1 -1 mindspore/ops/operations/image_ops.py

@@ -60,7 +60,7 @@ class CropAndResize(PrimitiveWithInfer):
>>> class CropAndResizeNet(nn.Cell):
... def __init__(self, crop_size):
... super(CropAndResizeNet, self).__init__()
... self.crop_and_resize = P.CropAndResize()
... self.crop_and_resize = ops.CropAndResize()
... self.crop_size = crop_size
...
... def construct(self, x, boxes, box_index):


+1 -1 mindspore/ops/operations/inner_ops.py

@@ -36,7 +36,7 @@ class ScalarCast(PrimitiveWithInfer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> scalar_cast = P.ScalarCast()
>>> scalar_cast = ops.ScalarCast()
>>> output = scalar_cast(255.0, mindspore.int32)
>>> print(output)
255


+102 -102 mindspore/ops/operations/math_ops.py

@@ -139,7 +139,7 @@ class TensorAdd(_MathBinaryOp):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> add = P.TensorAdd()
>>> add = ops.TensorAdd()
>>> input_x = Tensor(np.array([1,2,3]).astype(np.float32))
>>> input_y = Tensor(np.array([4,5,6]).astype(np.float32))
>>> output = add(input_x, input_y)
@@ -180,7 +180,7 @@ class AssignAdd(PrimitiveWithInfer):
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.AssignAdd = P.AssignAdd()
... self.AssignAdd = ops.AssignAdd()
... self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64), name="global_step")
...
... def construct(self, x):
@@ -235,7 +235,7 @@ class AssignSub(PrimitiveWithInfer):
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.AssignSub = P.AssignSub()
... self.AssignSub = ops.AssignSub()
... self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
...
... def construct(self, x):
@@ -358,7 +358,7 @@ class ReduceMean(_Reduce):

Examples:
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = P.ReduceMean(keep_dims=True)
>>> op = ops.ReduceMean(keep_dims=True)
>>> output = op(input_x, 1)
>>> result = output.shape
>>> print(result)
@@ -396,7 +396,7 @@ class ReduceSum(_Reduce):

Examples:
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = P.ReduceSum(keep_dims=True)
>>> op = ops.ReduceSum(keep_dims=True)
>>> output = op(input_x, 1)
>>> output.shape
(3, 1, 5, 6)
@@ -440,7 +440,7 @@ class ReduceAll(_Reduce):

Examples:
>>> input_x = Tensor(np.array([[True, False], [True, True]]))
>>> op = P.ReduceAll(keep_dims=True)
>>> op = ops.ReduceAll(keep_dims=True)
>>> output = op(input_x, 1)
>>> print(output)
[[False]
@@ -482,7 +482,7 @@ class ReduceAny(_Reduce):

Examples:
>>> input_x = Tensor(np.array([[True, False], [True, True]]))
>>> op = P.ReduceAny(keep_dims=True)
>>> op = ops.ReduceAny(keep_dims=True)
>>> output = op(input_x, 1)
>>> print(output)
[[ True]
@@ -524,7 +524,7 @@ class ReduceMax(_Reduce):

Examples:
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = P.ReduceMax(keep_dims=True)
>>> op = ops.ReduceMax(keep_dims=True)
>>> output = op(input_x, 1)
>>> result = output.shape
>>> print(result)
@@ -572,7 +572,7 @@ class ReduceMin(_Reduce):

Examples:
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = P.ReduceMin(keep_dims=True)
>>> op = ops.ReduceMin(keep_dims=True)
>>> output = op(input_x, 1)
>>> result = output.shape
>>> print(result)
@@ -611,7 +611,7 @@ class ReduceProd(_Reduce):

Examples:
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = P.ReduceProd(keep_dims=True)
>>> op = ops.ReduceProd(keep_dims=True)
>>> output = op(input_x, 1)
>>> result = output.shape
>>> print(result)
@@ -641,13 +641,13 @@ class CumProd(PrimitiveWithInfer):
Examples:
>>> a, b, c, = 1, 2, 3
>>> input_x = Tensor(np.array([a, b, c]).astype(np.float32))
>>> op0 = P.CumProd()
>>> op0 = ops.CumProd()
>>> output0 = op0(input_x, 0) # output=[a, a * b, a * b * c]
>>> op1 = P.CumProd(exclusive=True)
>>> op1 = ops.CumProd(exclusive=True)
>>> output1 = op1(input_x, 0) # output=[1, a, a * b]
>>> op2 = P.CumProd(reverse=True)
>>> op2 = ops.CumProd(reverse=True)
>>> output2 = op2(input_x, 0) # output=[a * b * c, b * c, c]
>>> op3 = P.CumProd(exclusive=True, reverse=True)
>>> op3 = ops.CumProd(exclusive=True, reverse=True)
>>> output3 = op3(input_x, 0) # output=[b * c, c, 1]
>>> print(output0)
[1. 2. 6.]
@@ -705,7 +705,7 @@ class MatMul(PrimitiveWithInfer):
Examples:
>>> input_x1 = Tensor(np.ones(shape=[1, 3]), mindspore.float32)
>>> input_x2 = Tensor(np.ones(shape=[3, 4]), mindspore.float32)
>>> matmul = P.MatMul()
>>> matmul = ops.MatMul()
>>> output = matmul(input_x1, input_x2)
"""

@@ -787,7 +787,7 @@ class BatchMatMul(MatMul):
Examples:
>>> input_x = Tensor(np.ones(shape=[2, 4, 1, 3]), mindspore.float32)
>>> input_y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
>>> batmatmul = P.BatchMatMul()
>>> batmatmul = ops.BatchMatMul()
>>> output = batmatmul(input_x, input_y)
>>> print(output)
[[[[3. 3. 3. 3.]]
@@ -801,7 +801,7 @@ class BatchMatMul(MatMul):
>>>
>>> input_x = Tensor(np.ones(shape=[2, 4, 3, 1]), mindspore.float32)
>>> input_y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
>>> batmatmul = P.BatchMatMul(transpose_a=True)
>>> batmatmul = ops.BatchMatMul(transpose_a=True)
>>> output = batmatmul(input_x, input_y)
>>> print(output)
[[[[3. 3. 3. 3.]]
@@ -848,7 +848,7 @@ class CumSum(PrimitiveWithInfer):

Examples:
>>> input = Tensor(np.array([[3, 4, 6, 10],[1, 6, 7, 9],[4, 3, 8, 7],[1, 3, 7, 9]]).astype(np.float32))
>>> cumsum = P.CumSum()
>>> cumsum = ops.CumSum()
>>> output = cumsum(input, 1)
>>> print(output)
[[ 3. 7. 13. 23.]
@@ -898,7 +898,7 @@ class AddN(PrimitiveWithInfer):
>>> class NetAddN(nn.Cell):
... def __init__(self):
... super(NetAddN, self).__init__()
... self.addN = P.AddN()
... self.addN = ops.AddN()
...
... def construct(self, *z):
... return self.addN(z)
@@ -984,7 +984,7 @@ class AccumulateNV2(PrimitiveWithInfer):
>>> class NetAccumulateNV2(nn.Cell):
... def __init__(self):
... super(NetAccumulateNV2, self).__init__()
... self.accumulateNV2 = P.AccumulateNV2()
... self.accumulateNV2 = ops.AccumulateNV2()
...
... def construct(self, *z):
... return self.accumulateNV2(z)
@@ -1043,7 +1043,7 @@ class Neg(PrimitiveWithInfer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> neg = P.Neg()
>>> neg = ops.Neg()
>>> input_x = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32)
>>> output = neg(input_x)
>>> print(output)
@@ -1094,7 +1094,7 @@ class InplaceAdd(PrimitiveWithInfer):
>>> indices = (0, 1)
>>> input_x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
>>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
>>> inplaceAdd = P.InplaceAdd(indices)
>>> inplaceAdd = ops.InplaceAdd(indices)
>>> output = inplaceAdd(input_x, input_v)
>>> print(output)
[[1.5 3. ]
@@ -1156,7 +1156,7 @@ class InplaceSub(PrimitiveWithInfer):
>>> indices = (0, 1)
>>> input_x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
>>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
>>> inplaceSub = P.InplaceSub(indices)
>>> inplaceSub = ops.InplaceSub(indices)
>>> output = inplaceSub(input_x, input_v)
>>> print(output)
[[0.5 1. ]
@@ -1222,7 +1222,7 @@ class Sub(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([4, 5, 6]), mindspore.int32)
>>> sub = P.Sub()
>>> sub = ops.Sub()
>>> output = sub(input_x, input_y)
>>> print(output)
[-3 -3 -3]
@@ -1265,7 +1265,7 @@ class Mul(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
>>> mul = P.Mul()
>>> mul = ops.Mul()
>>> output = mul(input_x, input_y)
>>> print(output)
[ 4. 10. 18.]
@@ -1308,7 +1308,7 @@ class SquaredDifference(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> input_y = Tensor(np.array([2.0, 4.0, 6.0]), mindspore.float32)
>>> squared_difference = P.SquaredDifference()
>>> squared_difference = ops.SquaredDifference()
>>> output = squared_difference(input_x, input_y)
>>> print(output)
[1. 4. 9.]
@@ -1334,7 +1334,7 @@ class Square(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> square = P.Square()
>>> square = ops.Square()
>>> output = square(input_x)
>>> print(output)
[1. 4. 9.]
@@ -1376,7 +1376,7 @@ class Rsqrt(PrimitiveWithInfer):

Examples:
>>> input_tensor = Tensor([[4, 4], [9, 9]], mindspore.float32)
>>> rsqrt = P.Rsqrt()
>>> rsqrt = ops.Rsqrt()
>>> output = rsqrt(input_tensor)
>>> print(output)
[[0.5 0.5 ]
@@ -1419,7 +1419,7 @@ class Sqrt(PrimitiveWithCheck):

Examples:
>>> input_x = Tensor(np.array([1.0, 4.0, 9.0]), mindspore.float32)
>>> sqrt = P.Sqrt()
>>> sqrt = ops.Sqrt()
>>> output = sqrt(input_x)
>>> print(output)
[1. 2. 3.]
@@ -1457,7 +1457,7 @@ class Reciprocal(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> reciprocal = P.Reciprocal()
>>> reciprocal = ops.Reciprocal()
>>> output = reciprocal(input_x)
>>> print(output)
[1. 0.5 0.25]
@@ -1515,14 +1515,14 @@ class Pow(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> input_y = 3.0
>>> pow = P.Pow()
>>> pow = ops.Pow()
>>> output = pow(input_x, input_y)
>>> print(output)
[ 1. 8. 64.]
>>>
>>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> input_y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
>>> pow = P.Pow()
>>> pow = ops.Pow()
>>> output = pow(input_x, input_y)
>>> print(output)
[ 1. 16. 64.]
@@ -1553,7 +1553,7 @@ class Exp(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> exp = P.Exp()
>>> exp = ops.Exp()
>>> output = exp(input_x)
>>> print(output)
[ 2.718282 7.389056 54.598152]
@@ -1595,7 +1595,7 @@ class Expm1(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([0.0, 1.0, 2.0, 4.0]), mindspore.float32)
>>> expm1 = P.Expm1()
>>> expm1 = ops.Expm1()
>>> output = expm1(input_x)
>>> print(output)
[ 0. 1.718282 6.389056 53.598152]
@@ -1637,7 +1637,7 @@ class HistogramFixedWidth(PrimitiveWithInfer):
Examples:
>>> x = Tensor([-1.0, 0.0, 1.5, 2.0, 5.0, 15], mindspore.float16)
>>> range = Tensor([0.0, 5.0], mindspore.float16)
>>> hist = P.HistogramFixedWidth(5)
>>> hist = ops.HistogramFixedWidth(5)
>>> output = hist(x, range)
>>> print(output)
[2 1 1 0 2]
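
Note: the output [2 1 1 0 2] follows from five bins of width 1 over the range [0, 5), with out-of-range values clamped into the edge bins: -1.0 and 0.0 land in bin 0, 1.5 in bin 1, 2.0 in bin 2, and 5.0 and 15 in bin 4. A rough pure-Python sketch of that clamping rule (an illustration of the semantics, not MindSpore's kernel):

def histogram_fixed_width(values, value_range, nbins):
    lo, hi = value_range
    width = (hi - lo) / nbins
    counts = [0] * nbins
    for v in values:
        # Values below/above the range clamp into the first/last bin.
        idx = min(max(int((v - lo) / width), 0), nbins - 1)
        counts[idx] += 1
    return counts

print(histogram_fixed_width([-1.0, 0.0, 1.5, 2.0, 5.0, 15], (0.0, 5.0), 5))  # [2, 1, 1, 0, 2]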
@@ -1677,7 +1677,7 @@ class Log(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> log = P.Log()
>>> log = ops.Log()
>>> output = log(input_x)
>>> print(output)
[0. 0.6931472 1.38629444]
@@ -1718,7 +1718,7 @@ class Log1p(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> log1p = P.Log1p()
>>> log1p = ops.Log1p()
>>> output = log1p(input_x)
>>> print(output)
[0.6931472 1.0986123 1.609438 ]
@@ -1752,7 +1752,7 @@ class Erf(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
>>> erf = P.Erf()
>>> erf = ops.Erf()
>>> output = erf(input_x)
>>> print(output)
[-0.8427168 0. 0.8427168 0.99530876 0.99997765]
@@ -1786,7 +1786,7 @@ class Erfc(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
>>> erfc = P.Erfc()
>>> erfc = ops.Erfc()
>>> output = erfc(input_x)
>>> print(output)
[1.8427168e+00 1.0000000e+00 1.5728319e-01 4.6912432e-03 2.2351742e-05]
@@ -1832,7 +1832,7 @@ class Minimum(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
>>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
>>> minimum = P.Minimum()
>>> minimum = ops.Minimum()
>>> output = minimum(input_x, input_y)
>>> print(output)
[1. 2. 3.]
@@ -1875,7 +1875,7 @@ class Maximum(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
>>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
>>> maximum = P.Maximum()
>>> maximum = ops.Maximum()
>>> output = maximum(input_x, input_y)
>>> print(output)
[4. 5. 6.]
@@ -1918,7 +1918,7 @@ class RealDiv(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
>>> realdiv = P.RealDiv()
>>> realdiv = ops.RealDiv()
>>> output = realdiv(input_x, input_y)
>>> print(output)
[0.25 0.4 0.5 ]
@@ -1962,7 +1962,7 @@ class Div(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
>>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
>>> div = P.Div()
>>> div = ops.Div()
>>> output = div(input_x, input_y)
>>> print(output)
[-1.3333334 2.5 2. ]
@@ -2004,7 +2004,7 @@ class DivNoNan(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
>>> input_y = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32)
>>> div_no_nan = P.DivNoNan()
>>> div_no_nan = ops.DivNoNan()
>>> output = div_no_nan(input_x, input_y)
>>> print(output)
[0. 0. 0. 2.5 2. ]
@@ -2053,7 +2053,7 @@ class FloorDiv(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
>>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
>>> floor_div = P.FloorDiv()
>>> floor_div = ops.FloorDiv()
>>> output = floor_div(input_x, input_y)
>>> print(output)
[ 0 1 -1]
@@ -2088,7 +2088,7 @@ class TruncateDiv(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
>>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
>>> truncate_div = P.TruncateDiv()
>>> truncate_div = ops.TruncateDiv()
>>> output = truncate_div(input_x, input_y)
>>> print(output)
[0 1 0]
@@ -2122,7 +2122,7 @@ class TruncateMod(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
>>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
>>> truncate_mod = P.TruncateMod()
>>> truncate_mod = ops.TruncateMod()
>>> output = truncate_mod(input_x, input_y)
>>> print(output)
[ 2 1 -1]
@@ -2157,7 +2157,7 @@ class Mod(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
>>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
>>> mod = P.Mod()
>>> mod = ops.Mod()
>>> output = mod(input_x, input_y)
>>> print(output)
[-1. 1. 0.]
@@ -2186,7 +2186,7 @@ class Floor(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
>>> floor = P.Floor()
>>> floor = ops.Floor()
>>> output = floor(input_x)
>>> print(output)
[ 1. 2. -2.]
@@ -2231,7 +2231,7 @@ class FloorMod(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
>>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
>>> floor_mod = P.FloorMod()
>>> floor_mod = ops.FloorMod()
>>> output = floor_mod(input_x, input_y)
>>> print(output)
[2 1 2]
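
Note: the four quotient/remainder operators above differ only in rounding direction, which matters for negative operands. FloorDiv/FloorMod round toward negative infinity, so the remainder takes the divisor's sign; TruncateDiv/TruncateMod round toward zero, so the remainder takes the dividend's sign. The outputs shown for the element pair (-1, 3) can be checked in plain Python:

import math

a, b = -1, 3
print(math.floor(a / b), a - b * math.floor(a / b))  # -1 2   (FloorDiv, FloorMod)
print(math.trunc(a / b), a - b * math.trunc(a / b))  # 0 -1   (TruncateDiv, TruncateMod)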
@@ -2253,7 +2253,7 @@ class Ceil(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
>>> ceil_op = P.Ceil()
>>> ceil_op = ops.Ceil()
>>> output = ceil_op(input_x)
>>> print(output)
[ 2. 3. -1.]
@@ -2298,7 +2298,7 @@ class Xdivy(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([2, 4, -1]), mindspore.float32)
>>> input_y = Tensor(np.array([2, 2, 2]), mindspore.float32)
>>> xdivy = P.Xdivy()
>>> xdivy = ops.Xdivy()
>>> output = xdivy(input_x, input_y)
>>> print(output)
[ 1. 2. -0.5]
@@ -2337,7 +2337,7 @@ class Xlogy(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([-5, 0, 4]), mindspore.float32)
>>> input_y = Tensor(np.array([2, 2, 2]), mindspore.float32)
>>> xlogy = P.Xlogy()
>>> xlogy = ops.Xlogy()
>>> output = xlogy(input_x, input_y)
>>> print(output)
[-3.465736 0. 2.7725887]
@@ -2361,7 +2361,7 @@ class Acosh(PrimitiveWithInfer):
``Ascend``

Examples:
>>> acosh = P.Acosh()
>>> acosh = ops.Acosh()
>>> input_x = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
>>> output = acosh(input_x)
"""
@@ -2392,7 +2392,7 @@ class Cosh(PrimitiveWithInfer):
``Ascend``

Examples:
>>> cosh = P.Cosh()
>>> cosh = ops.Cosh()
>>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
>>> output = cosh(input_x)
>>> print(output)
@@ -2425,7 +2425,7 @@ class Asinh(PrimitiveWithInfer):
``Ascend``

Examples:
>>> asinh = P.Asinh()
>>> asinh = ops.Asinh()
>>> input_x = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
>>> output = asinh(input_x)
>>> print(output)
@@ -2458,7 +2458,7 @@ class Sinh(PrimitiveWithInfer):
``Ascend``

Examples:
>>> sinh = P.Sinh()
>>> sinh = ops.Sinh()
>>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
>>> output = sinh(input_x)
>>> print(output)
@@ -2515,13 +2515,13 @@ class Equal(_LogicBinaryOp):

Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> equal = P.Equal()
>>> equal = ops.Equal()
>>> output = equal(input_x, 2.0)
>>> print(output)
[False True False]
>>>
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
>>> equal = P.Equal()
>>> equal = ops.Equal()
>>> output = equal(input_x, input_y)
>>> print(output)
[ True True False]
@@ -2565,7 +2565,7 @@ class ApproximateEqual(_LogicBinaryOp):
Examples:
>>> x1 = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> x2 = Tensor(np.array([2, 4, 6]), mindspore.float32)
>>> approximate_equal = P.ApproximateEqual(2.)
>>> approximate_equal = ops.ApproximateEqual(2.)
>>> output = approximate_equal(x1, x2)
>>> print(output)
[ True True False]
@@ -2606,7 +2606,7 @@ class EqualCount(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
>>> equal_count = P.EqualCount()
>>> equal_count = ops.EqualCount()
>>> output = equal_count(input_x, input_y)
>>> print(output)
[2]
@@ -2651,14 +2651,14 @@ class NotEqual(_LogicBinaryOp):

Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> not_equal = P.NotEqual()
>>> not_equal = ops.NotEqual()
>>> output = not_equal(input_x, 2.0)
>>> print(output)
[ True False True]
>>>
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
>>> not_equal = P.NotEqual()
>>> not_equal = ops.NotEqual()
>>> output = not_equal(input_x, input_y)
>>> print(output)
[False False True]
@@ -2694,7 +2694,7 @@ class Greater(_LogicBinaryOp):
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
>>> greater = P.Greater()
>>> greater = ops.Greater()
>>> output = greater(input_x, input_y)
>>> print(output)
[False True False]
@@ -2735,7 +2735,7 @@ class GreaterEqual(_LogicBinaryOp):
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
>>> greater_equal = P.GreaterEqual()
>>> greater_equal = ops.GreaterEqual()
>>> output = greater_equal(input_x, input_y)
>>> print(output)
[ True True False]
@@ -2776,7 +2776,7 @@ class Less(_LogicBinaryOp):
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
>>> less = P.Less()
>>> less = ops.Less()
>>> output = less(input_x, input_y)
>>> print(output)
[False False True]
@@ -2817,7 +2817,7 @@ class LessEqual(_LogicBinaryOp):
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
>>> less_equal = P.LessEqual()
>>> less_equal = ops.LessEqual()
>>> output = less_equal(input_x, input_y)
>>> print(output)
[ True False True]
@@ -2847,7 +2847,7 @@ class LogicalNot(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
>>> logical_not = P.LogicalNot()
>>> logical_not = ops.LogicalNot()
>>> output = logical_not(input_x)
>>> print(output)
[False True False]
@@ -2891,7 +2891,7 @@ class LogicalAnd(_LogicBinaryOp):
Examples:
>>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
>>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_)
>>> logical_and = P.LogicalAnd()
>>> logical_and = ops.LogicalAnd()
>>> output = logical_and(input_x, input_y)
>>> print(output)
[ True False False]
@@ -2926,7 +2926,7 @@ class LogicalOr(_LogicBinaryOp):
Examples:
>>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
>>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_)
>>> logical_or = P.LogicalOr()
>>> logical_or = ops.LogicalOr()
>>> output = logical_or(input_x, input_y)
>>> print(output)
[ True True True]
@@ -2950,7 +2950,7 @@ class IsNan(PrimitiveWithInfer):
``GPU``

Examples:
>>> is_nan = P.IsNan()
>>> is_nan = ops.IsNan()
>>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
>>> result = is_nan(input_x)
"""
@@ -2981,7 +2981,7 @@ class IsInf(PrimitiveWithInfer):
``GPU``

Examples:
>>> is_inf = P.IsInf()
>>> is_inf = ops.IsInf()
>>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
>>> result = is_inf(input_x)
"""
@@ -3012,7 +3012,7 @@ class IsFinite(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> is_finite = P.IsFinite()
>>> is_finite = ops.IsFinite()
>>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
>>> output = is_finite(input_x)
>>> print(output)
@@ -3047,7 +3047,7 @@ class FloatStatus(PrimitiveWithInfer):
``GPU``

Examples:
>>> float_status = P.FloatStatus()
>>> float_status = ops.FloatStatus()
>>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
>>> result = float_status(input_x)
>>> print(result)
@@ -3083,7 +3083,7 @@ class NPUAllocFloatStatus(PrimitiveWithInfer):
``Ascend``

Examples:
>>> alloc_status = P.NPUAllocFloatStatus()
>>> alloc_status = ops.NPUAllocFloatStatus()
>>> output = alloc_status()
>>> print(output)
[0. 0. 0. 0. 0. 0. 0. 0.]
@@ -3120,8 +3120,8 @@ class NPUGetFloatStatus(PrimitiveWithInfer):
``Ascend``

Examples:
>>> alloc_status = P.NPUAllocFloatStatus()
>>> get_status = P.NPUGetFloatStatus()
>>> alloc_status = ops.NPUAllocFloatStatus()
>>> get_status = ops.NPUGetFloatStatus()
>>> init = alloc_status()
>>> output = get_status(init)
>>> print(output)
@@ -3165,9 +3165,9 @@ class NPUClearFloatStatus(PrimitiveWithInfer):
``Ascend``

Examples:
>>> alloc_status = P.NPUAllocFloatStatus()
>>> get_status = P.NPUGetFloatStatus()
>>> clear_status = P.NPUClearFloatStatus()
>>> alloc_status = ops.NPUAllocFloatStatus()
>>> get_status = ops.NPUGetFloatStatus()
>>> clear_status = ops.NPUClearFloatStatus()
>>> init = alloc_status()
>>> flag = get_status(init)
>>> output = clear_status(init)
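
Note: the three status operators are meant to be used together for overflow detection on Ascend: allocate the 8-element status buffer once, clear it before the computation being monitored, and read it afterwards; any nonzero entry indicates an inf/nan occurred. A hedged sketch of that pattern (the exact placement and dependency ordering inside a real training step depend on the network):

import mindspore.ops as ops

alloc_status = ops.NPUAllocFloatStatus()
get_status = ops.NPUGetFloatStatus()
clear_status = ops.NPUClearFloatStatus()

status = alloc_status()        # one 8-element float32 buffer, initialized to zeros
status = clear_status(status)  # reset before the monitored computation
# ... run the forward/backward computation to be monitored here ...
flag = get_status(status)      # nonzero entries in the buffer signal overflow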
@@ -3205,7 +3205,7 @@ class Cos(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> cos = P.Cos()
>>> cos = ops.Cos()
>>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
>>> output = cos(input_x)
>>> print(output)
@@ -3238,7 +3238,7 @@ class ACos(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> acos = P.ACos()
>>> acos = ops.ACos()
>>> input_x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
>>> output = acos(input_x)
"""
@@ -3269,7 +3269,7 @@ class Sin(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> sin = P.Sin()
>>> sin = ops.Sin()
>>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
>>> output = sin(input_x)
>>> print(output)
@@ -3302,7 +3302,7 @@ class Asin(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> asin = P.Asin()
>>> asin = ops.Asin()
>>> input_x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
>>> output = asin(input_x)
>>> print(output)
@@ -3359,7 +3359,7 @@ class NMSWithMask(PrimitiveWithInfer):
>>> bbox[:, 2] += bbox[:, 0]
>>> bbox[:, 3] += bbox[:, 1]
>>> inputs = Tensor(bbox, mindspore.float32)
>>> nms = P.NMSWithMask(0.5)
>>> nms = ops.NMSWithMask(0.5)
>>> output_boxes, indices, mask = nms(inputs)
"""

@@ -3398,7 +3398,7 @@ class Abs(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([-1.0, 1.0, 0.0]), mindspore.float32)
>>> abs = P.Abs()
>>> abs = ops.Abs()
>>> output = abs(input_x)
>>> print(output)
[1. 1. 0.]
@@ -3445,7 +3445,7 @@ class Sign(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([[2.0, 0.0, -1.0]]), mindspore.float32)
>>> sign = P.Sign()
>>> sign = ops.Sign()
>>> output = sign(input_x)
>>> print(output)
[[ 1. 0. -1.]]
@@ -3478,7 +3478,7 @@ class Round(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
>>> round = P.Round()
>>> round = ops.Round()
>>> output = round(input_x)
>>> print(output)
[ 1. 2. 2. 2. -4.]
@@ -3512,7 +3512,7 @@ class Tan(PrimitiveWithInfer):
``Ascend``

Examples:
>>> tan = P.Tan()
>>> tan = ops.Tan()
>>> input_x = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
>>> output = tan(input_x)
>>> print(output)
[-1.5574081 0. 1.5574081]
@@ -3546,9 +3546,9 @@ class Atan(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([1.047, 0.785]), mindspore.float32)
>>> tan = P.Tan()
>>> tan = ops.Tan()
>>> output_y = tan(input_x)
>>> atan = P.Atan()
>>> atan = ops.Atan()
>>> output = atan(output_y)
>>> print(output)
[1.047 0.7850001]
@@ -3581,7 +3581,7 @@ class Atanh(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([1.047, 0.785]), mindspore.float32)
>>> atanh = P.Atanh()
>>> atanh = ops.Atanh()
>>> output = atanh(input_x)
>>> print(output)
[1.8869909 1.058268 ]
@@ -3624,7 +3624,7 @@ class Atan2(_MathBinaryOp):
Examples:
>>> input_x = Tensor(np.array([0, 1]), mindspore.float32)
>>> input_y = Tensor(np.array([1, 1]), mindspore.float32)
>>> atan2 = P.Atan2()
>>> atan2 = ops.Atan2()
>>> output = atan2(input_x, input_y)
>>> print(output)
[0. 0.7853982]
@@ -3652,7 +3652,7 @@ class SquareSumAll(PrimitiveWithInfer):
Examples:
>>> input_x1 = Tensor(np.array([0, 0, 2, 0]), mindspore.float32)
>>> input_x2 = Tensor(np.array([0, 0, 2, 4]), mindspore.float32)
>>> square_sum_all = P.SquareSumAll()
>>> square_sum_all = ops.SquareSumAll()
>>> output = square_sum_all(input_x1, input_x2)
>>> print(output)
(Tensor(shape=[], dtype=Float32, value= 4),
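
Note: SquareSumAll reduces each input independently and returns one scalar per input: sum(x1**2) = 0 + 0 + 4 + 0 = 4 and sum(x2**2) = 0 + 0 + 4 + 16 = 20. A quick NumPy cross-check of the arithmetic:

import numpy as np

x1 = np.array([0, 0, 2, 0], np.float32)
x2 = np.array([0, 0, 2, 4], np.float32)
print((x1 ** 2).sum(), (x2 ** 2).sum())  # 4.0 20.0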
@@ -3697,7 +3697,7 @@ class BitwiseAnd(_BitwiseBinaryOp):
Examples:
>>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
>>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
>>> bitwise_and = P.BitwiseAnd()
>>> bitwise_and = ops.BitwiseAnd()
>>> output = bitwise_and(input_x1, input_x2)
>>> print(output)
[ 0 0 1 -1 1 0 1]
@@ -3727,7 +3727,7 @@ class BitwiseOr(_BitwiseBinaryOp):
Examples:
>>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
>>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
>>> bitwise_or = P.BitwiseOr()
>>> bitwise_or = ops.BitwiseOr()
>>> output = bitwise_or(input_x1, input_x2)
>>> print(output)
[ 0 1 1 -1 -1 3 3]
@@ -3757,7 +3757,7 @@ class BitwiseXor(_BitwiseBinaryOp):
Examples:
>>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
>>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
>>> bitwise_xor = P.BitwiseXor()
>>> bitwise_xor = ops.BitwiseXor()
>>> output = bitwise_xor(input_x1, input_x2)
>>> print(output)
[ 0 1 0 0 -2 3 2]
@@ -3779,7 +3779,7 @@ class BesselI0e(PrimitiveWithInfer):
``Ascend``

Examples:
>>> bessel_i0e = P.BesselI0e()
>>> bessel_i0e = ops.BesselI0e()
>>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
>>> output = bessel_i0e(input_x)
>>> print(output)
@@ -3813,7 +3813,7 @@ class BesselI1e(PrimitiveWithInfer):
``Ascend``

Examples:
>>> bessel_i1e = P.BesselI1e()
>>> bessel_i1e = ops.BesselI1e()
>>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
>>> output = bessel_i1e(input_x)
>>> print(output)
@@ -3847,7 +3847,7 @@ class Inv(PrimitiveWithInfer):
``Ascend``

Examples:
>>> inv = P.Inv()
>>> inv = ops.Inv()
>>> input_x = Tensor(np.array([0.25, 0.4, 0.31, 0.52]), mindspore.float32)
>>> output = inv(input_x)
>>> print(output)
@@ -3881,7 +3881,7 @@ class Invert(PrimitiveWithInfer):
``Ascend``

Examples:
>>> invert = P.Invert()
>>> invert = ops.Invert()
>>> input_x = Tensor(np.array([25, 4, 13, 9]), mindspore.int16)
>>> output = invert(input_x)
>>> print(output)
@@ -3915,7 +3915,7 @@ class Eps(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor([4, 1, 2, 3], mindspore.float32)
>>> output = P.Eps()(input_x)
>>> output = ops.Eps()(input_x)
>>> print(output)
[1.5258789e-05 1.5258789e-05 1.5258789e-05 1.5258789e-05]
"""


+ 112
- 113
mindspore/ops/operations/nn_ops.py View File

@@ -91,7 +91,7 @@ class Flatten(PrimitiveWithInfer):

Examples:
>>> input_tensor = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
>>> flatten = P.Flatten()
>>> flatten = ops.Flatten()
>>> output = flatten(input_tensor)
>>> print(output.shape)
(1, 24)
@@ -138,7 +138,7 @@ class Softmax(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> softmax = P.Softmax()
>>> softmax = ops.Softmax()
>>> output = softmax(input_x)
>>> print(output)
[0.01165623 0.03168492 0.08612854 0.23412167 0.6364086 ]
@@ -192,7 +192,7 @@ class LogSoftmax(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> log_softmax = P.LogSoftmax()
>>> log_softmax = ops.LogSoftmax()
>>> output = log_softmax(input_x)
>>> print(output)
[-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144]
@@ -233,7 +233,7 @@ class Softplus(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> softplus = P.Softplus()
>>> softplus = ops.Softplus()
>>> output = softplus(input_x)
>>> print(output)
[1.3132615 2.126928 3.0485873 4.01815 5.0067153]
@@ -272,7 +272,7 @@ class Softsign(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([0, -1, 2, 30, -30]), mindspore.float32)
>>> softsign = P.Softsign()
>>> softsign = ops.Softsign()
>>> output = softsign(input_x)
>>> print(output)
[ 0. -0.5 0.6666667 0.9677419 -0.9677419]
@@ -308,7 +308,7 @@ class ReLU(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> relu = P.ReLU()
>>> relu = ops.ReLU()
>>> output = relu(input_x)
>>> print(output)
[[0. 4. 0.]
@@ -345,7 +345,7 @@ class ReLU6(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> relu6 = P.ReLU6()
>>> relu6 = ops.ReLU6()
>>> result = relu6(input_x)
>>> print(result)
[[0. 4. 0.]
@@ -383,7 +383,7 @@ class ReLUV2(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([[[[1, -2], [-3, 4]], [[-5, 6], [7, -8]]]]), mindspore.float32)
>>> relu_v2 = P.ReLUV2()
>>> relu_v2 = ops.ReLUV2()
>>> output, mask = relu_v2(input_x)
>>> print(output)
[[[[1. 0.]
@@ -464,7 +464,7 @@ class Elu(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> elu = P.Elu()
>>> elu = ops.Elu()
>>> output = elu(input_x)
>>> print(output)
[[-0.63212055 4. -0.99966455]
@@ -508,7 +508,7 @@ class HSwish(PrimitiveWithInfer):
``GPU``

Examples:
>>> hswish = P.HSwish()
>>> hswish = ops.HSwish()
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> result = hswish(input_x)
>>> print(result)
@@ -549,7 +549,7 @@ class Sigmoid(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> sigmoid = P.Sigmoid()
>>> sigmoid = ops.Sigmoid()
>>> output = sigmoid(input_x)
>>> print(output)
[0.7310586 0.880797 0.95257413 0.98201376 0.9933072 ]
@@ -590,7 +590,7 @@ class HSigmoid(PrimitiveWithInfer):
``GPU``

Examples:
>>> hsigmoid = P.HSigmoid()
>>> hsigmoid = ops.HSigmoid()
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> result = hsigmoid(input_x)
>>> print(result)
@@ -631,7 +631,7 @@ class Tanh(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> tanh = P.Tanh()
>>> tanh = ops.Tanh()
>>> output = tanh(input_x)
>>> print(output)
[0.7615941 0.9640276 0.9950547 0.9993293 0.9999092]
@@ -697,11 +697,11 @@ class FusedBatchNorm(Primitive):
>>> import numpy as np
>>> from mindspore import Parameter
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class FusedBatchNormNet(nn.Cell):
>>> def __init__(self):
>>> super(FusedBatchNormNet, self).__init__()
>>> self.fused_batch_norm = P.FusedBatchNorm()
>>> self.fused_batch_norm = ops.FusedBatchNorm()
>>> self.scale = Parameter(Tensor(np.ones([64]), mindspore.float32), name="scale")
>>> self.bias = Parameter(Tensor(np.ones([64]), mindspore.float32), name="bias")
>>> self.mean = Parameter(Tensor(np.ones([64]), mindspore.float32), name="mean")
@@ -794,11 +794,11 @@ class FusedBatchNormEx(PrimitiveWithInfer):
>>> import numpy as np
>>> from mindspore import Parameter
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class FusedBatchNormExNet(nn.Cell):
>>> def __init__(self):
>>> super(FusedBatchNormExNet, self).__init__()
>>> self.fused_batch_norm_ex = P.FusedBatchNormEx()
>>> self.fused_batch_norm_ex = ops.FusedBatchNormEx()
>>> self.scale = Parameter(Tensor(np.ones([64]), mindspore.float32), name="scale")
>>> self.bias = Parameter(Tensor(np.ones([64]), mindspore.float32), name="bias")
>>> self.mean = Parameter(Tensor(np.ones([64]), mindspore.float32), name="mean")
@@ -874,7 +874,7 @@ class BNTrainingReduce(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.ones([128, 3, 32, 3]), mindspore.float32)
>>> bn_training_reduce = P.BNTrainingReduce()
>>> bn_training_reduce = ops.BNTrainingReduce()
>>> output = bn_training_reduce(input_x)
>>> print(output)
([1.22880000e+04, 1.22880000e+04, 1.22880000e+04],
@@ -940,7 +940,7 @@ class BNTrainingUpdate(PrimitiveWithInfer):
>>> offset = Tensor(np.ones([2]), mindspore.float32)
>>> mean = Tensor(np.ones([2]), mindspore.float32)
>>> variance = Tensor(np.ones([2]), mindspore.float32)
>>> bn_training_update = P.BNTrainingUpdate()
>>> bn_training_update = ops.BNTrainingUpdate()
>>> output = bn_training_update(input_x, sum, square_sum, scale, offset, mean, variance)
>>> print(output)
([[[[2.73200464e+00, 2.73200464e+00],
@@ -1041,7 +1041,7 @@ class BatchNorm(PrimitiveWithInfer):
>>> bias = Tensor(np.ones([2]), mindspore.float32)
>>> mean = Tensor(np.ones([2]), mindspore.float32)
>>> variance = Tensor(np.ones([2]), mindspore.float32)
>>> batch_norm = P.BatchNorm()
>>> batch_norm = ops.BatchNorm()
>>> output = batch_norm(input_x, scale, bias, mean, variance)
>>> print(output)
([[1.0, 1.0],
@@ -1147,7 +1147,7 @@ class Conv2D(PrimitiveWithInfer):
Examples:
>>> input = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)
>>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)
>>> conv2d = P.Conv2D(out_channel=32, kernel_size=3)
>>> conv2d = ops.Conv2D(out_channel=32, kernel_size=3)
>>> output = conv2d(input, weight)
>>> print(output.shape)
(10, 32, 30, 30)
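
Note: the output shape (10, 32, 30, 30) follows from the usual convolution arithmetic: with no padding, stride 1 and dilation 1, each spatial dimension shrinks by kernel_size - 1, so 32 - 3 + 1 = 30. A sketch of the general formula (an illustration of the arithmetic, not MindSpore's internal shape inference):

def conv2d_out_dim(in_dim, kernel, stride=1, pad=0, dilation=1):
    # Standard convolution output-size formula.
    return (in_dim + 2 * pad - dilation * (kernel - 1) - 1) // stride + 1

print(conv2d_out_dim(32, 3))  # 30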
@@ -1295,7 +1295,7 @@ class DepthwiseConv2dNative(PrimitiveWithInfer):
Examples:
>>> input = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)
>>> weight = Tensor(np.ones([1, 32, 3, 3]), mindspore.float32)
>>> depthwise_conv2d = P.DepthwiseConv2dNative(channel_multiplier = 3, kernel_size = (3, 3))
>>> depthwise_conv2d = ops.DepthwiseConv2dNative(channel_multiplier = 3, kernel_size = (3, 3))
>>> output = depthwise_conv2d(input, weight)
>>> print(output.shape)
(10, 96, 30, 30)
@@ -1508,7 +1508,7 @@ class MaxPool(_Pool):

Examples:
>>> input_tensor = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mindspore.float32)
>>> maxpool_op = P.MaxPool(padding="VALID", ksize=2, strides=1)
>>> maxpool_op = ops.MaxPool(padding="VALID", ksize=2, strides=1)
>>> output_tensor = maxpool_op(input_tensor)
"""

@@ -1563,7 +1563,7 @@ class MaxPoolWithArgmax(_Pool):

Examples:
>>> input_tensor = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mindspore.float32)
>>> maxpool_arg_op = P.MaxPoolWithArgmax(padding="VALID", ksize=2, strides=1)
>>> maxpool_arg_op = ops.MaxPoolWithArgmax(padding="VALID", ksize=2, strides=1)
>>> output_tensor, argmax = maxpool_arg_op(input_tensor)
"""

@@ -1649,11 +1649,11 @@ class AvgPool(_Pool):
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.avgpool_op = P.AvgPool(padding="VALID", ksize=2, strides=1)
... self.avgpool_op = ops.AvgPool(padding="VALID", ksize=2, strides=1)
...
... def construct(self, x):
... result = self.avgpool_op(x)
@@ -1720,7 +1720,7 @@ class Conv2DBackpropInput(PrimitiveWithInfer):
>>> dout = Tensor(np.ones([10, 32, 30, 30]), mindspore.float32)
>>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)
>>> x = Tensor(np.ones([10, 32, 32, 32]))
>>> conv2d_backprop_input = P.Conv2DBackpropInput(out_channel=32, kernel_size=3)
>>> conv2d_backprop_input = ops.Conv2DBackpropInput(out_channel=32, kernel_size=3)
>>> output = conv2d_backprop_input(dout, weight, F.shape(x))
>>> print(output.shape)
(10, 32, 32, 32)
@@ -1840,7 +1840,7 @@ class BiasAdd(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.arange(6).reshape((2, 3)), mindspore.float32)
>>> bias = Tensor(np.random.random(3).reshape((3,)), mindspore.float32)
>>> bias_add = P.BiasAdd()
>>> bias_add = ops.BiasAdd()
>>> output = bias_add(input_x, bias)
>>> print(output)
[[0.4662124 1.2493685 2.3611782]
@@ -1890,7 +1890,7 @@ class TopK(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> topk = P.TopK(sorted=True)
>>> topk = ops.TopK(sorted=True)
>>> input_x = Tensor([1, 2, 3, 4, 5], mindspore.float16)
>>> k = 3
>>> values, indices = topk(input_x, k)
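
Note: with sorted=True the returned values are in descending order and the indices point back into the input, so for the input above one would expect values [5. 4. 3.] at indices [4 3 2] (a hand-derived expectation from the inputs shown, re-checked with NumPy):

import numpy as np

x = np.array([1, 2, 3, 4, 5], np.float16)
idx = np.argsort(x)[::-1][:3]   # positions of the three largest values
print(x[idx], idx)              # [5. 4. 3.] [4 3 2]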
@@ -1944,7 +1944,7 @@ class SoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
Examples:
>>> logits = Tensor([[2, 4, 1, 4, 5], [2, 1, 2, 4, 3]], mindspore.float32)
>>> labels = Tensor([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0]], mindspore.float32)
>>> softmax_cross = P.SoftmaxCrossEntropyWithLogits()
>>> softmax_cross = ops.SoftmaxCrossEntropyWithLogits()
>>> loss, dlogits = softmax_cross(logits, labels)
>>> print(loss)
[0.5899297 0.52374405]
@@ -2125,7 +2125,7 @@ class SmoothL1Loss(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> loss = P.SmoothL1Loss()
>>> loss = ops.SmoothL1Loss()
>>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
>>> output = loss(input_data, target_data)
@@ -2172,7 +2172,7 @@ class L2Loss(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float16)
>>> l2_loss = P.L2Loss()
>>> l2_loss = ops.L2Loss()
>>> output = l2_loss(input_x)
>>> print(output)
7.0
@@ -2212,7 +2212,7 @@ class DataFormatDimMap(PrimitiveWithInfer):

Examples:
>>> x = Tensor([0, 1, 2, 3], mindspore.int32)
>>> dfdm = P.DataFormatDimMap()
>>> dfdm = ops.DataFormatDimMap()
>>> output = dfdm(x)
>>> print(output)
[0 3 1 2]
@@ -2261,7 +2261,7 @@ class RNNTLoss(PrimitiveWithInfer):
>>> labels = np.array([[1, 2]]).astype(np.int32)
>>> input_length = np.array([T] * B).astype(np.int32)
>>> label_length = np.array([len(l) for l in labels]).astype(np.int32)
>>> rnnt_loss = P.RNNTLoss(blank_label=blank)
>>> rnnt_loss = ops.RNNTLoss(blank_label=blank)
>>> costs, grads = rnnt_loss(Tensor(acts), Tensor(labels), Tensor(input_length), Tensor(label_length))
"""

@@ -2324,7 +2324,7 @@ class SGD(PrimitiveWithCheck):
``Ascend`` ``GPU``

Examples:
>>> sgd = P.SGD()
>>> sgd = ops.SGD()
>>> parameters = Tensor(np.array([2, -0.5, 1.7, 4]), mindspore.float32)
>>> gradient = Tensor(np.array([1, -1, 0.5, 2]), mindspore.float32)
>>> learning_rate = Tensor(0.01, mindspore.float32)
@@ -2406,7 +2406,7 @@ class ApplyRMSProp(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> apply_rms = P.ApplyRMSProp()
>>> apply_rms = ops.ApplyRMSProp()
>>> input_x = Tensor(1., mindspore.float32)
>>> mean_square = Tensor(2., mindspore.float32)
>>> moment = Tensor(1., mindspore.float32)
@@ -2508,7 +2508,7 @@ class ApplyCenteredRMSProp(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> centered_rms_prop = P.ApplyCenteredRMSProp()
>>> centered_rms_prop = ops.ApplyCenteredRMSProp()
>>> input_x = Tensor(np.arange(-2, 2).astype(np.float32).reshape(2, 2), mindspore.float32)
>>> mean_grad = Tensor(np.arange(4).astype(np.float32).reshape(2, 2), mindspore.float32)
>>> mean_square = Tensor(np.arange(-3, 1).astype(np.float32).reshape(2, 2), mindspore.float32)
@@ -2605,7 +2605,7 @@ class LayerNorm(Primitive):
>>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32)
>>> gamma = Tensor(np.ones([3]), mindspore.float32)
>>> beta = Tensor(np.ones([3]), mindspore.float32)
>>> layer_norm = P.LayerNorm()
>>> layer_norm = ops.LayerNorm()
>>> output, mean, variance = layer_norm(input_x, gamma, beta)
>>> print(output)
[[-0.2247448 1. 2.2247448]
@@ -2650,7 +2650,7 @@ class L2Normalize(PrimitiveWithInfer):
``Ascend``

Examples:
>>> l2_normalize = P.L2Normalize()
>>> l2_normalize = ops.L2Normalize()
>>> input_x = Tensor(np.random.randint(-256, 256, (2, 3, 4)), mindspore.float32)
>>> output = l2_normalize(input_x)
>>> print(output)
@@ -2697,7 +2697,7 @@ class DropoutGenMask(Primitive):
``Ascend``

Examples:
>>> dropout_gen_mask = P.DropoutGenMask()
>>> dropout_gen_mask = ops.DropoutGenMask()
>>> shape = (2, 4, 5)
>>> keep_prob = Tensor(0.5, mindspore.float32)
>>> output = dropout_gen_mask(shape, keep_prob)
@@ -2738,8 +2738,8 @@ class DropoutDoMask(PrimitiveWithInfer):
>>> x = Tensor(np.ones([2, 2, 3]), mindspore.float32)
>>> shape = (2, 2, 3)
>>> keep_prob = Tensor(0.5, mindspore.float32)
>>> dropout_gen_mask = P.DropoutGenMask()
>>> dropout_do_mask = P.DropoutDoMask()
>>> dropout_gen_mask = ops.DropoutGenMask()
>>> dropout_do_mask = ops.DropoutDoMask()
>>> mask = dropout_gen_mask(shape, keep_prob)
>>> output = dropout_do_mask(x, mask, keep_prob)
>>> print(output)
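
Note: DropoutDoMask follows the usual inverted-dropout convention: surviving elements are scaled by 1/keep_prob so the output's expected value matches the input, hence kept entries of the all-ones input print as 2.0 and dropped ones as 0.0 (which entries survive is random). A rough NumPy sketch of that convention with a plain boolean mask (DropoutGenMask itself produces a packed uint8 mask):

import numpy as np

rng = np.random.default_rng(0)
x = np.ones([2, 2, 3], np.float32)
keep_prob = 0.5
mask = rng.random(x.shape) < keep_prob
print(np.where(mask, x / keep_prob, 0.0))  # kept entries -> 2.0, dropped -> 0.0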
@@ -2810,7 +2810,7 @@ class ResizeBilinear(PrimitiveWithInfer):

Examples:
>>> tensor = Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mindspore.float32)
>>> resize_bilinear = P.ResizeBilinear((5, 5))
>>> resize_bilinear = ops.ResizeBilinear((5, 5))
>>> output = resize_bilinear(tensor)
>>> print(output)
[[[[1. 2. 3. 4. 5.]
@@ -2870,7 +2870,7 @@ class OneHot(PrimitiveWithInfer):
Examples:
>>> indices = Tensor(np.array([0, 1, 2]), mindspore.int32)
>>> depth, on_value, off_value = 3, Tensor(1.0, mindspore.float32), Tensor(0.0, mindspore.float32)
>>> onehot = P.OneHot()
>>> onehot = ops.OneHot()
>>> output = onehot(indices, depth, on_value, off_value)
>>> print(output)
[[1. 0. 0.]
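
Note: for indices [0, 1, 2] with depth 3, on_value 1.0 and off_value 0.0, the full result is the 3x3 identity matrix: row i carries on_value at column indices[i] and off_value everywhere else. The NumPy equivalent:

import numpy as np

indices = np.array([0, 1, 2])
depth, on_value, off_value = 3, 1.0, 0.0
out = np.full((len(indices), depth), off_value, np.float32)
out[np.arange(len(indices)), indices] = on_value
print(out)  # the 3x3 identity matrix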
@@ -2929,7 +2929,7 @@ class Gelu(PrimitiveWithInfer):

Examples:
>>> tensor = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> gelu = P.Gelu()
>>> gelu = ops.Gelu()
>>> result = gelu(tensor)
>>> print(result)
[0.841192 1.9545976 2.9963627]
@@ -2974,7 +2974,7 @@ class GetNext(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> get_next = P.GetNext([mindspore.float32, mindspore.int32], [[32, 1, 28, 28], [10]], 2, 'shared_name')
>>> get_next = ops.GetNext([mindspore.float32, mindspore.int32], [[32, 1, 28, 28], [10]], 2, 'shared_name')
>>> feature, label = get_next()
"""

@@ -3026,11 +3026,11 @@ class PReLU(PrimitiveWithInfer):
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.prelu = P.PReLU()
>>> self.prelu = ops.PReLU()
>>> def construct(self, input_x, weight):
>>> result = self.prelu(input_x, weight)
>>> return result
@@ -3214,7 +3214,7 @@ class SigmoidCrossEntropyWithLogits(PrimitiveWithInfer):
Examples:
>>> logits = Tensor(np.array([[-0.8, 1.2, 0.7], [-0.1, -0.4, 0.7]]).astype(np.float32))
>>> labels = Tensor(np.array([[0.3, 0.8, 1.2], [-0.6, 0.1, 2.2]]).astype(np.float32))
>>> sigmoid = P.SigmoidCrossEntropyWithLogits()
>>> sigmoid = ops.SigmoidCrossEntropyWithLogits()
>>> output = sigmoid(logits, labels)
>>> print(output)
[[0.6111007 0.5032824 0.26318604]
@@ -3257,7 +3257,7 @@ class Pad(PrimitiveWithInfer):

Examples:
>>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> pad_op = P.Pad(((1, 2), (2, 1)))
>>> pad_op = ops.Pad(((1, 2), (2, 1)))
>>> output = pad_op(input_tensor)
>>> print(output)
[[ 0. 0. 0. 0. 0. 0. ],
@@ -3325,13 +3325,13 @@ class MirrorPad(PrimitiveWithInfer):

Examples:
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> import mindspore.nn as nn
>>> import numpy as np
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.pad = P.MirrorPad(mode="REFLECT")
... self.pad = ops.MirrorPad(mode="REFLECT")
... def construct(self, x, paddings):
... return self.pad(x, paddings)
...
@@ -3411,7 +3411,7 @@ class ROIAlign(PrimitiveWithInfer):
Examples:
>>> input_tensor = Tensor(np.array([[[[1., 2.], [3., 4.]]]]), mindspore.float32)
>>> rois = Tensor(np.array([[0, 0.2, 0.3, 0.2, 0.3]]), mindspore.float32)
>>> roi_align = P.ROIAlign(2, 2, 0.5, 2)
>>> roi_align = ops.ROIAlign(2, 2, 0.5, 2)
>>> output = roi_align(input_tensor, rois)
>>> print(output)
[[[[1.775 2.025]
@@ -3501,11 +3501,11 @@ class Adam(PrimitiveWithInfer):
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.apply_adam = P.Adam()
... self.apply_adam = ops.Adam()
... self.var = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="var")
... self.m = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="m")
... self.v = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="v")
@@ -3604,12 +3604,12 @@ class AdamNoUpdateParam(PrimitiveWithInfer):
>>> import mindspore as ms
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>>
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.adam = P.AdamNoUpdateParam()
>>> self.adam = ops.AdamNoUpdateParam()
>>> self.m = Parameter(Tensor(np.array([[0.1, 0.1, 0.1], [0.2, 0.2, 0.2]]).astype(np.float32)),
>>> name="m")
>>> self.v = Parameter(Tensor(np.array([[0.1, 0.1, 0.1], [0.2, 0.2, 0.2]]).astype(np.float32)),
@@ -3717,12 +3717,12 @@ class FusedSparseAdam(PrimitiveWithInfer):
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.sparse_apply_adam = P.FusedSparseAdam()
... self.sparse_apply_adam = ops.FusedSparseAdam()
... self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var")
... self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m")
... self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v")
@@ -3855,12 +3855,12 @@ class FusedSparseLazyAdam(PrimitiveWithInfer):
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.sparse_apply_lazyadam = P.FusedSparseLazyAdam()
... self.sparse_apply_lazyadam = ops.FusedSparseLazyAdam()
... self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var")
... self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m")
... self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v")
@@ -3969,11 +3969,11 @@ class FusedSparseFtrl(PrimitiveWithInfer):
>>> import numpy as np
>>> from mindspore import Parameter
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class SparseApplyFtrlNet(nn.Cell):
... def __init__(self):
... super(SparseApplyFtrlNet, self).__init__()
... self.sparse_apply_ftrl = P.FusedSparseFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5)
... self.sparse_apply_ftrl = ops.FusedSparseFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5)
... self.var = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="var")
... self.accum = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="accum")
... self.linear = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="linear")
@@ -4068,11 +4068,11 @@ class FusedSparseProximalAdagrad(PrimitiveWithInfer):
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.sparse_apply_proximal_adagrad = P.FusedSparseProximalAdagrad()
... self.sparse_apply_proximal_adagrad = ops.FusedSparseProximalAdagrad()
... self.var = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="var")
... self.accum = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="accum")
... self.lr = Tensor(0.01, mstype.float32)
@@ -4162,11 +4162,11 @@ class KLDivLoss(PrimitiveWithInfer):
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.kldiv_loss = P.KLDivLoss()
... self.kldiv_loss = ops.KLDivLoss()
... def construct(self, x, y):
... result = self.kldiv_loss(x, y)
... return result
@@ -4241,11 +4241,11 @@ class BinaryCrossEntropy(PrimitiveWithInfer):
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.binary_cross_entropy = P.BinaryCrossEntropy()
... self.binary_cross_entropy = ops.BinaryCrossEntropy()
... def construct(self, x, y, weight):
... result = self.binary_cross_entropy(x, y, weight)
... return result
@@ -4341,12 +4341,12 @@ class ApplyAdaMax(PrimitiveWithInfer):
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.apply_ada_max = P.ApplyAdaMax()
... self.apply_ada_max = ops.ApplyAdaMax()
... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var")
... self.m = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="m")
... self.v = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="v")
@@ -4473,12 +4473,12 @@ class ApplyAdadelta(PrimitiveWithInfer):
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.apply_adadelta = P.ApplyAdadelta()
... self.apply_adadelta = ops.ApplyAdadelta()
... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var")
... self.accum = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="accum")
... self.accum_update = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="accum_update")
@@ -4585,12 +4585,12 @@ class ApplyAdagrad(PrimitiveWithInfer):
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.apply_adagrad = P.ApplyAdagrad()
... self.apply_adagrad = ops.ApplyAdagrad()
... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var")
... self.accum = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="accum")
... def construct(self, lr, grad):
@@ -4678,12 +4678,12 @@ class ApplyAdagradV2(PrimitiveWithInfer):
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.apply_adagrad_v2 = P.ApplyAdagradV2(epsilon=1e-6)
... self.apply_adagrad_v2 = ops.ApplyAdagradV2(epsilon=1e-6)
... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var")
... self.accum = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="accum")
... def construct(self, lr, grad):
@@ -4772,12 +4772,12 @@ class SparseApplyAdagrad(PrimitiveWithInfer):
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.sparse_apply_adagrad = P.SparseApplyAdagrad(lr=1e-8)
... self.sparse_apply_adagrad = ops.SparseApplyAdagrad(lr=1e-8)
... self.var = Parameter(Tensor(np.ones([1, 1, 1]).astype(np.float32)), name="var")
... self.accum = Parameter(Tensor(np.ones([1, 1, 1]).astype(np.float32)), name="accum")
... def construct(self, grad, indices):
@@ -4867,12 +4867,12 @@ class SparseApplyAdagradV2(PrimitiveWithInfer):
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.sparse_apply_adagrad_v2 = P.SparseApplyAdagradV2(lr=1e-8, epsilon=1e-6)
... self.sparse_apply_adagrad_v2 = ops.SparseApplyAdagradV2(lr=1e-8, epsilon=1e-6)
... self.var = Parameter(Tensor(np.ones([1, 1, 1]).astype(np.float32)), name="var")
... self.accum = Parameter(Tensor(np.ones([1, 1, 1]).astype(np.float32)), name="accum")
...
@@ -4962,11 +4962,11 @@ class ApplyProximalAdagrad(PrimitiveWithInfer):
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.apply_proximal_adagrad = P.ApplyProximalAdagrad()
... self.apply_proximal_adagrad = ops.ApplyProximalAdagrad()
... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var")
... self.accum = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="accum")
... self.lr = 0.01
@@ -5076,11 +5076,11 @@ class SparseApplyProximalAdagrad(PrimitiveWithCheck):
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.sparse_apply_proximal_adagrad = P.SparseApplyProximalAdagrad()
... self.sparse_apply_proximal_adagrad = ops.SparseApplyProximalAdagrad()
... self.var = Parameter(Tensor(np.random.rand(1, 2).astype(np.float32)), name="var")
... self.accum = Parameter(Tensor(np.random.rand(1, 2).astype(np.float32)), name="accum")
... self.lr = 0.01
@@ -5175,11 +5175,11 @@ class ApplyAddSign(PrimitiveWithInfer):
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.apply_add_sign = P.ApplyAddSign()
... self.apply_add_sign = ops.ApplyAddSign()
... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var")
... self.m = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="m")
... self.lr = 0.001
@@ -5293,11 +5293,11 @@ class ApplyPowerSign(PrimitiveWithInfer):
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.apply_power_sign = P.ApplyPowerSign()
... self.apply_power_sign = ops.ApplyPowerSign()
... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var")
... self.m = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="m")
... self.lr = 0.001
@@ -5393,11 +5393,11 @@ class ApplyGradientDescent(PrimitiveWithInfer):
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.apply_gradient_descent = P.ApplyGradientDescent()
... self.apply_gradient_descent = ops.ApplyGradientDescent()
... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var")
... self.alpha = 0.001
... def construct(self, delta):
@@ -5471,11 +5471,11 @@ class ApplyProximalGradientDescent(PrimitiveWithInfer):
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.apply_proximal_gradient_descent = P.ApplyProximalGradientDescent()
... self.apply_proximal_gradient_descent = ops.ApplyProximalGradientDescent()
... self.var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var")
... self.alpha = 0.001
... self.l1 = 0.0
@@ -5555,18 +5555,17 @@ class LARSUpdate(PrimitiveWithInfer):

Examples:
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import functional as F
>>> import mindspore.ops as ops
>>> import mindspore.nn as nn
>>> import numpy as np
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.lars = P.LARSUpdate()
... self.reduce = P.ReduceSum()
... self.lars = ops.LARSUpdate()
... self.reduce = ops.ReduceSum()
... def construct(self, weight, gradient):
... w_square_sum = self.reduce(F.square(weight))
... grad_square_sum = self.reduce(F.square(gradient))
... w_square_sum = self.reduce(ops.square(weight))
... grad_square_sum = self.reduce(ops.square(gradient))
... grad_t = self.lars(weight, gradient, w_square_sum, grad_square_sum, 0.0, 1.0)
... return grad_t
...
@@ -5649,11 +5648,11 @@ class ApplyFtrl(PrimitiveWithInfer):
>>> import numpy as np
>>> from mindspore import Parameter
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class ApplyFtrlNet(nn.Cell):
... def __init__(self):
... super(ApplyFtrlNet, self).__init__()
... self.apply_ftrl = P.ApplyFtrl()
... self.apply_ftrl = ops.ApplyFtrl()
... self.lr = 0.001
... self.l1 = 0.0
... self.l2 = 0.0
@@ -5748,11 +5747,11 @@ class SparseApplyFtrl(PrimitiveWithCheck):
>>> import numpy as np
>>> from mindspore import Parameter
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class SparseApplyFtrlNet(nn.Cell):
... def __init__(self):
... super(SparseApplyFtrlNet, self).__init__()
... self.sparse_apply_ftrl = P.SparseApplyFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5)
... self.sparse_apply_ftrl = ops.SparseApplyFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5)
... self.var = Parameter(Tensor(np.random.rand(1, 1).astype(np.float32)), name="var")
... self.accum = Parameter(Tensor(np.random.rand(1, 1).astype(np.float32)), name="accum")
... self.linear = Parameter(Tensor(np.random.rand(1, 1).astype(np.float32)), name="linear")
@@ -5853,11 +5852,11 @@ class SparseApplyFtrlV2(PrimitiveWithInfer):
>>> import numpy as np
>>> from mindspore import Parameter
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class SparseApplyFtrlV2Net(nn.Cell):
... def __init__(self):
... super(SparseApplyFtrlV2Net, self).__init__()
... self.sparse_apply_ftrl_v2 = P.SparseApplyFtrlV2(lr=0.01, l1=0.0, l2=0.0,
... self.sparse_apply_ftrl_v2 = ops.SparseApplyFtrlV2(lr=0.01, l1=0.0, l2=0.0,
... l2_shrinkage=0.0, lr_power=-0.5)
... self.var = Parameter(Tensor(np.random.rand(1, 2).astype(np.float32)), name="var")
... self.accum = Parameter(Tensor(np.random.rand(1, 2).astype(np.float32)), name="accum")
@@ -5932,7 +5931,7 @@ class Dropout(PrimitiveWithInfer):
- **mask** (Tensor) - with the same shape as the input tensor.

Examples:
>>> dropout = P.Dropout(keep_prob=0.5)
>>> dropout = ops.Dropout(keep_prob=0.5)
>>> x = Tensor((20, 16, 50, 50), mindspore.float32)
>>> output, mask = dropout(x)
>>> print(output)
@@ -5994,7 +5993,7 @@ class CTCLoss(PrimitiveWithInfer):
>>> labels_indices = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int64)
>>> labels_values = Tensor(np.array([2, 2]), mindspore.int32)
>>> sequence_length = Tensor(np.array([2, 2]), mindspore.int32)
>>> ctc_loss = P.CTCLoss()
>>> ctc_loss = ops.CTCLoss()
>>> loss, gradient = ctc_loss(inputs, labels_indices, labels_values, sequence_length)
>>> print(loss)
[0.69121575 0.5381993 ]
@@ -6072,7 +6071,7 @@ class CTCGreedyDecoder(PrimitiveWithInfer):
... def __init__(self):
... super(CTCGreedyDecoderNet, self).__init__()
... self.ctc_greedy_decoder = P.CTCGreedyDecoder()
... self.assert_op = P.Assert(300)
... self.assert_op = ops.Assert(300)
...
... def construct(self, inputs, sequence_length):
... out = self.ctc_greedy_decoder(inputs, sequence_length)
@@ -6175,7 +6174,7 @@ class BasicLSTMCell(PrimitiveWithInfer):
>>> c = Tensor(np.random.rand(1, 2).astype(np.float16))
>>> w = Tensor(np.random.rand(34, 8).astype(np.float16))
>>> b = Tensor(np.random.rand(8, ).astype(np.float16))
>>> lstm = P.BasicLSTMCell(keep_prob=1.0, forget_bias=1.0, state_is_tuple=True, activation='tanh')
>>> lstm = ops.BasicLSTMCell(keep_prob=1.0, forget_bias=1.0, state_is_tuple=True, activation='tanh')
>>> output = lstm(x, h, c, w, b)
>>> print(output)
(Tensor(shape=[1, 2], dtype=Float16, value=
@@ -6288,13 +6287,13 @@ class DynamicRNN(PrimitiveWithInfer):
>>> import numpy as np
>>> from mindspore import Parameter
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> import mindspore.context as context
>>> context.set_context(mode=context.GRAPH_MODE)
>>> class DynamicRNNNet(nn.Cell):
>>> def __init__(self):
>>> super(DynamicRNNNet, self).__init__()
>>> self.dynamic_rnn = P.DynamicRNN()
>>> self.dynamic_rnn = ops.DynamicRNN()
>>>
>>> def construct(self, x, w, b, init_h, init_c):
>>> out = self.dynamic_rnn(x, w, b, None, init_h, init_c)
@@ -6395,7 +6394,7 @@ class InTopK(PrimitiveWithInfer):
Examples:
>>> x1 = Tensor(np.array([[1, 8, 5, 2, 7], [4, 9, 1, 3, 5]]), mindspore.float32)
>>> x2 = Tensor(np.array([1, 3]), mindspore.int32)
>>> in_top_k = P.InTopK(3)
>>> in_top_k = ops.InTopK(3)
>>> output = in_top_k(x1, x2)
>>> print(output)
[ True False]
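
Note: the result follows from ranking each row of x1. In row 0 the target class x2[0] = 1 holds the largest value (8), so it is inside the top 3; in row 1 the target class x2[1] = 3 holds value 3, which ranks fourth behind 9, 5 and 4, so it falls outside. A NumPy re-derivation:

import numpy as np

x1 = np.array([[1, 8, 5, 2, 7], [4, 9, 1, 3, 5]], np.float32)
x2 = np.array([1, 3])
top3 = np.argsort(x1, axis=1)[:, ::-1][:, :3]  # top-3 class indices per row
print([t in row for t, row in zip(x2, top3)])  # [True, False]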
@@ -6442,7 +6441,7 @@ class LRN(PrimitiveWithInfer):

Examples:
>>> x = Tensor(np.random.rand(1, 2, 2, 2), mindspore.float32)
>>> lrn = P.LRN()
>>> lrn = ops.LRN()
>>> output = lrn(x)
>>> print(output)
[[[[0.18990143 0.59475636]


+ 13
- 13
mindspore/ops/operations/other_ops.py View File

@@ -47,7 +47,7 @@ class Assign(PrimitiveWithCheck):
... self.y = mindspore.Parameter(Tensor([1.0], mindspore.float32), name="y")
...
... def construct(self, x):
... P.Assign()(self.y, x)
... ops.Assign()(self.y, x)
... return self.y
...
>>> x = Tensor([2.0], mindspore.float32)
@@ -85,7 +85,7 @@ class InplaceAssign(PrimitiveWithInfer):
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.inplace_assign = P.InplaceAssign()
... self.inplace_assign = ops.InplaceAssign()
...
... def construct(self, x):
... val = x - 1.0
@@ -129,7 +129,7 @@ class BoundingBoxEncode(PrimitiveWithInfer):
Examples:
>>> anchor_box = Tensor([[4,1,2,1],[2,2,2,3]],mindspore.float32)
>>> groundtruth_box = Tensor([[3,1,2,2],[1,2,1,4]],mindspore.float32)
>>> boundingbox_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0))
>>> boundingbox_encode = ops.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0))
>>> output = boundingbox_encode(anchor_box, groundtruth_box)
>>> print(output)
[[ 5.0000000e-01 5.0000000e-01 -6.5504000e+04 6.9335938e-01]
@@ -185,7 +185,7 @@ class BoundingBoxDecode(PrimitiveWithInfer):
Examples:
>>> anchor_box = Tensor([[4,1,2,1],[2,2,2,3]],mindspore.float32)
>>> deltas = Tensor([[3,1,2,2],[1,2,1,4]],mindspore.float32)
>>> boundingbox_decode = P.BoundingBoxDecode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0),
>>> boundingbox_decode = ops.BoundingBoxDecode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0),
... max_shape=(768, 1280), wh_ratio_clip=0.016)
>>> output = boundingbox_decode(anchor_box, deltas)
>>> print(output)
@@ -245,11 +245,11 @@ class CheckValid(PrimitiveWithInfer):
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import operations as ops
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.check_valid = P.CheckValid()
... self.check_valid = ops.CheckValid()
... def construct(self, x, y):
... valid_result = self.check_valid(x, y)
... return valid_result
@@ -313,7 +313,7 @@ class IOU(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> iou = P.IOU()
>>> iou = ops.IOU()
>>> anchor_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]), mindspore.float16)
>>> gt_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]), mindspore.float16)
>>> output = iou(anchor_boxes, gt_boxes)
@@ -363,16 +363,16 @@ class MakeRefKey(Primitive):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore.ops import functional as F
>>> import mindspore.ops as ops
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.y = mindspore.Parameter(Tensor(np.ones([6, 8, 10]), mindspore.int32), name="y")
... self.make_ref_key = P.MakeRefKey("y")
... self.make_ref_key = ops.MakeRefKey("y")
...
... def construct(self, x):
... key = self.make_ref_key()
... ref = F.make_ref(key, x, self.y)
... ref = ops.make_ref(key, x, self.y)
... return ref * x
...
>>> x = Tensor(np.ones([3, 4, 5]), mindspore.int32)
@@ -451,7 +451,7 @@ class CheckBprop(PrimitiveWithInfer):
Examples:
>>> input_x = (Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32),)
>>> input_y = (Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32),)
>>> out = P.CheckBprop()(input_x, input_y)
>>> out = ops.CheckBprop()(input_x, input_y)
"""

@prim_attr_register
@@ -519,7 +519,7 @@ class ConfusionMatrix(PrimitiveWithInfer):
Tensor, the confusion matrix, with shape (`num_classes`, `num_classes`).

Examples:
>>> confusion_matrix = P.ConfusionMatrix(4)
>>> confusion_matrix = ops.ConfusionMatrix(4)
>>> labels = Tensor([0, 1, 1, 3], mindspore.int32)
>>> predictions = Tensor([1, 2, 1, 3], mindspore.int32)
>>> output = confusion_matrix(labels, predictions)
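
The hunk cuts off before the printed matrix. Assuming the usual convention of rows indexed by true labels and columns by predictions, the expected result can be sketched in NumPy:

import numpy as np

labels = np.array([0, 1, 1, 3])
predictions = np.array([1, 2, 1, 3])
num_classes = 4
cm = np.zeros((num_classes, num_classes), dtype=np.int32)
for t, p in zip(labels, predictions):
    cm[t, p] += 1  # rows: true class, columns: predicted class
print(cm)
# [[0 1 0 0]
#  [0 1 1 0]
#  [0 0 0 0]
#  [0 0 0 1]]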
@@ -567,7 +567,7 @@ class PopulationCount(PrimitiveWithInfer):
``Ascend``

Examples:
>>> population_count = P.PopulationCount()
>>> population_count = ops.PopulationCount()
>>> x_input = Tensor([0, 1, 3], mindspore.int16)
>>> output = population_count(x_input)
>>> print(output)
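
The hunk also ends before the expected output here. Under the plain popcount definition (number of set bits per element), the result for [0, 1, 3] would be [0, 1, 2], which a one-liner confirms:

x = [0, 1, 3]
print([bin(v & 0xFFFF).count("1") for v in x])  # [0, 1, 2]; int16 inputs have at most 16 bits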


+ 10
- 10
mindspore/ops/operations/random_ops.py View File

@@ -39,7 +39,7 @@ class StandardNormal(PrimitiveWithInfer):

Examples:
>>> shape = (4, 16)
>>> stdnormal = P.StandardNormal(seed=2)
>>> stdnormal = ops.StandardNormal(seed=2)
>>> output = stdnormal(shape)
>>> result = output.shape
>>> print(result)
@@ -90,7 +90,7 @@ class StandardLaplace(PrimitiveWithInfer):

Examples:
>>> shape = (4, 16)
>>> stdlaplace = P.StandardLaplace(seed=2)
>>> stdlaplace = ops.StandardLaplace(seed=2)
>>> output = stdlaplace(shape)
>>> result = output.shape
>>> print(result)
@@ -148,7 +148,7 @@ class Gamma(PrimitiveWithInfer):
>>> shape = (2, 2)
>>> alpha = Tensor(1.0, mstype.float32)
>>> beta = Tensor(1.0, mstype.float32)
>>> gamma = P.Gamma(seed=3)
>>> gamma = ops.Gamma(seed=3)
>>> output = gamma(shape, alpha, beta)
>>> print(output)
[[0.21962446 0.33740655]
@@ -206,7 +206,7 @@ class Poisson(PrimitiveWithInfer):
Examples:
>>> shape = (4, 16)
>>> mean = Tensor(5.0, mstype.float32)
>>> poisson = P.Poisson(seed=5)
>>> poisson = ops.Poisson(seed=5)
>>> output = poisson(shape, mean)
"""

@@ -265,7 +265,7 @@ class UniformInt(PrimitiveWithInfer):
>>> shape = (2, 4)
>>> minval = Tensor(1, mstype.int32)
>>> maxval = Tensor(5, mstype.int32)
>>> uniform_int = P.UniformInt(seed=10)
>>> uniform_int = ops.UniformInt(seed=10)
>>> output = uniform_int(shape, minval, maxval)
>>> print(output)
[[4 2 1 3]
@@ -318,7 +318,7 @@ class UniformReal(PrimitiveWithInfer):

Examples:
>>> shape = (2, 2)
>>> uniformreal = P.UniformReal(seed=2)
>>> uniformreal = ops.UniformReal(seed=2)
>>> output = uniformreal(shape)
>>> print(output)
[[0.4359949 0.18508208]
@@ -374,7 +374,7 @@ class RandomChoiceWithMask(PrimitiveWithInfer):
``Ascend`` ``GPU``

Examples:
>>> rnd_choice_mask = P.RandomChoiceWithMask()
>>> rnd_choice_mask = ops.RandomChoiceWithMask()
>>> input_x = Tensor(np.ones(shape=[240000, 4]).astype(np.bool_))
>>> output_y, output_mask = rnd_choice_mask(input_x)
>>> result = output_y.shape
@@ -426,7 +426,7 @@ class RandomCategorical(PrimitiveWithInfer):
>>> class Net(nn.Cell):
... def __init__(self, num_sample):
... super(Net, self).__init__()
... self.random_categorical = P.RandomCategorical(mindspore.int64)
... self.random_categorical = ops.RandomCategorical(mindspore.int64)
... self.num_sample = num_sample
... def construct(self, logits, seed=0):
... return self.random_categorical(logits, self.num_sample, seed)
@@ -502,7 +502,7 @@ class Multinomial(PrimitiveWithInfer):

Examples:
>>> input = Tensor([0., 9., 4., 0.], mstype.float32)
>>> multinomial = P.Multinomial(seed=10)
>>> multinomial = ops.Multinomial(seed=10)
>>> output = multinomial(input, 2)
"""

@@ -561,7 +561,7 @@ class UniformCandidateSampler(PrimitiveWithInfer):
each of sampled_candidates. Shape: (num_sampled, ).

Examples:
>>> sampler = P.UniformCandidateSampler(1, 3, False, 4)
>>> sampler = ops.UniformCandidateSampler(1, 3, False, 4)
>>> output1, output2, output3 = sampler(Tensor(np.array([[1],[3],[4],[6],[3]], dtype=np.int32)))
>>> print(output1, output2, output3)
[1, 1, 3], [[0.75], [0.75], [0.75], [0.75], [0.75]], [0.75, 0.75, 0.75]
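
The 0.75 entries check out: with unique=False, num_sampled=3 ids are drawn uniformly at random from range_max=4, so the expected number of draws per id is 3/4 = 0.75, for the true classes and the sampled candidates alike (assuming that is the op's definition of expected count, which the printed output supports):

num_sampled, range_max = 3, 4
print(num_sampled / range_max)  # 0.75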


+ 1
- 1
mindspore/ops/operations/sparse_ops.py View File

@@ -38,7 +38,7 @@ class SparseToDense(PrimitiveWithInfer):
>>> indices = Tensor([[0, 1], [1, 2]])
>>> values = Tensor([1, 2], dtype=ms.float32)
>>> dense_shape = (3, 4)
>>> out = P.SparseToDense()(indices, values, dense_shape)
>>> out = ops.SparseToDense()(indices, values, dense_shape)
"""

@prim_attr_register
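
SparseToDense scatters each value at its index pair into a dense tensor. A minimal NumPy sketch of that semantics, assuming zero fill for unspecified positions (consistent with the example, but not the kernel itself):

import numpy as np

indices = np.array([[0, 1], [1, 2]])
values = np.array([1, 2], dtype=np.float32)
dense_shape = (3, 4)
out = np.zeros(dense_shape, dtype=values.dtype)
out[tuple(indices.T)] = values  # scatter values at the given (row, col) positions
print(out)
# [[0. 1. 0. 0.]
#  [0. 0. 2. 0.]
#  [0. 0. 0. 0.]]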

