Browse Source

!8320 Updating several files' notes in ops folder

From: @zhangz0911gm
Reviewed-by: 
Signed-off-by:
tags/v1.1.0
mindspore-ci-bot Gitee 5 years ago
parent
commit
ea0c13bcda
11 changed files with 106 additions and 20 deletions
  1. +1
    -0
      mindspore/ops/composite/random_ops.py
  2. +2
    -2
      mindspore/ops/op_selector.py
  3. +4
    -0
      mindspore/ops/operations/_inner_ops.py
  4. +5
    -5
      mindspore/ops/operations/_quant_ops.py
  5. +1
    -1
      mindspore/ops/operations/_thor_ops.py
  6. +59
    -5
      mindspore/ops/operations/array_ops.py
  7. +10
    -1
      mindspore/ops/operations/math_ops.py
  8. +18
    -2
      mindspore/ops/operations/nn_ops.py
  9. +1
    -1
      mindspore/ops/operations/other_ops.py
  10. +2
    -0
      mindspore/ops/operations/random_ops.py
  11. +3
    -3
      mindspore/ops/primitive.py

+ 1
- 0
mindspore/ops/composite/random_ops.py View File

@@ -50,6 +50,7 @@ def normal(shape, mean, stddev, seed=None):
>>> mean = Tensor(1.0, mstype.float32)
>>> stddev = Tensor(1.0, mstype.float32)
>>> output = C.normal(shape, mean, stddev, seed=5)
>>> print(output)
[[1.0996436 0.44371283 0.11127508 -0.48055804]
[0.31989878 -1.0644426 1.5076542 1.2290289 ]]
"""


+ 2
- 2
mindspore/ops/op_selector.py View File

@@ -40,7 +40,7 @@ class _OpSelector:
Examples:
>>> class A: pass
>>> selected_op = _OpSelector(A, "GraphKernel",
>>> "graph_kernel.ops.pkg", "primitive.ops.pkg")
... "graph_kernel.ops.pkg", "primitive.ops.pkg")
>>> # selected_op() will call graph_kernel.ops.pkg.A()
"""
GRAPH_KERNEL = "GraphKernel"
@@ -92,7 +92,7 @@ def new_ops_selector(primitive_pkg, graph_kernel_pkg):

Examples:
>>> op_selector = new_ops_selector("primitive_pkg.some.path",
>>> "graph_kernel_pkg.some.path")
... "graph_kernel_pkg.some.path")
>>> @op_selector
>>> class ReduceSum: pass
"""


+ 4
- 0
mindspore/ops/operations/_inner_ops.py View File

@@ -294,6 +294,7 @@ class LinSpace(PrimitiveWithInfer):
>>> stop = Tensor(10, mindspore.float32)
>>> num = Tensor(5, mindspore.int32)
>>> output = linspace(assist, start, stop, num)
>>> print(output)
[12.25, 13.375]
"""

@@ -329,6 +330,7 @@ class MatrixDiag(PrimitiveWithInfer):
>>> assist = Tensor(np.arange(-12, 0).reshape(3, 2, 2), mindspore.float32)
>>> matrix_diag = P.MatrixDiag()
>>> result = matrix_diag(x, assist)
>>> print(result)
[[[-12. 11.]
[-10. 9.]]
[[ -8. 7.]
@@ -382,6 +384,7 @@ class MatrixDiagPart(PrimitiveWithInfer):
>>> assist = Tensor(np.arange(-12, 0).reshape(3, 2, 2), mindspore.float32)
>>> matrix_diag_part = P.MatrixDiagPart()
>>> result = matrix_diag_part(x, assist)
>>> print(result)
[[12., -9.], [8., -5.], [4., -1.]]
"""

@@ -424,6 +427,7 @@ class MatrixSetDiag(PrimitiveWithInfer):
>>> diagonal = Tensor([[-1., 2.], [-1., 1.], [-1., 1.]], mindspore.float32)
>>> matrix_set_diag = P.MatrixSetDiag()
>>> result = matrix_set_diag(x, diagonal)
>>> print(result)
[[[-1, 0], [0, 2]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]]

"""


+ 5
- 5
mindspore/ops/operations/_quant_ops.py View File

@@ -187,7 +187,7 @@ class FakeQuantWithMinMaxVars(PrimitiveWithInfer):
>>> min_tensor = Tensor(np.array([-6]), mstype.float32)
>>> max_tensor = Tensor(np.array([6]), mstype.float32)
>>> output_tensor = FakeQuantWithMinMaxVars(num_bits=8, narrow_range=False)(
>>> input_tensor, min_tensor, max_tensor)
... input_tensor, min_tensor, max_tensor)
>>> output_tensor shape: (3, 16, 5, 5) data type: mstype.float32
"""

@@ -249,7 +249,7 @@ class FakeQuantWithMinMaxVarsGradient(PrimitiveWithInfer):
>>> min_tensor = Tensor(np.array([-6]), mstype.float32)
>>> max_tensor = Tensor(np.array([6]), mstype.float32)
>>> x_gradient, min_gradient, max_gradient = FakeQuantWithMinMaxVarsGradient(num_bits=8,narrow_range=False)
>>> (gradients, input_tensor, min_tensor, max_tensor)
... (gradients, input_tensor, min_tensor, max_tensor)
>>> x_gradient shape: (3, 16, 5, 5) data type: mstype.float32
>>> min_gradient shape: (1,) data type: mstype.float32
>>> max_gradient shape: (1,) data type: mstype.float32
@@ -310,7 +310,7 @@ class FakeQuantWithMinMaxVarsPerChannel(PrimitiveWithInfer):
>>> min_tensor = Tensor(np.array([-6, -1, -2, -3]), mstype.float32)
>>> max_tensor = Tensor(np.array([6, 1, 2, 3]), mstype.float32)
>>> output_tensor = FakeQuantWithMinMaxVars(num_bits=8, narrow_range=False)(
>>> input_tensor, min_tensor, max_tensor)
... input_tensor, min_tensor, max_tensor)
>>> output_tensor shape: (3, 16, 3, 4) data type: mstype.float32
"""

@@ -365,8 +365,8 @@ class FakeQuantWithMinMaxVarsPerChannelGradient(PrimitiveWithInfer):
>>> min_tensor = Tensor(np.array([-6, -1, -2, -3]), mstype.float32)
>>> max_tensor = Tensor(np.array([6, 1, 2, 3]), mstype.float32)
>>> x_gradient, min_gradient, max_gradient = FakeQuantWithMinMaxVarsPerChannelGradient(
>>> num_bits=8, narrow_range=False)(
>>> gradients, input_tensor, min_tensor, max_tensor)
... num_bits=8, narrow_range=False)(
... gradients, input_tensor, min_tensor, max_tensor)
>>> x_gradient shape: (3, 16, 3, 4) data type: mstype.float32
>>> min_gradient shape: (4,) data type: mstype.float32
>>> max_gradient shape: (4,) data type: mstype.float32


+ 1
- 1
mindspore/ops/operations/_thor_ops.py View File

@@ -585,7 +585,7 @@ class UpdateThorGradient(PrimitiveWithInfer):
>>> temp_x3 = np.random.rand(8, 128, 128).astype(np.float32)
>>> input_x3 = np.zeros((16, 8, 128, 128)).astype(np.float32)
>>> for i in range(16):
>>> input_x3[i,:,:,:] = temp_x3
... input_x3[i,:,:,:] = temp_x3
>>> input_x3 = Tensor(input_x3)
>>> update_thor_gradient = P.UpdateThorGradient(split_dim=128)
>>> output = update_thor_gradient(input_x1, input_x2, input_x3)


+ 59
- 5
mindspore/ops/operations/array_ops.py View File

@@ -147,6 +147,7 @@ class ExpandDims(PrimitiveWithInfer):
>>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> expand_dims = P.ExpandDims()
>>> output = expand_dims(input_tensor, 0)
>>> print(output)
[[[2.0, 2.0],
[2.0, 2.0]]]
"""
@@ -230,6 +231,7 @@ class SameTypeShape(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> input_y = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> out = P.SameTypeShape()(input_x, input_y)
>>> print(out)
[[2. 2.]
[2. 2.]]
"""
@@ -341,6 +343,7 @@ class IsSubClass(PrimitiveWithInfer):

Examples:
>>> result = P.IsSubClass()(mindspore.int32, mindspore.intc)
>>> print(result)
True
"""

@@ -377,6 +380,7 @@ class IsInstance(PrimitiveWithInfer):
Examples:
>>> a = 1
>>> result = P.IsInstance()(a, mindspore.int32)
>>> print(result)
True
"""

@@ -424,6 +428,7 @@ class Reshape(PrimitiveWithInfer):
>>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> reshape = P.Reshape()
>>> output = reshape(input_tensor, (3, 2))
>>> print(output)
[[-0.1 0.3]
[3.6 0.4 ]
[0.5 -3.2]]
@@ -490,6 +495,7 @@ class Shape(PrimitiveWithInfer):
>>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
>>> shape = P.Shape()
>>> output = shape(input_tensor)
>>> print(output)
(3, 2, 1)
"""

@@ -554,6 +560,7 @@ class Squeeze(PrimitiveWithInfer):
>>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
>>> squeeze = P.Squeeze(2)
>>> output = squeeze(input_tensor)
>>> print(output)
[[1. 1.]
[1. 1.]
[1. 1.]]
@@ -609,6 +616,7 @@ class Transpose(PrimitiveWithCheck):
>>> perm = (0, 2, 1)
>>> transpose = P.Transpose()
>>> output = transpose(input_tensor, perm)
>>> print(output)
[[[1. 4.]
[2. 5.]
[3. 6.]]
@@ -673,6 +681,7 @@ class GatherV2(PrimitiveWithCheck):
>>> input_indices = Tensor(np.array([1, 2]), mindspore.int32)
>>> axis = 1
>>> out = P.GatherV2()(input_params, input_indices, axis)
>>> print(out)
[[2.0, 7.0],
[4.0, 54.0],
[2.0, 55.0]]
@@ -746,6 +755,7 @@ class Padding(PrimitiveWithInfer):
>>> x = Tensor(np.array([[8], [10]]), mindspore.float32)
>>> pad_dim_size = 4
>>> out = P.Padding(pad_dim_size)(x)
>>> print(out)
[[8, 0, 0, 0], [10, 0, 0, 0]]
"""

@@ -786,6 +796,7 @@ class UniqueWithPad(PrimitiveWithInfer):
>>> x = Tensor(np.array([1, 1, 5, 5, 4, 4, 3, 3, 2, 2,]), mindspore.int32)
>>> pad_num = 8
>>> out = P.UniqueWithPad()(x, pad_num)
>>> print(out)
([1, 5, 4, 3, 2, 8, 8, 8, 8, 8], [0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
"""

@@ -829,6 +840,7 @@ class Split(PrimitiveWithInfer):
>>> split = P.Split(1, 2)
>>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))
>>> output = split(x)
>>> print(output)
([[1, 1],
[2, 2]],
[[1, 1],
@@ -884,7 +896,8 @@ class Rank(PrimitiveWithInfer):
Examples:
>>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> rank = P.Rank()
>>> rank(input_tensor)
>>> output = rank(input_tensor)
>>> print(output)
2
"""

@@ -956,6 +969,7 @@ class Size(PrimitiveWithInfer):
>>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> size = P.Size()
>>> output = size(input_tensor)
>>> print(output)
4
"""

@@ -993,7 +1007,8 @@ class Fill(PrimitiveWithInfer):

Examples:
>>> fill = P.Fill()
>>> fill(mindspore.float32, (2, 2), 1)
>>> output = fill(mindspore.float32, (2, 2), 1)
>>> print(output)
[[1.0, 1.0],
[1.0, 1.0]]
"""
@@ -1124,6 +1139,7 @@ class OnesLike(PrimitiveWithInfer):
>>> oneslike = P.OnesLike()
>>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
>>> output = oneslike(x)
>>> print(output)
[[1, 1],
[1, 1]]
"""
@@ -1156,6 +1172,7 @@ class ZerosLike(PrimitiveWithCheck):
>>> zeroslike = P.ZerosLike()
>>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
>>> output = zeroslike(x)
>>> print(output)
[[0.0, 0.0],
[0.0, 0.0]]
"""
@@ -1184,6 +1201,7 @@ class TupleToArray(PrimitiveWithInfer):

Examples:
>>> type = P.TupleToArray()((1,2,3))
>>> print(type)
[1 2 3]
"""

@@ -1228,6 +1246,7 @@ class ScalarToArray(PrimitiveWithInfer):
>>> op = P.ScalarToArray()
>>> data = 1.0
>>> output = op(data)
>>> print(output)
1.0
"""

@@ -1260,6 +1279,7 @@ class ScalarToTensor(PrimitiveWithInfer):
>>> op = P.ScalarToTensor()
>>> data = 1
>>> output = op(data, mindspore.float32)
>>> print(output)
1.0
"""

@@ -1365,6 +1385,7 @@ class Argmax(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)
>>> index = P.Argmax(output_type=mindspore.int32)(input_x)
>>> print(index)
1
"""

@@ -1523,6 +1544,7 @@ class ArgMinWithValue(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.random.rand(5), mindspore.float32)
>>> index, output = P.ArgMinWithValue()(input_x)
>>> print((index, output))
0 0.0496291
"""

@@ -1579,6 +1601,7 @@ class Tile(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
>>> multiples = (2, 3)
>>> result = tile(input_x, multiples)
>>> print(result)
[[1. 2. 1. 2. 1. 2.]
[3. 4. 3. 4. 3. 4.]
[1. 2. 1. 2. 1. 2.]
@@ -1884,6 +1907,7 @@ class Concat(PrimitiveWithInfer):
>>> data2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
>>> op = P.Concat()
>>> output = op((data1, data2))
>>> print(output)
[[0, 1],
[2, 1],
[0, 1],
@@ -1931,6 +1955,7 @@ class ParallelConcat(PrimitiveWithInfer):
>>> data2 = Tensor(np.array([[2, 1]]).astype(np.int32))
>>> op = P.ParallelConcat()
>>> output = op((data1, data2))
>>> print(output)
[[0, 1], [2, 1]]
"""

@@ -2013,6 +2038,7 @@ class Pack(PrimitiveWithInfer):
>>> data2 = Tensor(np.array([2, 3]).astype(np.float32))
>>> pack = P.Pack()
>>> output = pack([data1, data2])
>>> print(output)
[[0, 1], [2, 3]]
"""

@@ -2062,6 +2088,7 @@ class Unpack(PrimitiveWithInfer):
>>> unpack = P.Unpack()
>>> input_x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))
>>> output = unpack(input_x)
>>> print(output)
([1, 1, 1, 1], [2, 2, 2, 2])
"""

@@ -2113,9 +2140,10 @@ class Slice(PrimitiveWithInfer):

Examples:
>>> data = Tensor(np.array([[[1, 1, 1], [2, 2, 2]],
>>> [[3, 3, 3], [4, 4, 4]],
>>> [[5, 5, 5], [6, 6, 6]]]).astype(np.int32))
... [[3, 3, 3], [4, 4, 4]],
... [[5, 5, 5], [6, 6, 6]]]).astype(np.int32))
>>> type = P.Slice()(data, (1, 0, 0), (1, 1, 3))
>>> print(type)
[[[3 3 3]]]
"""

@@ -2164,6 +2192,7 @@ class ReverseV2(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
>>> op = P.ReverseV2(axis=[1])
>>> output = op(input_x)
>>> print(output)
[[4, 3, 2, 1], [8, 7, 6, 5]]
"""

@@ -2201,6 +2230,7 @@ class Rint(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([-1.6, -0.1, 1.5, 2.0]), mindspore.float32)
>>> op = P.Rint()
>>> output = op(input_x)
>>> print(output)
[-2., 0., 2., 2.]
"""

@@ -2391,7 +2421,7 @@ class StridedSlice(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
>>> [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
... [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
>>> slice = P.StridedSlice()
>>> output = slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))
>>> output.shape
@@ -2643,6 +2673,7 @@ class Eye(PrimitiveWithInfer):
Examples:
>>> eye = P.Eye()
>>> out_tensor = eye(2, 2, mindspore.int32)
>>> print(out_tensor)
[[1, 0],
[0, 1]]
"""
@@ -2681,6 +2712,7 @@ class ScatterNd(PrimitiveWithInfer):
>>> update = Tensor(np.array([3.2, 1.1]), mindspore.float32)
>>> shape = (3, 3)
>>> output = op(indices, update, shape)
>>> print(output)
[[0. 3.2 0.]
[0. 1.1 0.]
[0. 0. 0. ]]
@@ -2731,6 +2763,7 @@ class ResizeNearestNeighbor(PrimitiveWithInfer):
>>> input_tensor = Tensor(np.array([[[[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]]]), mindspore.float32)
>>> resize = P.ResizeNearestNeighbor((2, 2))
>>> output = resize(input_tensor)
>>> print(output)
[[[[-0.1 0.3]
[0.4 0.5 ]]]]
"""
@@ -2772,6 +2805,7 @@ class GatherNd(PrimitiveWithInfer):
>>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
>>> op = P.GatherNd()
>>> output = op(input_x, indices)
>>> print(output)
[-0.1, 0.5]
"""

@@ -2863,6 +2897,7 @@ class ScatterUpdate(_ScatterOp_Dynamic):
>>> updates = Tensor(np_updates, mindspore.float32)
>>> op = P.ScatterUpdate()
>>> output = op(input_x, indices, updates)
>>> print(output)
[[2.0, 1.2, 1.0],
[3.0, 1.2, 1.0]]
"""
@@ -2901,6 +2936,7 @@ class ScatterNdUpdate(_ScatterNdOp):
>>> update = Tensor(np.array([1.0, 2.2]), mindspore.float32)
>>> op = P.ScatterNdUpdate()
>>> output = op(input_x, indices, update)
>>> print(output)
[[1. 0.3 3.6]
[0.4 2.2 -3.2]]
"""
@@ -2948,6 +2984,7 @@ class ScatterMax(_ScatterOp):
>>> update = Tensor(np.ones([2, 2, 3]) * 88, mindspore.float32)
>>> scatter_max = P.ScatterMax()
>>> output = scatter_max(input_x, indices, update)
>>> print(output)
[[88.0, 88.0, 88.0], [88.0, 88.0, 88.0]]
"""

@@ -2988,6 +3025,7 @@ class ScatterMin(_ScatterOp):
>>> update = Tensor(np.ones([2, 2, 3]), mindspore.float32)
>>> scatter_min = P.ScatterMin()
>>> output = scatter_min(input_x, indices, update)
>>> print(output)
[[0.0, 1.0, 1.0], [0.0, 0.0, 0.0]]
"""

@@ -3022,6 +3060,7 @@ class ScatterAdd(_ScatterOp_Dynamic):
>>> updates = Tensor(np.ones([2, 2, 3]), mindspore.float32)
>>> scatter_add = P.ScatterAdd()
>>> output = scatter_add(input_x, indices, updates)
>>> print(output)
[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]]
"""
@prim_attr_register
@@ -3062,6 +3101,7 @@ class ScatterSub(_ScatterOp):
>>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]]), mindspore.float32)
>>> scatter_sub = P.ScatterSub()
>>> output = scatter_sub(input_x, indices, updates)
>>> print(output)
[[-1.0, -1.0, -1.0], [-1.0, -1.0, -1.0]]
"""

@@ -3096,6 +3136,7 @@ class ScatterMul(_ScatterOp):
>>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mindspore.float32)
>>> scatter_mul = P.ScatterMul()
>>> output = scatter_mul(input_x, indices, updates)
>>> print(output)
[[2.0, 2.0, 2.0], [4.0, 4.0, 4.0]]
"""

@@ -3130,6 +3171,7 @@ class ScatterDiv(_ScatterOp):
>>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mindspore.float32)
>>> scatter_div = P.ScatterDiv()
>>> output = scatter_div(input_x, indices, updates)
>>> print(output)
[[3.0, 3.0, 3.0], [1.0, 1.0, 1.0]]
"""

@@ -3164,6 +3206,7 @@ class ScatterNdAdd(_ScatterNdOp):
>>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
>>> scatter_nd_add = P.ScatterNdAdd()
>>> output = scatter_nd_add(input_x, indices, updates)
>>> print(output)
[1, 10, 9, 4, 12, 6, 7, 17]
"""

@@ -3198,6 +3241,7 @@ class ScatterNdSub(_ScatterNdOp):
>>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
>>> scatter_nd_sub = P.ScatterNdSub()
>>> output = scatter_nd_sub(input_x, indices, updates)
>>> print(output)
[1, -6, -3, 4, -2, 6, 7, -1]
"""

@@ -3229,6 +3273,7 @@ class ScatterNonAliasingAdd(_ScatterNdOp):
>>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
>>> scatter_non_aliasing_add = P.ScatterNonAliasingAdd()
>>> output = scatter_non_aliasing_add(input_x, indices, updates)
>>> print(output)
[1, 10, 9, 4, 12, 6, 7, 17]
"""

@@ -3466,6 +3511,7 @@ class BatchToSpace(PrimitiveWithInfer):
>>> op = P.BatchToSpace(block_size, crops)
>>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
>>> output = op(input_x)
>>> print(output)
[[[[1., 2.], [3., 4.]]]]

"""
@@ -3635,6 +3681,7 @@ class BatchToSpaceND(PrimitiveWithInfer):
>>> batch_to_space_nd = P.BatchToSpaceND(block_shape, crops)
>>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
>>> output = batch_to_space_nd(input_x)
>>> print(output)
[[[[1., 2.], [3., 4.]]]]

"""
@@ -3860,6 +3907,7 @@ class InplaceUpdate(PrimitiveWithInfer):
>>> v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
>>> inplace_update = P.InplaceUpdate(indices)
>>> result = inplace_update(x, v)
>>> print(result)
[[0.5, 1.0],
[1.0, 1.5],
[5.0, 6.0]]
@@ -3915,6 +3963,7 @@ class ReverseSequence(PrimitiveWithInfer):
>>> seq_lengths = Tensor(np.array([1, 2, 3]))
>>> reverse_sequence = P.ReverseSequence(seq_dim=1)
>>> output = reverse_sequence(x, seq_lengths)
>>> print(output)
[[1 2 3]
[5 4 6]
[9 8 7]]
@@ -3993,6 +4042,7 @@ class EditDistance(PrimitiveWithInfer):
>>> truth_shape = Tensor(np.array([2, 2, 2]).astype(np.int64))
>>> edit_distance = EditDistance(hypothesis_shape, truth_shape)
>>> out = edit_distance(hypothesis_indices, hypothesis_values, truth_indices, truth_values)
>>> print(out)
[[1.0, 1.0], [1.0, 1.0]]
"""

@@ -4126,6 +4176,7 @@ class EmbeddingLookup(PrimitiveWithInfer):
>>> input_indices = Tensor(np.array([[5, 2], [8, 5]]), mindspore.int32)
>>> offset = 4
>>> out = P.EmbeddingLookup()(input_params, input_indices, offset)
>>> print(out)
[[[10, 11], [0 ,0]], [[0, 0], [10, 11]]]
"""

@@ -4168,6 +4219,7 @@ class GatherD(PrimitiveWithInfer):
>>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
>>> dim = 1
>>> out = P.GatherD()(x, dim, index)
>>> print(out)
[[1, 1], [4, 3]]
"""

@@ -4212,6 +4264,7 @@ class Identity(PrimitiveWithInfer):
Examples:
>>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
>>> y = P.Identity()(x)
>>> print(y)
[1, 2, 3, 4]
"""

@@ -4246,6 +4299,7 @@ class RepeatElements(PrimitiveWithInfer):
>>> x = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
>>> repeat_elements = P.RepeatElements(rep = 2, axis = 0)
>>> output = repeat_elements(x)
>>> print(output)
[[0, 1, 2],
[0, 1, 2],
[3, 4, 5],


+ 10
- 1
mindspore/ops/operations/math_ops.py View File

@@ -460,6 +460,7 @@ class ReduceAny(_Reduce):
>>> input_x = Tensor(np.array([[True, False], [True, True]]))
>>> op = P.ReduceAny(keep_dims=True)
>>> output = op(input_x, 1)
>>> print(output)
[[True],
[True]]
"""
@@ -983,6 +984,7 @@ class Neg(PrimitiveWithInfer):
>>> neg = P.Neg()
>>> input_x = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32)
>>> result = neg(input_x)
>>> print(result)
[-1. -2. 1. -2. 0. 3.5]
"""

@@ -2893,7 +2895,8 @@ class NPUClearFloatStatus(PrimitiveWithInfer):
>>> init = alloc_status()
>>> flag = get_status(init)
>>> clear = clear_status(init)
Tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=(8,), dtype=mindspore.float32)
>>> print(clear)
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
"""

@prim_attr_register
@@ -3144,6 +3147,7 @@ class Sign(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([[2.0, 0.0, -1.0]]), mindspore.float32)
>>> sign = P.Sign()
>>> output = sign(input_x)
>>> print(output)
[[1.0, 0.0, -1.0]]
"""

@@ -3440,6 +3444,7 @@ class BesselI0e(PrimitiveWithInfer):
>>> bessel_i0e = P.BesselI0e()
>>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
>>> output = bessel_i0e(input_x)
>>> print(output)
[0.7979961, 0.5144438, 0.75117415, 0.9157829]
"""

@@ -3470,6 +3475,7 @@ class BesselI1e(PrimitiveWithInfer):
>>> bessel_i1e = P.BesselI1e()
>>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
>>> output = bessel_i1e(input_x)
>>> print(output)
[0.09507662, 0.19699717, 0.11505538, 0.04116856]
"""

@@ -3500,6 +3506,7 @@ class Inv(PrimitiveWithInfer):
>>> inv = P.Inv()
>>> input_x = Tensor(np.array([0.25, 0.4, 0.31, 0.52]), mindspore.float32)
>>> output = inv(input_x)
>>> print(output)
[4., 2.5, 3.2258065, 1.923077]
"""

@@ -3530,6 +3537,7 @@ class Invert(PrimitiveWithInfer):
>>> invert = P.Invert()
>>> input_x = Tensor(np.array([25, 4, 13, 9]), mindspore.int16)
>>> output = invert(input_x)
>>> print(output)
[-26, -5, -14, -10]
"""

@@ -3558,6 +3566,7 @@ class Eps(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor([4, 1, 2, 3], mindspore.float32)
>>> out = P.Eps()(input_x)
>>> print(out)
[1.52587891e-05, 1.52587891e-05, 1.52587891e-05, 1.52587891e-05]
"""



+ 18
- 2
mindspore/ops/operations/nn_ops.py View File

@@ -288,6 +288,7 @@ class ReLU(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> relu = P.ReLU()
>>> result = relu(input_x)
>>> print(result)
[[0, 4.0, 0.0], [2.0, 0.0, 9.0]]
"""

@@ -320,6 +321,7 @@ class ReLU6(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> relu6 = P.ReLU6()
>>> result = relu6(input_x)
>>> print(result)
[[0. 4. 0.]
[2. 0. 6.]]
"""
@@ -413,8 +415,9 @@ class Elu(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> elu = P.Elu()
>>> result = elu(input_x)
Tensor([[-0.632 4.0 -0.999]
[2.0 -0.993 9.0 ]], shape=(2, 3), dtype=mindspore.float32)
>>> print(result)
[[-0.632 4.0 -0.999]
[2.0 -0.993 9.0 ]]
"""

@prim_attr_register
@@ -1558,6 +1561,7 @@ class AvgPool(_Pool):
>>> input_x = Tensor(np.arange(1 * 3 * 3 * 4).reshape(1, 3, 3, 4), mindspore.float32)
>>> net = Net()
>>> result = net(input_x)
>>> print(result)
[[[[ 2.5 3.5 4.5]
[ 6.5 7.5 8.5]]
[[ 14.5 15.5 16.5]
@@ -1828,6 +1832,7 @@ class SoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
>>> labels = Tensor([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0]], mindspore.float32)
>>> softmax_cross = P.SoftmaxCrossEntropyWithLogits()
>>> loss, backprop = softmax_cross(logits, labels)
>>> print((loss, backprop))
([0.5899297, 0.52374405], [[0.02760027, 0.20393994, 0.01015357, 0.20393994, -0.44563377],
[0.08015892, 0.02948882, 0.08015892, -0.4077012, 0.21789455]])
"""
@@ -2850,6 +2855,7 @@ class PReLU(PrimitiveWithInfer):
>>> weight = Tensor(np.array([0.1, 0.6, -0.3]), mindspore.float32)
>>> net = Net()
>>> result = net(input_x, weight)
>>> print(result)
[[[-0.1, 1.0],
[0.0, 2.0],
[0.0, 0.0]],
@@ -3106,6 +3112,7 @@ class MirrorPad(PrimitiveWithInfer):
>>> paddings = Tensor([[1,1],[2,2]])
>>> pad = Net()
>>> ms_output = pad(Tensor(x), paddings)
>>> print(ms_output)
[[0.5525309 0.49183875 0.99110144 0.49183875 0.5525309 0.49183875 0.99110144]
[0.31417271 0.96308136 0.934709 0.96308136 0.31417271 0.96308136 0.934709 ]
[0.5525309 0.49183875 0.99110144 0.49183875 0.5525309 0.49183875 0.99110144]
@@ -3176,6 +3183,7 @@ class ROIAlign(PrimitiveWithInfer):
>>> rois = Tensor(np.array([[0, 0.2, 0.3, 0.2, 0.3]]), mindspore.float32)
>>> roi_align = P.ROIAlign(2, 2, 0.5, 2)
>>> output_tensor = roi_align(input_tensor, rois)
>>> print(output_tensor)
[[[[1.77499998e+00, 2.02500010e+00],
[2.27500010e+00, 2.52500010e+00]]]]
"""
@@ -3879,6 +3887,7 @@ class BinaryCrossEntropy(PrimitiveWithInfer):
>>> input_y = Tensor(np.array([0., 1., 0.]), mindspore.float32)
>>> weight = Tensor(np.array([1, 2, 2]), mindspore.float32)
>>> result = net(input_x, input_y, weight)
>>> print(result)
0.38240486
"""

@@ -4363,6 +4372,7 @@ class SparseApplyAdagrad(PrimitiveWithInfer):
>>> grad = Tensor(np.random.rand(1, 1, 1).astype(np.float32))
>>> indices = Tensor([0], mstype.int32)
>>> result = net(grad, indices)
>>> print(result)
([[[1.0]]], [[[1.0]]])
"""

@@ -4452,6 +4462,7 @@ class SparseApplyAdagradV2(PrimitiveWithInfer):
>>> grad = Tensor(np.random.rand(1, 1, 1).astype(np.float32))
>>> indices = Tensor([0], mstype.int32)
>>> result = net(grad, indices)
>>> print(result)
([[[1.0]]], [[[1.67194188]]])
"""

@@ -4649,6 +4660,7 @@ class SparseApplyProximalAdagrad(PrimitiveWithCheck):
>>> grad = Tensor(np.random.rand(1, 3).astype(np.float32))
>>> indices = Tensor(np.ones((1,), np.int32))
>>> output = net(grad, indices)
>>> print(output)
([[6.94971561e-01, 5.24479389e-01, 5.52502394e-01]],
[[1.69961065e-01, 9.21632349e-01, 7.83344746e-01]])
"""
@@ -5178,6 +5190,7 @@ class ApplyFtrl(PrimitiveWithInfer):
>>> net = ApplyFtrlNet()
>>> input_x = Tensor(np.random.randint(-4, 4, (3, 3)), mindspore.float32)
>>> result = net(input_x)
>>> print(result)
[[0.67455846 0.14630564 0.160499 ]
[0.16329421 0.00415689 0.05202988]
[0.18672481 0.17418946 0.36420345]]
@@ -5265,6 +5278,7 @@ class SparseApplyFtrl(PrimitiveWithCheck):
>>> grad = Tensor(np.random.rand(1, 1).astype(np.float32))
>>> indices = Tensor(np.ones([1]), mindspore.int32)
>>> output = net(grad, indices)
>>> print(output)
([[1.02914639e-01]], [[7.60280550e-01]], [[7.64630079e-01]])
"""

@@ -5363,6 +5377,7 @@ class SparseApplyFtrlV2(PrimitiveWithInfer):
>>> grad = Tensor(np.random.rand(1, 3).astype(np.float32))
>>> indices = Tensor(np.ones([1]), mindspore.int32)
>>> output = net(grad, indices)
>>> print(output)
([[3.98493223e-02, 4.38684933e-02, 8.25387388e-02]],
[[6.40987396e-01, 7.19417334e-01, 1.52606890e-01]],
[[7.43463933e-01, 2.92334408e-01, 6.81572020e-01]])
@@ -5876,6 +5891,7 @@ class InTopK(PrimitiveWithInfer):
>>> x2 = Tensor(np.array([1, 3]), mindspore.int32)
>>> in_top_k = P.InTopK(3)
>>> result = in_top_k(x1, x2)
>>> print(result)
[True False]
"""



+ 1
- 1
mindspore/ops/operations/other_ops.py View File

@@ -169,7 +169,7 @@ class BoundingBoxDecode(PrimitiveWithInfer):
>>> anchor_box = Tensor([[4,1,2,1],[2,2,2,3]],mindspore.float32)
>>> deltas = Tensor([[3,1,2,2],[1,2,1,4]],mindspore.float32)
>>> boundingbox_decode = P.BoundingBoxDecode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0),
>>> max_shape=(768, 1280), wh_ratio_clip=0.016)
... max_shape=(768, 1280), wh_ratio_clip=0.016)
>>> boundingbox_decode(anchor_box, deltas)
[[4.1953125 0. 0. 5.1953125]
[2.140625 0. 3.859375 60.59375]]


+ 2
- 0
mindspore/ops/operations/random_ops.py View File

@@ -250,6 +250,7 @@ class UniformInt(PrimitiveWithInfer):
>>> maxval = Tensor(5, mstype.int32)
>>> uniform_int = P.UniformInt(seed=10)
>>> output = uniform_int(shape, minval, maxval)
>>> print(output)
[[4 2 1 3]
[4 3 4 5]]
"""
@@ -299,6 +300,7 @@ class UniformReal(PrimitiveWithInfer):
>>> shape = (2, 2)
>>> uniformreal = P.UniformReal(seed=2)
>>> output = uniformreal(shape)
>>> print(output)
[[0.4359949 0.18508208]
[0.02592623 0.93154085]]
"""


+ 3
- 3
mindspore/ops/primitive.py View File

@@ -477,13 +477,13 @@ def constexpr(fn=None, get_instance=True, name=None):
>>> # make an operator to calculate tuple len
>>> @constexpr
>>> def tuple_len(x):
>>> return len(x)
... return len(x)
>>> assert tuple_len(a) == 2
>>>
...
>>> # make a operator class to calculate tuple len
>>> @constexpr(get_instance=False, name="TupleLen")
>>> def tuple_len_class(x):
>>> return len(x)
... return len(x)
>>> assert tuple_len_class()(a) == 2
"""



Loading…
Cancel
Save