@@ -132,7 +132,8 @@ class Range(PrimitiveWithInfer):
     Examples:
         >>> range = P.Range(1.0, 8.0, 2.0)
         >>> x = Tensor(np.array([1, 2, 3, 2]), mindspore.int32)
-        >>> range(x)
+        >>> output = range(x)
+        >>> print(output)
         [3, 5, 7, 5]
     """
@@ -524,7 +525,8 @@ class DynamicGRUV2(PrimitiveWithInfer):
         >>> init_h = Tensor(np.random.rand(8, 16).astype(np.float16))
         >>> dynamic_gru_v2 = P.DynamicGRUV2()
         >>> output = dynamic_gru_v2(x, weight_i, weight_h, bias_i, bias_h, None, init_h)
-        >>> output[0].shape
+        >>> result = output[0].shape
+        >>> print(result)
         (2, 8, 16)
     """
@@ -1072,7 +1072,8 @@ class Ones(PrimitiveWithInfer):
     Examples:
         >>> ones = P.Ones()
-        >>> Ones((2, 2), mindspore.float32)
+        >>> output = ones((2, 2), mindspore.float32)
+        >>> print(output)
         [[1.0, 1.0],
          [1.0, 1.0]]
     """
@@ -1115,7 +1116,8 @@ class Zeros(PrimitiveWithInfer):
     Examples:
         >>> zeros = P.Zeros()
-        >>> Zeros((2, 2), mindspore.float32)
+        >>> output = zeros((2, 2), mindspore.float32)
+        >>> print(output)
         [[0.0, 0.0],
          [0.0, 0.0]]
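The Ones and Zeros hunks apply the same assign-and-print pattern. A small combined sketch, assuming the instance-call form `ones(shape, dtype)` / `zeros(shape, dtype)` shown in these docstrings and the standard example imports:

    import mindspore
    from mindspore.ops import operations as P

    ones = P.Ones()
    zeros = P.Zeros()
    output_ones = ones((2, 2), mindspore.float32)    # 2x2 tensor filled with 1.0
    output_zeros = zeros((2, 2), mindspore.float32)  # 2x2 tensor filled with 0.0
    print(output_ones)
    print(output_zeros)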
@@ -2615,7 +2617,8 @@ class Diag(PrimitiveWithInfer):
     Examples:
         >>> input_x = Tensor([1, 2, 3, 4])
         >>> diag = P.Diag()
-        >>> diag(input_x)
+        >>> output = diag(input_x)
+        >>> print(output)
         [[1, 0, 0, 0],
          [0, 2, 0, 0],
          [0, 0, 3, 0],
@@ -348,7 +348,8 @@ class ReduceMean(_Reduce):
         >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
         >>> op = P.ReduceMean(keep_dims=True)
         >>> output = op(input_x, 1)
-        >>> output.shape
+        >>> result = output.shape
+        >>> print(result)
         (3, 1, 5, 6)
     """
@@ -501,7 +502,8 @@ class ReduceMax(_Reduce):
         >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
         >>> op = P.ReduceMax(keep_dims=True)
         >>> output = op(input_x, 1)
-        >>> output.shape
+        >>> result = output.shape
+        >>> print(result)
         (3, 1, 5, 6)
     """
@@ -545,7 +547,8 @@ class ReduceMin(_Reduce):
         >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
         >>> op = P.ReduceMin(keep_dims=True)
         >>> output = op(input_x, 1)
-        >>> output.shape
+        >>> result = output.shape
+        >>> print(result)
         (3, 1, 5, 6)
     """
@@ -580,7 +583,8 @@ class ReduceProd(_Reduce):
         >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
         >>> op = P.ReduceProd(keep_dims=True)
         >>> output = op(input_x, 1)
-        >>> output.shape
+        >>> result = output.shape
+        >>> print(result)
         (3, 1, 5, 6)
     """
@@ -661,7 +661,8 @@ class FusedBatchNorm(Primitive):
         >>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
         >>> net = FusedBatchNormNet()
         >>> output = net(input_x)
-        >>> output[0].shape
+        >>> result = output[0].shape
+        >>> print(result)
         (128, 64, 32, 64)
     """
     __mindspore_signature__ = (
@@ -754,7 +755,8 @@ class FusedBatchNormEx(PrimitiveWithInfer):
         >>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
         >>> net = FusedBatchNormExNet()
         >>> output = net(input_x)
-        >>> output[0].shape
+        >>> result = output[0].shape
+        >>> print(result)
         (128, 64, 32, 64)
     """
     __mindspore_signature__ = (
@@ -1803,6 +1805,7 @@ class TopK(PrimitiveWithInfer):
         >>> input_x = Tensor([1, 2, 3, 4, 5], mindspore.float16)
         >>> k = 3
         >>> values, indices = topk(input_x, k)
+        >>> print((values, indices))
         ([5.0, 4.0, 3.0], [4, 3, 2])
     """
@@ -38,7 +38,8 @@ class StandardNormal(PrimitiveWithInfer):
         >>> shape = (4, 16)
         >>> stdnormal = P.StandardNormal(seed=2)
         >>> output = stdnormal(shape)
-        >>> output.shape
+        >>> result = output.shape
+        >>> print(result)
         (4, 16)
     """
@@ -85,7 +86,8 @@ class StandardLaplace(PrimitiveWithInfer):
         >>> shape = (4, 16)
         >>> stdlaplace = P.StandardLaplace(seed=2)
         >>> output = stdlaplace(shape)
-        >>> output.shape
+        >>> result = output.shape
+        >>> print(result)
         (4, 16)
     """
@@ -354,9 +356,11 @@ class RandomChoiceWithMask(PrimitiveWithInfer):
         >>> rnd_choice_mask = P.RandomChoiceWithMask()
         >>> input_x = Tensor(np.ones(shape=[240000, 4]).astype(np.bool))
         >>> output_y, output_mask = rnd_choice_mask(input_x)
-        >>> output_y.shape
+        >>> result = output_y.shape
+        >>> print(result)
         (256, 2)
-        >>> output_mask.shape
+        >>> result = output_mask.shape
+        >>> print(result)
         (256,)
     """