@@ -378,7 +378,7 @@ class PSNR(Cell):
         >>> img1 = Tensor(np.random.random((1,3,16,16)))
         >>> img2 = Tensor(np.random.random((1,3,16,16)))
         >>> psnr = net(img1, img2)
+        [7.8297315]
     """
     def __init__(self, max_val=1.0):
         super(PSNR, self).__init__()
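A sanity check on the value added here: PSNR is 10 * log10(max_val**2 / MSE), and for two independent uniform [0, 1) images the expected MSE is about 1/6, so 10 * log10(6) ≈ 7.78 — consistent with the 7.8297315 shown, even though the exact number depends on the random draw. A minimal NumPy sketch (the `psnr` helper below is mine, not MindSpore API):

```python
import numpy as np

def psnr(img1, img2, max_val=1.0):
    """Peak signal-to-noise ratio: 10 * log10(max_val**2 / MSE)."""
    mse = np.mean((img1 - img2) ** 2)
    return 10 * np.log10(max_val ** 2 / mse)

img1 = np.random.random((1, 3, 16, 16))
img2 = np.random.random((1, 3, 16, 16))
print(psnr(img1, img2))  # ~7.8: MSE of two independent U[0,1) images is about 1/6
```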
@@ -137,6 +137,7 @@ class MSELoss(_Loss):
         >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
         >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
         >>> loss(input_data, target_data)
+        0.33333334
     """
     def construct(self, base, target):
        x = F.square(base - target)
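The added value checks out by hand: the element-wise squared errors are (0, 0, 1), and their mean is 1/3, which float32 prints as 0.33333334:

```python
import numpy as np

input_data = np.array([1, 2, 3], dtype=np.float32)
target_data = np.array([1, 2, 2], dtype=np.float32)
# mean((x - y)**2) = mean([0, 0, 1]) = 1/3
print(np.mean(np.square(input_data - target_data)))  # 0.33333334
```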
@@ -79,10 +79,12 @@ def normal(shape, mean, stddev, seed=None):
         The dtype is float32.
 
     Examples:
-        >>> shape = (4, 16)
+        >>> shape = (2, 4)
         >>> mean = Tensor(1.0, mstype.float32)
         >>> stddev = Tensor(1.0, mstype.float32)
         >>> output = C.normal(shape, mean, stddev, seed=5)
+        [[1.0996436   0.44371283  0.11127508 -0.48055804]
+         [0.31989878 -1.0644426   1.5076542   1.2290289 ]]
     """
     mean_dtype = F.dtype(mean)
     stddev_dtype = F.dtype(stddev)
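The sampled numbers are tied to MindSpore's generator and seed=5, so they won't reproduce elsewhere; what the example pins down is the contract: shape (2, 4), float32, samples from N(mean, stddev**2). A NumPy sketch of that contract (NumPy's generator, so the values themselves will differ):

```python
import numpy as np

rng = np.random.default_rng(5)  # not MindSpore's RNG: same seed, different stream
output = rng.normal(loc=1.0, scale=1.0, size=(2, 4)).astype(np.float32)
assert output.shape == (2, 4) and output.dtype == np.float32
```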
@@ -446,6 +446,7 @@ class Shape(Primitive):
         >>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
         >>> shape = P.Shape()
         >>> output = shape(input_tensor)
+        (3, 2, 1)
     """
 
     @prim_attr_register
@@ -501,6 +502,9 @@ class Squeeze(PrimitiveWithInfer):
         >>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
         >>> squeeze = P.Squeeze(2)
         >>> output = squeeze(input_tensor)
+        [[1. 1.]
+         [1. 1.]
+         [1. 1.]]
     """
 
     @prim_attr_register
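Both outputs added in the two hunks above follow plain NumPy semantics: `Shape` returns the static shape as a Python tuple, and `Squeeze(2)` removes axis 2 (legal because its length is 1), turning (3, 2, 1) into (3, 2):

```python
import numpy as np

input_tensor = np.ones(shape=[3, 2, 1], dtype=np.float32)
print(input_tensor.shape)           # (3, 2, 1) -- what P.Shape() returns
print(np.squeeze(input_tensor, 2))  # the (3, 2) matrix of ones shown above
```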
@@ -748,6 +752,10 @@ class Split(PrimitiveWithInfer):
         >>> split = P.Split(1, 2)
         >>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))
         >>> output = split(x)
+        ([[1, 1],
+          [2, 2]],
+         [[1, 1],
+          [2, 2]])
     """
 
     @prim_attr_register
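`P.Split(1, 2)` cuts axis 1 into 2 equal pieces, so the (2, 4) input yields two (2, 2) tensors — exactly the pair shown. NumPy analogue:

```python
import numpy as np

x = np.array([[1, 1, 1, 1], [2, 2, 2, 2]])
out0, out1 = np.split(x, 2, axis=1)  # each piece is [[1, 1], [2, 2]]
```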
@@ -828,7 +836,7 @@ class TruncatedNormal(PrimitiveWithInfer):
         - **shape** (tuple[int]) - The shape of the output tensor, which is a tuple of positive integers.
 
     Outputs:
-        Tensor, the dat type of output tensor is the same as attribute `dtype`.
+        Tensor, the data type of the output tensor is the same as attribute `dtype`.
 
     Examples:
         >>> shape = (1, 2, 3)
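For readers of this docstring, "truncated" conventionally means samples farther than two standard deviations from the mean are redrawn (the TF-style convention; the hunk itself doesn't spell this out, so treat it as an assumption). A rejection-sampling sketch:

```python
import numpy as np

def truncated_normal(shape, mean=0.0, stddev=1.0, rng=None):
    """Sample N(mean, stddev**2), redrawing anything beyond 2 stddevs."""
    rng = rng or np.random.default_rng()
    out = rng.normal(mean, stddev, size=shape)
    while True:
        bad = np.abs(out - mean) > 2 * stddev
        if not bad.any():
            return out
        out[bad] = rng.normal(mean, stddev, size=int(bad.sum()))

print(truncated_normal((1, 2, 3)).shape)  # (1, 2, 3)
```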
@@ -2451,6 +2459,9 @@ class ScatterNd(PrimitiveWithInfer):
         >>> update = Tensor(np.array([3.2, 1.1]), mindspore.float32)
         >>> shape = (3, 3)
         >>> output = op(indices, update, shape)
+        [[0.  3.2 0. ]
+         [0.  1.1 0. ]
+         [0.  0.  0. ]]
     """
 
     @prim_attr_register
@@ -2676,6 +2687,8 @@ class ScatterNdUpdate(_ScatterNdOp):
         >>> update = Tensor(np.array([1.0, 2.2]), mindspore.float32)
         >>> op = P.ScatterNdUpdate()
         >>> output = op(input_x, indices, update)
+        [[ 1.   0.3  3.6]
+         [ 0.4  2.2 -3.2]]
     """
@@ -3136,7 +3149,7 @@ class SpaceToBatch(PrimitiveWithInfer):
     Args:
         block_size (int): The block size of dividing blocks with value greater than 2.
-        paddings (list): The padding values for H and W dimension, containing 2 sublists.
+        paddings (Union[tuple, list]): The padding values for H and W dimension, containing 2 sublists.
             Each sublist contains 2 integer values. All values must be greater than 0.
             paddings[i] specifies the paddings for the spatial dimension i, which corresponds to the
             input dimension i+2. It is required that input_shape[i+2]+paddings[i][0]+paddings[i][1]
             is divisible by block_size.
@@ -3284,7 +3297,7 @@ class SpaceToBatchND(PrimitiveWithInfer):
     Args:
         block_shape (Union[list(int), tuple(int)]): The block shape of dividing block with all values greater than 1.
             The length of `block_shape` is M, corresponding to the number of spatial dimensions. M must be 2.
-        paddings (list): The padding values for H and W dimension, containing 2 sublists.
+        paddings (Union[tuple, list]): The padding values for H and W dimension, containing 2 sublists.
             Each sublist contains 2 integer values. All values must be greater than 0.
             `paddings[i]` specifies the paddings for the spatial dimension i,
             which corresponds to the input dimension i+2.
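Both SpaceToBatch hunks document the same shape rule: each padded spatial extent must be divisible by the block size. A small checker makes the rule concrete (`check_space_to_batch` is a hypothetical helper, not MindSpore API):

```python
def check_space_to_batch(input_shape, block_size, paddings):
    """Raise if any padded spatial dim is not a multiple of block_size."""
    for i, (pad_before, pad_after) in enumerate(paddings):
        padded = input_shape[i + 2] + pad_before + pad_after
        if padded % block_size != 0:
            raise ValueError(f"spatial dim {i}: {padded} % {block_size} != 0")

check_space_to_batch((1, 1, 2, 2), 2, [[0, 0], [0, 0]])  # passes: 2 % 2 == 0
```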
@@ -413,6 +413,8 @@ class ReduceAll(_Reduce):
         >>> input_x = Tensor(np.array([[True, False], [True, True]]))
         >>> op = P.ReduceAll(keep_dims=True)
         >>> output = op(input_x, 1)
+        [[False]
+         [ True]]
     """
 
     def __infer__(self, input_x, axis):
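With keep_dims=True the reduced axis is kept at length 1, matching NumPy's keepdims, which is exactly the shape of the added output:

```python
import numpy as np

input_x = np.array([[True, False], [True, True]])
print(np.all(input_x, axis=1, keepdims=True))  # [[False]
                                               #  [ True]]
```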
@@ -1942,6 +1944,7 @@ class Mod(_MathBinaryOp):
         >>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
         >>> mod = P.Mod()
         >>> mod(input_x, input_y)
+        [-1.  1.  0.]
     """
 
     def infer_value(self, x, y):
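The added output implies C-style truncated remainder, where the result takes the dividend's sign; NumPy's `fmod` behaves the same way. The `input_x` line sits outside the hunk, so the [-4.0, 5.0, 6.0] below is inferred from the printed result:

```python
import numpy as np

input_x = np.array([-4.0, 5.0, 6.0], dtype=np.float32)  # inferred, not in the hunk
input_y = np.array([3.0, 2.0, 3.0], dtype=np.float32)
print(np.fmod(input_x, input_y))  # [-1.  1.  0.] -- sign follows the dividend
```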
@@ -3269,6 +3272,7 @@ class SquareSumAll(PrimitiveWithInfer):
         >>> input_x2 = Tensor(np.random.randint([3, 2, 5, 7]), mindspore.float32)
         >>> square_sum_all = P.SquareSumAll()
         >>> square_sum_all(input_x1, input_x2)
+        (27, 26)
     """
 
     @prim_attr_register
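`SquareSumAll` reduces each input to the sum of its squared elements and returns the pair; `(27, 26)` is illustrative only, since the inputs are random. (As a side note, `np.random.randint([3, 2, 5, 7])` actually yields a 1-D array of four random ints, with the list taken as per-element upper bounds — probably not what the example intends.) The semantics:

```python
import numpy as np

x1 = np.random.rand(3, 2).astype(np.float32)
x2 = np.random.rand(3, 2).astype(np.float32)
result = (np.sum(np.square(x1)), np.sum(np.square(x2)))  # one scalar per input
```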
@@ -2955,6 +2955,10 @@ class MirrorPad(PrimitiveWithInfer):
         >>> paddings = Tensor([[1,1],[2,2]])
         >>> pad = Net()
         >>> ms_output = pad(Tensor(x), paddings)
+        [[0.5525309  0.49183875 0.99110144 0.49183875 0.5525309  0.49183875 0.99110144]
+         [0.31417271 0.96308136 0.934709   0.96308136 0.31417271 0.96308136 0.934709  ]
+         [0.5525309  0.49183875 0.99110144 0.49183875 0.5525309  0.49183875 0.99110144]
+         [0.31417271 0.96308136 0.934709   0.96308136 0.31417271 0.96308136 0.934709  ]]
     """
 
     @prim_attr_register
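The added output is reflect-mode padding of a 2x3 input: 1 row on each side, 2 columns on each side, mirrored about the edge without repeating it. Assuming `Net` wraps `P.MirrorPad(mode='REFLECT')` (the Net definition is outside the hunk), NumPy reproduces the pattern:

```python
import numpy as np

x = np.random.rand(2, 3).astype(np.float32)
# 'reflect' mirrors about the edge element without repeating it;
# MirrorPad's SYMMETRIC mode corresponds to np.pad's 'symmetric' instead.
padded = np.pad(x, ((1, 1), (2, 2)), mode='reflect')  # shape (4, 7)
```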
@@ -4196,15 +4200,16 @@ class SparseApplyAdagrad(PrimitiveWithInfer):
         >>>     def __init__(self):
         >>>         super(Net, self).__init__()
         >>>         self.sparse_apply_adagrad = P.SparseApplyAdagrad(lr=1e-8)
-        >>>         self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="var")
-        >>>         self.accum = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="accum")
+        >>>         self.var = Parameter(Tensor(np.ones([1, 1, 1]).astype(np.float32)), name="var")
+        >>>         self.accum = Parameter(Tensor(np.ones([1, 1, 1]).astype(np.float32)), name="accum")
         >>>     def construct(self, grad, indices):
         >>>         out = self.sparse_apply_adagrad(self.var, self.accum, grad, indices)
         >>>         return out
         >>> net = Net()
-        >>> grad = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
-        >>> indices = Tensor([0, 1, 2], mstype.int32)
+        >>> grad = Tensor(np.random.rand(1, 1, 1).astype(np.float32))
+        >>> indices = Tensor([0], mstype.int32)
         >>> result = net(grad, indices)
+        ([[[1.0]]], [[[1.0]]])
     """
 
     __mindspore_signature__ = (
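Shrinking the example to 1x1x1 makes the printed result checkable against the Adagrad rule: per selected row, accum += grad**2 and var -= lr * grad / sqrt(accum). With lr=1e-8 the change to var is below float32 resolution, hence var stays [[[1.0]]]. A sketch of that rule (standard TF-style semantics, assumed rather than quoted from the kernel; the V2 op below differs only by an epsilon added to the denominator):

```python
import numpy as np

def sparse_apply_adagrad(var, accum, grad, indices, lr, epsilon=0.0):
    # Adagrad on the selected rows only: the accumulator gathers squared
    # gradients, and the step is scaled by the inverse root of the accumulator.
    for i, row in enumerate(indices):
        accum[row] += grad[i] ** 2
        var[row] -= lr * grad[i] / (np.sqrt(accum[row]) + epsilon)
    return var, accum

var = np.ones([1, 1, 1], dtype=np.float32)
accum = np.ones([1, 1, 1], dtype=np.float32)
grad = np.random.rand(1, 1, 1).astype(np.float32)
sparse_apply_adagrad(var, accum, grad, [0], lr=1e-8)
# var is still 1.0 to float32 precision; accum is 1 + grad**2
# (cf. the 1.67194188 in the SparseApplyAdagradV2 example).
```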
@@ -4283,16 +4288,17 @@ class SparseApplyAdagradV2(PrimitiveWithInfer):
         >>>     def __init__(self):
         >>>         super(Net, self).__init__()
         >>>         self.sparse_apply_adagrad_v2 = P.SparseApplyAdagradV2(lr=1e-8, epsilon=1e-6)
-        >>>         self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="var")
-        >>>         self.accum = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="accum")
+        >>>         self.var = Parameter(Tensor(np.ones([1, 1, 1]).astype(np.float32)), name="var")
+        >>>         self.accum = Parameter(Tensor(np.ones([1, 1, 1]).astype(np.float32)), name="accum")
         >>>
         >>>     def construct(self, grad, indices):
         >>>         out = self.sparse_apply_adagrad_v2(self.var, self.accum, grad, indices)
         >>>         return out
         >>> net = Net()
-        >>> grad = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
-        >>> indices = Tensor([0, 1, 2], mstype.int32)
+        >>> grad = Tensor(np.random.rand(1, 1, 1).astype(np.float32))
+        >>> indices = Tensor([0], mstype.int32)
         >>> result = net(grad, indices)
+        ([[[1.0]]], [[[1.67194188]]])
     """
 
     __mindspore_signature__ = (
@@ -5089,18 +5095,19 @@ class SparseApplyFtrl(PrimitiveWithCheck):
         >>>     def __init__(self):
         >>>         super(SparseApplyFtrlNet, self).__init__()
         >>>         self.sparse_apply_ftrl = P.SparseApplyFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5)
-        >>>         self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
-        >>>         self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
-        >>>         self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear")
+        >>>         self.var = Parameter(Tensor(np.random.rand(1, 1).astype(np.float32)), name="var")
+        >>>         self.accum = Parameter(Tensor(np.random.rand(1, 1).astype(np.float32)), name="accum")
+        >>>         self.linear = Parameter(Tensor(np.random.rand(1, 1).astype(np.float32)), name="linear")
         >>>
         >>>     def construct(self, grad, indices):
         >>>         out = self.sparse_apply_ftrl(self.var, self.accum, self.linear, grad, indices)
         >>>         return out
         >>>
         >>> net = SparseApplyFtrlNet()
-        >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
-        >>> indices = Tensor(np.ones([3]), mindspore.int32)
+        >>> grad = Tensor(np.random.rand(1, 1).astype(np.float32))
+        >>> indices = Tensor(np.ones([1]), mindspore.int32)
         >>> output = net(grad, indices)
+        ([[1.02914639e-01]], [[7.60280550e-01]], [[7.64630079e-01]])
     """
 
     __mindspore_signature__ = (
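FTRL is harder to verify by eye, but with l1=0.0, l2=0.0, and lr_power=-0.5 (the example's settings) the FTRL-proximal update collapses to a short form. This is my simplification of the TF-style update under exactly those settings, not the literal kernel:

```python
import numpy as np

def sparse_apply_ftrl(var, accum, linear, grad, indices, lr):
    # FTRL-proximal specialized to l1 = l2 = 0, lr_power = -0.5.
    for i, row in enumerate(indices):
        g = grad[i]
        new_accum = accum[row] + g ** 2
        sigma = (np.sqrt(new_accum) - np.sqrt(accum[row])) / lr
        linear[row] += g - sigma * var[row]
        var[row] = -lr * linear[row] / np.sqrt(new_accum)
        accum[row] = new_accum
    return var, accum, linear
```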
@@ -341,6 +341,10 @@ class RandomChoiceWithMask(PrimitiveWithInfer):
         >>> rnd_choice_mask = P.RandomChoiceWithMask()
         >>> input_x = Tensor(np.ones(shape=[240000, 4]).astype(np.bool))
         >>> output_y, output_mask = rnd_choice_mask(input_x)
+        >>> output_y.shape
+        (256, 2)
+        >>> output_mask.shape
+        (256,)
     """
 
     @prim_attr_register
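The appended shapes state the contract: with the default count of 256 and a 2-D input, the op returns 256 coordinate pairs plus a boolean mask marking which rows are valid samples. A NumPy sketch of those semantics (count=256 assumed to be the default):

```python
import numpy as np

def random_choice_with_mask(x, count=256, rng=None):
    """Pick up to `count` coordinates of True entries; mask flags valid rows."""
    rng = rng or np.random.default_rng()
    coords = np.argwhere(x)                  # (n, ndim) coordinates of True
    k = min(count, len(coords))
    picked = coords[rng.choice(len(coords), size=k, replace=False)]
    out = np.zeros((count, x.ndim), dtype=np.int32)
    out[:k] = picked
    return out, np.arange(count) < k

y, mask = random_choice_with_mask(np.ones((240000, 4), dtype=bool))
print(y.shape, mask.shape)  # (256, 2) (256,)
```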