Merge pull request !8231 from lihongkang/v2_master

@@ -185,7 +185,7 @@ class MaxPool1d(_PoolNd):
     Examples:
         >>> max_pool = nn.MaxPool1d(kernel_size=3, strides=1)
         >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4]), mindspore.float32)
-        >>> output = pool(x)
+        >>> output = max_pool(x)
         >>> output.shape
         (1, 2, 2)
     """

@@ -20,7 +20,6 @@ from ..common import dtype as mstype
 from ..ops import operations as P
 from .cell import Cell
 from .._checkparam import Validator as validator
-from .._checkparam import Rel


 class LearningRateSchedule(Cell):

@@ -246,7 +245,7 @@ class CosineDecayLR(LearningRateSchedule):
         >>> min_lr = 0.01
         >>> max_lr = 0.1
         >>> decay_steps = 4
-        >>> global_step = Tensor(2, mstype.int32)
+        >>> global_steps = Tensor(2, mstype.int32)
         >>> cosine_decay_lr = CosineDecayLR(min_lr, max_lr, decay_steps)
         >>> cosine_decay_lr(global_steps)
     """

@@ -543,7 +543,7 @@ class CosineEmbeddingLoss(_Loss):
         >>> x1 = Tensor(np.array([[0.3, 0.8], [0.4, 0.3]]), mindspore.float32)
         >>> x2 = Tensor(np.array([[0.4, 1.2], [-0.4, -0.9]]), mindspore.float32)
         >>> y = Tensor(np.array([1,-1]), mindspore.int32)
-        >>> cosine_embedding_loss = P.CosineEmbeddingLoss()
+        >>> cosine_embedding_loss = nn.CosineEmbeddingLoss()
         >>> cosine_embedding_loss(x1, x2, y)
         [0.0003426671]
     """

@@ -1125,6 +1125,7 @@ class ScalarToArray(PrimitiveWithInfer):
         >>> op = P.ScalarToArray()
         >>> data = 1.0
         >>> output = op(data)
+        1.0
     """

     @prim_attr_register

@@ -1156,6 +1157,7 @@ class ScalarToTensor(PrimitiveWithInfer):
         >>> op = P.ScalarToTensor()
         >>> data = 1
         >>> output = op(data, mindspore.float32)
+        1.0
     """

     @prim_attr_register
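
Both hunks document the same behaviour: a Python scalar becomes a 0-dimensional Tensor. A minimal sketch of the difference between the two ops, assuming the `P` alias from `mindspore.ops.operations`:

    import mindspore
    from mindspore.ops import operations as P

    # ScalarToArray infers the dtype from the input scalar ...
    print(P.ScalarToArray()(1.0))                    # 1.0, a 0-d float32 Tensor
    # ... while ScalarToTensor takes the target dtype explicitly.
    print(P.ScalarToTensor()(1, mindspore.float32))  # 1.0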

@@ -2987,7 +2989,7 @@ class ScatterMul(_ScatterOp):
     Examples:
         >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mindspore.float32), name="x")
         >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
-        >>> updates = Tensor(np.ones([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mindspore.float32)
+        >>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mindspore.float32)
         >>> scatter_mul = P.ScatterMul()
         >>> output = scatter_mul(input_x, indices, updates)
         [[2.0, 2.0, 2.0], [4.0, 4.0, 4.0]]
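
The fix matters because `np.ones` takes a shape, not a nested value list; `np.array` is the right constructor here. The op's semantics can be mimicked in NumPy (a sketch of the update rule, not the device kernel):

    import numpy as np

    input_x = np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])
    indices = np.array([0, 1])
    updates = np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]])

    # Each indexed row of input_x is multiplied in place by the matching update row.
    for i, row in enumerate(indices):
        input_x[row] *= updates[i]
    print(input_x)  # [[2. 2. 2.] [4. 4. 4.]], matching the docstring output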

@@ -3496,7 +3498,7 @@ class BatchToSpaceND(PrimitiveWithInfer):
     This operation will divide batch dimension N into blocks with block_shape, the output tensor's N dimension
     is the corresponding number of blocks after division. The output tensor's H, W dimension is product of original H, W
-    dimension and block_shape with given amount to crop from dimension, respectively.B
+    dimension and block_shape with given amount to crop from dimension, respectively.

     Args:
         block_shape (Union[list(int), tuple(int)]): The block shape of dividing block with all value >= 1.
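
A sketch of how the shape arithmetic described above works out, assuming block_shape = [2, 2] and no cropping (these argument values and the input tensor are illustrative, not taken from this diff):

    import numpy as np
    import mindspore
    from mindspore import Tensor
    from mindspore.ops import operations as P

    block_shape = [2, 2]
    crops = [[0, 0], [0, 0]]
    batch_to_space_nd = P.BatchToSpaceND(block_shape, crops)
    # N is divided by prod(block_shape): 4 / (2 * 2) = 1; H and W are each
    # multiplied by their block size: 1 * 2 = 2, minus the (zero) crops.
    x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
    output = batch_to_space_nd(x)
    print(output.shape)  # (1, 1, 2, 2)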

@@ -35,6 +35,7 @@ class ScalarCast(PrimitiveWithInfer):
     Examples:
         >>> scalar_cast = P.ScalarCast()
         >>> output = scalar_cast(255.0, mindspore.int32)
+        255
     """

     @prim_attr_register

@@ -495,6 +495,8 @@ class ReduceMax(_Reduce):
         >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
         >>> op = P.ReduceMax(keep_dims=True)
         >>> output = op(input_x, 1)
+        >>> output.shape
+        (3, 1, 5, 6)
     """

     @prim_attr_register

@@ -572,6 +574,8 @@ class ReduceProd(_Reduce):
         >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
         >>> op = P.ReduceProd(keep_dims=True)
         >>> output = op(input_x, 1)
+        >>> output.shape
+        (3, 1, 5, 6)
     """

     @prim_attr_register

@@ -730,8 +734,7 @@ class BatchMatMul(MatMul):
         [[[[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
-         [[3. 3. 3. 3.]]]
+         [[3. 3. 3. 3.]]],
         [[[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]

@@ -744,8 +747,7 @@ class BatchMatMul(MatMul):
         [[[[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
-         [[3. 3. 3. 3.]]]
+         [[3. 3. 3. 3.]]],
         [[[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
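
Both hunks only fix the printed ndarray formatting, adding the comma that separates batch blocks. The all-threes values themselves follow from multiplying all-ones operands with inner dimension 3 (a NumPy sketch under that assumption; the operand shapes are not shown in this diff):

    import numpy as np

    # Hypothetical operands: ones with inner dimension 3, so every entry is 1*1*3 = 3.
    x = np.ones((2, 4, 1, 3), dtype=np.float32)
    y = np.ones((2, 4, 3, 4), dtype=np.float32)
    print(np.matmul(x, y))  # blocks of [[3. 3. 3. 3.]], shape (2, 4, 1, 4)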

@@ -3683,11 +3685,11 @@ class IFMR(PrimitiveWithInfer):
     The TFMR(Input Feature Map Reconstruction).

     Args:
-        min_percentile (float): Min init percentile.
-        max_percentile (float): Max init percentile.
-        search_range Union[list(float), tuple(float)]: Range of searching.
-        search_step (float): Step size of searching.
-        with_offset (bool): Whether using offset.
+        min_percentile (float): Min init percentile. Default: 0.999999.
+        max_percentile (float): Max init percentile. Default: 0.999999.
+        search_range Union[list(float), tuple(float)]: Range of searching. Default: [0.7, 1.3].
+        search_step (float): Step size of searching. Default: 0.01.
+        with_offset (bool): Whether using offset. Default: True.

     Inputs:
         - **data** (Tensor) - A Tensor of feature map. With float16 or float32 data type.

@@ -3709,10 +3711,12 @@ class IFMR(PrimitiveWithInfer):
         >>> ifmr = P.IFMR(min_percentile=0.2, max_percentile=0.9, search_range=(1.0, 2.0),
                           search_step=1.0, with_offset=False)
         >>> output = ifmr(data, data_min, data_max, cumsum)
+        ([7.87401572e-03], [0.00000000e+00])
     """

     @prim_attr_register
-    def __init__(self, min_percentile, max_percentile, search_range, search_step, with_offset):
+    def __init__(self, min_percentile=0.999999, max_percentile=0.999999, search_range=(0.7, 1.3), search_step=0.01,
+                 with_offset=True):
         validator.check_value_type("min_percentile", min_percentile, [float], self.name)
         validator.check_value_type("max_percentile", max_percentile, [float], self.name)
         validator.check_value_type("search_range", search_range, [list, tuple], self.name)

@@ -319,6 +319,8 @@ class ReLU6(PrimitiveWithInfer):
         >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
         >>> relu6 = P.ReLU6()
         >>> result = relu6(input_x)
+        [[0. 4. 0.]
+         [2. 0. 6.]]
     """

     @prim_attr_register
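
ReLU6 computes min(max(x, 0), 6), so the output added here is easy to verify in NumPy (a sketch):

    import numpy as np

    x = np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]])
    # Negative values clamp to 0; values above 6 clamp to 6.
    print(np.minimum(np.maximum(x, 0), 6))  # [[0. 4. 0.] [2. 0. 6.]]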

@@ -352,7 +354,7 @@ class ReLUV2(PrimitiveWithInfer):
         >>> relu_v2 = P.ReLUV2()
         >>> output = relu_v2(input_x)
         ([[[[1., 0.], [0., 4.]], [[0., 6.], [7., 0.]]]],
-         [[[[1, 0], [2, 0]], [[2, 0], [1, 0]]]])
+         [[[[[1, 0], [2, 0]], [[2, 0], [1, 0]]]]])
     """

     @prim_attr_register

@@ -892,7 +894,7 @@ class BatchNorm(PrimitiveWithInfer):
         - **reserve_space_2** (Tensor) - Tensor of shape :math:`(C,)`.

     Examples:
-        >>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
+        >>> input_x = Tensor(np.ones([32, 64]), mindspore.float32)
         >>> scale = Tensor(np.ones([64]), mindspore.float32)
         >>> bias = Tensor(np.ones([64]), mindspore.float32)
         >>> mean = Tensor(np.ones([64]), mindspore.float32)
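
The shape change makes the example consistent: every per-channel parameter has shape (64,), so the input's channel dimension must be 64. The hunk cuts off before the call; a completed sketch (the variance line and the call itself are assumptions following the same pattern, not shown in this diff):

    import numpy as np
    import mindspore
    from mindspore import Tensor
    from mindspore.ops import operations as P

    input_x = Tensor(np.ones([32, 64]), mindspore.float32)  # (N, C) with C = 64
    scale = Tensor(np.ones([64]), mindspore.float32)
    bias = Tensor(np.ones([64]), mindspore.float32)
    mean = Tensor(np.ones([64]), mindspore.float32)
    variance = Tensor(np.ones([64]), mindspore.float32)     # assumed, not in the hunk
    batch_norm = P.BatchNorm()
    output = batch_norm(input_x, scale, bias, mean, variance)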

@@ -2558,7 +2560,11 @@ class ResizeBilinear(PrimitiveWithInfer):
         >>> tensor = Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mindspore.float32)
         >>> resize_bilinear = P.ResizeBilinear((5, 5))
         >>> result = resize_bilinear(tensor)
-        >>> assert result.shape == (1, 1, 5, 5)
+        [[[[1. 2. 3. 4. 5.]
+           [1. 2. 3. 4. 5.]
+           [1. 2. 3. 4. 5.]
+           [1. 2. 3. 4. 5.]
+           [1. 2. 3. 4. 5.]]]]
     """

     @prim_attr_register
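
The printed result is plausible by inspection: the two input rows are identical, so bilinear interpolation along H can only reproduce that row, and W is already 5. A quick NumPy check of the row-interpolation argument (a sketch, not the op's kernel):

    import numpy as np

    rows = np.array([[1., 2., 3., 4., 5.], [1., 2., 3., 4., 5.]])
    # Any convex combination of two equal rows is that same row, so every
    # interpolated output row equals [1. 2. 3. 4. 5.].
    for w in np.linspace(0, 1, 5):
        assert np.allclose((1 - w) * rows[0] + w * rows[1], rows[0])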

@@ -45,7 +45,7 @@ class Assign(PrimitiveWithCheck):
         >>>
         >>>     def construct(self, x):
         >>>         P.Assign()(self.y, x)
-        >>>         return x
+        >>>         return self.y
         >>> x = Tensor([2.0], mindspore.float32)
         >>> net = Net()
         >>> net(x)
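
The hunk shows only `construct`; returning `self.y` instead of `x` makes the example demonstrate that the parameter actually received the assigned value. A self-contained version (the class header and the parameter's initial value are assumptions modeled on the surrounding docstring):

    import mindspore
    import mindspore.nn as nn
    from mindspore import Parameter, Tensor
    from mindspore.ops import operations as P

    class Net(nn.Cell):
        def __init__(self):
            super(Net, self).__init__()
            # Parameter that Assign overwrites in construct (assumed initial value).
            self.y = Parameter(Tensor([1.0], mindspore.float32), name="y")

        def construct(self, x):
            P.Assign()(self.y, x)
            return self.y  # the fix: return the updated parameter, not x

    x = Tensor([2.0], mindspore.float32)
    net = Net()
    print(net(x))  # [2.0]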