Merge pull request !5689 from lihongkang/lhk_master (tags/v1.0.0)
@@ -220,7 +220,7 @@ class Adam(Optimizer):
         >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
         >>>                 {'params': no_conv_params, 'lr': 0.01},
         >>>                 {'order_params': net.trainable_params()}]
-        >>> optm = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)
+        >>> optim = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)
         >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
         >>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0.
         >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
@@ -168,7 +168,7 @@ class LazyAdam(Optimizer):
         >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
         >>>                 {'params': no_conv_params, 'lr': 0.01},
         >>>                 {'order_params': net.trainable_params()}]
-        >>> opt = nn.LazyAdam(group_params, learning_rate=0.1, weight_decay=0.0)
+        >>> optim = nn.LazyAdam(group_params, learning_rate=0.1, weight_decay=0.0)
         >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
         >>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0.
         >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
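Note: both hunks above assume `conv_params` and `no_conv_params` have already been split out of the network's parameters. A minimal sketch of that setup, with a toy `Net` and a name-based filter that are illustrations only, not part of this diff:

    import mindspore.nn as nn

    class Net(nn.Cell):
        # Toy network with one conv layer and one dense layer (assumed, not from the PR).
        def __init__(self):
            super(Net, self).__init__()
            self.conv = nn.Conv2d(3, 8, 3)
            self.fc = nn.Dense(8, 2)

        def construct(self, x):
            # Global-average-pool the conv output, then classify.
            return self.fc(self.conv(x).mean((2, 3)))

    net = Net()
    # Group parameters by name: anything with 'conv' in the name gets weight decay.
    conv_params = list(filter(lambda p: 'conv' in p.name, net.trainable_params()))
    no_conv_params = list(filter(lambda p: 'conv' not in p.name, net.trainable_params()))
    group_params = [{'params': conv_params, 'weight_decay': 0.01},
                    {'params': no_conv_params, 'lr': 0.01},
                    {'order_params': net.trainable_params()}]
    optim = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)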
@@ -3013,12 +3013,12 @@ class DepthToSpace(PrimitiveWithInfer):
     This is the reverse operation of SpaceToDepth.
-    The depth of output tensor is :math:`input\_depth / (block\_size * block\_size)`.
     The output tensor's `height` dimension is :math:`height * block\_size`.
     The output tensor's `width` dimension is :math:`width * block\_size`.
+    The depth of output tensor is :math:`input\_depth / (block\_size * block\_size)`.
     The input tensor's depth must be divisible by `block_size * block_size`.
     The data format is "NCHW".
@@ -3029,7 +3029,7 @@ class DepthToSpace(PrimitiveWithInfer):
         - **x** (Tensor) - The target tensor. It must be a 4-D tensor.

     Outputs:
-        Tensor, the same type as `x`.
+        Tensor, has the same shape and dtype as `x`.

     Examples:
         >>> x = Tensor(np.random.rand(1,12,1,1), mindspore.float32)
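As a sanity check on the documented shape rule, a minimal NumPy sketch of the rearrangement (DCR element ordering is an assumption here; only the output shape is what the docstring guarantees):

    import numpy as np

    def depth_to_space_ref(x, block_size):
        # NCHW layout; depth must be divisible by block_size * block_size.
        n, c, h, w = x.shape
        b = block_size
        assert c % (b * b) == 0
        x = x.reshape(n, b, b, c // (b * b), h, w)
        x = x.transpose(0, 3, 4, 1, 5, 2)    # -> (N, C/b^2, H, b, W, b)
        return x.reshape(n, c // (b * b), h * b, w * b)

    x = np.random.rand(1, 12, 1, 1).astype(np.float32)
    print(depth_to_space_ref(x, 2).shape)    # (1, 3, 2, 2): depth/4, height*2, width*2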
@@ -741,6 +741,7 @@ class CumSum(PrimitiveWithInfer):
     Inputs:
         - **input** (Tensor) - The input tensor to accumulate.
         - **axis** (int) - The axis to accumulate the tensor's value. Only constant value is allowed.
+          Must be in the range [-rank(input), rank(input)).

     Outputs:
         Tensor, the shape of the output tensor is consistent with the input tensor's.
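The added range follows the usual negative-axis convention; a small sketch with NumPy standing in for the primitive's axis handling:

    import numpy as np

    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    rank = x.ndim                  # 2, so valid axes are in [-2, 2)
    print(np.cumsum(x, axis=0))    # accumulate down the rows
    print(np.cumsum(x, axis=-2))   # same result: negative axes count from the end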
@@ -1764,6 +1765,7 @@ class Div(_MathBinaryOp):
         >>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
         >>> div = P.Div()
         >>> div(input_x, input_y)
+        [-1.3, 2.5, 2.0]
     """

     def infer_value(self, x, y):
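The new output line documents element-wise division. `input_x` is defined above this hunk and not shown; the values below are assumptions chosen to be consistent with the documented result up to display rounding, with NumPy standing in for the primitive:

    import numpy as np

    input_x = np.array([-4.0, 5.0, 6.0], dtype=np.float32)  # assumed, not from the diff
    input_y = np.array([3.0, 2.0, 3.0], dtype=np.float32)
    print(input_x / input_y)   # [-1.3333334  2.5  2. ] -- element-wise quotient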