From c421fc1badd7a48291d2ca4c0a16ab2139a20de4 Mon Sep 17 00:00:00 2001
From: lihongkang <lihongkang1@huawei.com>
Date: Fri, 25 Sep 2020 10:20:33 +0800
Subject: [PATCH] fix bugs

---
 mindspore/nn/layer/activation.py      | 4 +++-
 mindspore/ops/operations/array_ops.py | 5 +++--
 mindspore/ops/operations/math_ops.py  | 2 +-
 3 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/mindspore/nn/layer/activation.py b/mindspore/nn/layer/activation.py
index 11205b064c..d30d3c2a0a 100644
--- a/mindspore/nn/layer/activation.py
+++ b/mindspore/nn/layer/activation.py
@@ -22,6 +22,7 @@
 from mindspore.common.tensor import Tensor
 from mindspore._extends import cell_attr_register
 from mindspore.ops import _selected_ops
 from ..cell import Cell
+from ..._checkparam import Validator as validator

 __all__ = ['Softmax',
@@ -228,7 +229,7 @@ class LeakyReLU(Cell):
     See https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf

     Args:
-        alpha (float): Slope of the activation function at x < 0. Default: 0.2.
+        alpha (Union[int, float]): Slope of the activation function at x < 0. Default: 0.2.

     Inputs:
         - **input_x** (Tensor) - The input of LeakyReLU.
@@ -246,6 +247,7 @@ class LeakyReLU(Cell):

     def __init__(self, alpha=0.2):
         super(LeakyReLU, self).__init__()
+        validator.check_value_type('alpha', alpha, [float, int], self.cls_name)
         self.greater_equal = P.GreaterEqual()
         self.mul = P.Mul()
         self.alpha = alpha
diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py
index be7e0ccb77..9c8fcce4eb 100644
--- a/mindspore/ops/operations/array_ops.py
+++ b/mindspore/ops/operations/array_ops.py
@@ -1185,6 +1185,7 @@ class Argmax(PrimitiveWithInfer):
     Examples:
         >>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)
         >>> index = P.Argmax(output_type=mindspore.int32)(input_x)
+        1
     """

     @prim_attr_register
@@ -1192,7 +1193,7 @@
         """Initialize Argmax"""
         self.init_prim_io_names(inputs=['x'], outputs=['output'])
         validator.check_value_type("axis", axis, [int], self.name)
-        validator.check_type_same({'output': output_type}, [mstype.int32, mstype.int64], self.name)
+        validator.check_type_same({'output': output_type}, [mstype.int32], self.name)
         self.axis = axis
         self.add_prim_attr('output_type', output_type)
@@ -1996,7 +1997,7 @@ class Select(PrimitiveWithInfer):
     and :math:`y`.

     Inputs:
-        - **input_x** (Tensor[bool]) - The shape is :math:`(x_1, x_2, ..., x_N)`.
+        - **input_x** (Tensor[bool]) - The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
          The condition tensor, decides which element is chosen.
        - **input_y** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
          The first input tensor.
diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py
index 2d6dbc8b75..335c8c68b8 100644
--- a/mindspore/ops/operations/math_ops.py
+++ b/mindspore/ops/operations/math_ops.py
@@ -404,7 +404,7 @@ class ReduceAll(_Reduce):
        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the "logical and" of all elements in the input tensor.
-       - If axis is int, set as 2, and keep_dims is alse,
+       - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.
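
Verification sketch (not part of the patch): the snippet below exercises the two behavioural changes in this commit, namely the LeakyReLU alpha type check and the int32-only Argmax output, assuming a MindSpore build that contains this patch and the 1.x-era API used in the docstrings above; PyNative mode is set only so the results print eagerly.

# Minimal sketch, assuming a MindSpore install that includes this patch.
import numpy as np
import mindspore
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.ops import operations as P

context.set_context(mode=context.PYNATIVE_MODE)

# LeakyReLU now validates alpha as int or float, so an integer slope is accepted.
leaky_relu = nn.LeakyReLU(alpha=2)
x = Tensor(np.array([-1.0, 2.0, -3.0]), mindspore.float32)
print(leaky_relu(x))    # negative inputs scaled by alpha=2

# A non-numeric alpha is rejected by the new validator.check_value_type call.
try:
    nn.LeakyReLU(alpha="0.2")
except TypeError as err:
    print(err)

# Argmax only accepts int32 output_type after this patch; the docstring example
# now also shows the expected result (index 1, since 3.1 is the largest value).
input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)
index = P.Argmax(output_type=mindspore.int32)(input_x)
print(index)            # 1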