Merge pull request !18266 from 张璇/softshrink
@@ -371,6 +371,8 @@ inline const PrimitivePtr kLambApplyWeightAssign = std::make_shared<Primitive>("
inline const PrimitivePtr kSoftmaxGradExt = std::make_shared<Primitive>("SoftmaxGradExt");
inline const PrimitivePtr kSquareSumV1 = std::make_shared<Primitive>("SquareSumV1");
inline const PrimitivePtr kFusedMulAdd = std::make_shared<Primitive>("FusedMulAdd");
inline const PrimitivePtr kPrimSoftShrink = std::make_shared<Primitive>("SoftShrink");
inline const PrimitivePtr kPrimSoftShrinkGrad = std::make_shared<Primitive>("SoftShrinkGrad");
// Comm ops
inline const PrimitivePtr kPrimMirror = std::make_shared<Primitive>("_MirrorOperator");
@@ -0,0 +1,63 @@
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "ops/grad/soft_shrink_grad.h"
#include <set>
#include <algorithm>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "abstract/primitive_infer_map.h"

namespace mindspore {
namespace ops {
namespace {
abstract::ShapePtr SoftShrinkGradInferShape(const PrimitivePtr &primitive,
                                            const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(primitive);
  CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 2, primitive->name());
  auto input_grad_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
  auto input_x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];
  auto prim_name = primitive->name();
  CheckAndConvertUtils::Check("input_grad_shape", input_grad_shape, kEqual, "input_x_shape", input_x_shape, prim_name,
                              ValueError);
  return std::make_shared<abstract::Shape>(input_grad_shape);
}

TypePtr SoftShrinkGradInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
  for (const auto &item : input_args) {
    MS_EXCEPTION_IF_NULL(item);
  }
  const std::set<TypePtr> valid_types = {kFloat16, kFloat32};
  std::map<std::string, TypePtr> types;
  types.emplace("input_grad", input_args[0]->BuildType());
  types.emplace("input_x", input_args[1]->BuildType());
  return CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name());
}
}  // namespace

AbstractBasePtr SoftShrinkGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                    const std::vector<AbstractBasePtr> &input_args) {
  return std::make_shared<abstract::AbstractTensor>(SoftShrinkGradInferType(primitive, input_args),
                                                    SoftShrinkGradInferShape(primitive, input_args)->shape());
}
REGISTER_PRIMITIVE_EVAL_IMPL(SoftShrinkGrad, prim::kPrimSoftShrinkGrad, SoftShrinkGradInfer, nullptr, true);
}  // namespace ops
}  // namespace mindspore
@@ -0,0 +1,42 @@
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CORE_OPS_SOFTSHRINK_GRAD_H_
#define MINDSPORE_CORE_OPS_SOFTSHRINK_GRAD_H_
#include <map>
#include <memory>
#include <vector>
#include <string>
#include "ops/primitive_c.h"
#include "abstract/abstract_value.h"
#include "utils/check_convert_utils.h"

namespace mindspore {
namespace ops {
constexpr auto kNameSoftShrinkGrad = "SoftShrinkGrad";

class SoftShrinkGrad : public PrimitiveC {
 public:
  SoftShrinkGrad() : PrimitiveC(kNameSoftShrinkGrad) { InitIOName({"input_grad", "input_x"}, {"output"}); }
  ~SoftShrinkGrad() = default;
  MS_DECLARE_PARENT(SoftShrinkGrad, PrimitiveC);
};

AbstractBasePtr SoftShrinkGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                    const std::vector<AbstractBasePtr> &input_args);
using PrimSoftShrinkGradPtr = std::shared_ptr<SoftShrinkGrad>;
}  // namespace ops
}  // namespace mindspore
#endif  // MINDSPORE_CORE_OPS_SOFTSHRINK_GRAD_H_
@@ -0,0 +1,60 @@
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "ops/soft_shrink.h"
#include <set>
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "abstract/primitive_infer_map.h"

namespace mindspore {
namespace ops {
namespace {
abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(primitive);
  CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, primitive->name());
  for (const auto &item : input_args) {
    MS_EXCEPTION_IF_NULL(item);
  }
  auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->GetShapeTrack())[kShape];
  return std::make_shared<abstract::Shape>(in_shape);
}

TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(prim);
  CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim->name());
  if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr &a) { return a == nullptr; })) {
    MS_LOG(EXCEPTION) << "For " << prim->name() << ", input_args must not contain a nullptr.";
  }
  const std::set<TypePtr> valid_types = {kFloat16, kFloat32};
  return CheckAndConvertUtils::CheckTensorTypeValid("input_x", input_args[0]->BuildType(), valid_types, prim->name());
}
}  // namespace

AbstractBasePtr SoftShrinkInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                const std::vector<AbstractBasePtr> &input_args) {
  return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args),
                                                    InferShape(primitive, input_args)->shape());
}
REGISTER_PRIMITIVE_EVAL_IMPL(SoftShrink, prim::kPrimSoftShrink, SoftShrinkInfer, nullptr, true);
}  // namespace ops
}  // namespace mindspore
@@ -0,0 +1,42 @@
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CORE_OPS_SOFTSHRINK_H_
#define MINDSPORE_CORE_OPS_SOFTSHRINK_H_
#include <map>
#include <memory>
#include <vector>
#include <string>
#include "ops/primitive_c.h"
#include "abstract/abstract_value.h"
#include "utils/check_convert_utils.h"

namespace mindspore {
namespace ops {
constexpr auto kNameSoftShrink = "SoftShrink";

class SoftShrink : public PrimitiveC {
 public:
  SoftShrink() : PrimitiveC(kNameSoftShrink) { InitIOName({"input_x"}, {"output"}); }
  ~SoftShrink() = default;
  MS_DECLARE_PARENT(SoftShrink, PrimitiveC);
};

AbstractBasePtr SoftShrinkInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                const std::vector<AbstractBasePtr> &input_args);
using PrimSoftShrinkPtr = std::shared_ptr<SoftShrink>;
}  // namespace ops
}  // namespace mindspore
#endif  // MINDSPORE_CORE_OPS_SOFTSHRINK_H_
@@ -39,6 +39,7 @@ __all__ = ['Softmax',
           'HSwish',
           'ELU',
           'LogSigmoid',
           'SoftShrink',
           ]
@@ -754,6 +755,53 @@ class LogSigmoid(Cell):
        ret = self.log(rec_exp_neg_input_1)
        return ret


class SoftShrink(Cell):
    r"""
    Applies the soft shrinkage function elementwise.

    .. math::
        \text{SoftShrink}(x) =
        \begin{cases}
        x - \lambda, & \text{ if } x > \lambda \\
        x + \lambda, & \text{ if } x < -\lambda \\
        0, & \text{ otherwise }
        \end{cases}

    Args:
        lambd (float): The :math:`\lambda` value for the SoftShrink formulation, which must be no less than zero.
            Default: 0.5.

    Inputs:
        - **input_x** (Tensor) - The input of SoftShrink with data type of float16 or float32.
          Any number of additional dimensions.

    Outputs:
        Tensor, has the same shape and data type as `input_x`.

    Raises:
        TypeError: If `lambd` is not a float.
        TypeError: If `input_x` is not a Tensor.
        TypeError: If dtype of `input_x` is neither float16 nor float32.
        ValueError: If `lambd` is less than 0.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Tensor(np.array([[0.5297, 0.7871, 1.1754], [0.7836, 0.6218, -1.1542]]), mstype.float16)
        >>> softshrink = nn.SoftShrink()
        >>> output = softshrink(input_x)
        >>> print(output)
        [[ 0.02979  0.287    0.676  ]
         [ 0.2837   0.1216  -0.6543 ]]
    """

    def __init__(self, lambd=0.5):
        super(SoftShrink, self).__init__()
        self.softshrink = P.SoftShrink(lambd)

    def construct(self, input_x):
        output = self.softshrink(input_x)
        return output


_activation = {
    'softmax': Softmax,
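For intuition, the piecewise definition in the docstring above is a dead zone around the origin: values inside [-lambd, lambd] are zeroed, everything else is pulled toward zero by lambd. A minimal NumPy sketch of the same formula (illustration only, not part of the diff; `soft_shrink` is a hypothetical reference helper):

```python
import numpy as np

def soft_shrink(x, lambd=0.5):
    """Reference SoftShrink: zero inside [-lambd, lambd], shift toward zero outside."""
    return np.where(x > lambd, x - lambd,
                    np.where(x < -lambd, x + lambd, np.zeros_like(x)))

x = np.array([[0.5297, 0.7871, 1.1754], [0.7836, 0.6218, -1.1542]], np.float32)
print(soft_shrink(x))  # matches the float16 example output above, up to rounding
```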
@@ -770,6 +818,7 @@ _activation = {
    'hswish': HSwish,
    'hsigmoid': HSigmoid,
    'logsigmoid': LogSigmoid,
    'softshrink': SoftShrink,
}
@@ -13,11 +13,12 @@
# limitations under the License.
# ============================================================================
"""Define the grad rules of neural network related operations."""

from .._grad.grad_base import bprop_getters
from .. import operations as P
from ..composite.multitype_ops.zeros_like_impl import zeros_like
from ..operations import _grad_ops as G


@bprop_getters.register(P.CTCLossV2)
def get_bprop_ctc_loss_v2(self):
@@ -31,3 +32,15 @@ def get_bprop_ctc_loss_v2(self):
        return grad, zeros_like(targets), zeros_like(input_lengths), zeros_like(target_lengths)

    return bprop


@bprop_getters.register(P.SoftShrink)
def get_bprop_softshrink(self):
    """Grad definition for `SoftShrink` operation."""
    input_grad = G.SoftShrinkGrad(self.lambd)

    def bprop(input_x, out, dout):
        dx = input_grad(dout, input_x)
        return (dx,)

    return bprop
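The backward rule registered above reduces to masking the incoming gradient wherever the input fell inside the shrink band, since SoftShrink has slope 1 outside [-lambd, lambd] and 0 inside. A hedged NumPy sketch of what the SoftShrinkGrad kernel computes (not part of the diff; the tie-breaking at |x| == lambd is an assumption):

```python
import numpy as np

def soft_shrink_grad(dout, x, lambd=0.5):
    """Reference backward pass: d/dx SoftShrink(x) is 1 for |x| > lambd, else 0."""
    return dout * (np.abs(x) > lambd).astype(dout.dtype)

dout = np.array([[0.0, 0.4, 1.0], [1.0, 2.0, 4.0]], np.float32)
x = np.array([[0.5297, 0.7871, 1.1754], [0.7836, 0.6218, -1.1542]], np.float32)
print(soft_shrink_grad(dout, x))  # gradient passes through only where |x| > 0.5
```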
@@ -389,3 +389,5 @@ from .not_equal_ds import _not_ds_equal_tbe
from .reciprocal_ds import _reciprocal_ds_tbe
from .ctc_loss_v2 import _ctc_loss_v2_tbe
from .ctc_loss_v2_grad import _ctc_loss_v2_grad_tbe
from .soft_shrink import _soft_shrink_tbe
from .soft_shrink_grad import _soft_shrink_grad_tbe
@@ -0,0 +1,36 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""SoftShrink op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

soft_shrink_op_info = TBERegOp("SoftShrink") \
    .fusion_type("ELEMWISE") \
    .async_flag(False) \
    .binfile_name("soft_shrink.so") \
    .compute_cost(10) \
    .kernel_name("soft_shrink") \
    .partial_flag(True) \
    .attr("lambd", "optional", "float", "all", "0.5") \
    .input(0, "input_x", False, "required", "all") \
    .output(0, "output_y", False, "required", "all") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()


@op_info_register(soft_shrink_op_info)
def _soft_shrink_tbe():
    """SoftShrink TBE register"""
    return
@@ -0,0 +1,38 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""SoftShrinkGrad op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

soft_shrink_grad_op_info = TBERegOp("SoftShrinkGrad") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("soft_shrink_grad.so") \
    .compute_cost(10) \
    .kernel_name("soft_shrink_grad") \
    .partial_flag(True) \
    .attr("lambd", "optional", "float", "all", "0.5") \
    .input(0, "input_grad", False, "required", "all") \
    .input(1, "input_x", False, "required", "all") \
    .output(0, "output", False, "required", "all") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()


@op_info_register(soft_shrink_grad_op_info)
def _soft_shrink_grad_tbe():
    """SoftShrinkGrad TBE register"""
    return
@@ -86,7 +86,7 @@ from .nn_ops import (LSTM, SGD, Adam, FusedSparseAdam, FusedSparseLazyAdam, Adam
                     FusedSparseFtrl, FusedSparseProximalAdagrad,
                     ApplyAdaMax, ApplyAdadelta, ApplyAdagrad, ApplyAdagradV2,
                     ApplyAddSign, ApplyPowerSign, ApplyGradientDescent, ApplyProximalGradientDescent,
                     ApplyRMSProp, ApplyCenteredRMSProp, BasicLSTMCell, InTopK, AdaptiveAvgPool2D)
                     ApplyRMSProp, ApplyCenteredRMSProp, BasicLSTMCell, InTopK, AdaptiveAvgPool2D, SoftShrink)
from . import _quant_ops
from ._quant_ops import *
from .other_ops import (Assign, InplaceAssign, IOU, BoundingBoxDecode, BoundingBoxEncode,
@@ -483,6 +483,7 @@ __all__ = [
    "TensorScatterMax",
    "TensorScatterMin",
    "TensorScatterSub",
    "SoftShrink",
]

__all__.sort()
@@ -2181,3 +2181,34 @@ class MaskedSelectGrad(PrimitiveWithInfer):
    def infer_dtype(self, x, mask, grad):
        return x


class SoftShrinkGrad(Primitive):
    r"""
    Gradients for SoftShrink operation.

    Args:
        lambd (float): The :math:`\lambda` value for the SoftShrink formulation, which must be no less than zero.
            Default: 0.5.

    Inputs:
        - **input_grad** (Tensor) - The input gradient.
        - **input_x** (Tensor) - The input of SoftShrink with data type of float16 or float32.
          Any number of additional dimensions.

    Outputs:
        Tensor, has the same shape and data type as `input_x`.

    Raises:
        TypeError: If `lambd` is not a float.
        TypeError: If dtype of `input_x` is neither float16 nor float32.
        ValueError: If `lambd` is less than 0.

    Supported Platforms:
        ``Ascend``
    """

    @prim_attr_register
    def __init__(self, lambd=0.5):
        """Initialize SoftShrinkGrad"""
        self.init_prim_io_names(inputs=['input_grad', 'input_x'], outputs=['output'])
        validator.check_value_type("lambd", lambd, [float], self.name)
        validator.check_number("lambd", lambd, 0, Rel.GE, self.name)
@@ -8670,3 +8670,49 @@ class Conv3DTranspose(PrimitiveWithInfer):
            'dtype': x['dtype'],
        }
        return out


class SoftShrink(Primitive):
    r"""
    Applies the soft shrinkage function elementwise.

    .. math::
        \text{SoftShrink}(x) =
        \begin{cases}
        x - \lambda, & \text{ if } x > \lambda \\
        x + \lambda, & \text{ if } x < -\lambda \\
        0, & \text{ otherwise }
        \end{cases}

    Args:
        lambd (float): The :math:`\lambda` value for the SoftShrink formulation, which must be no less than zero.
            Default: 0.5.

    Inputs:
        - **input_x** (Tensor) - The input of SoftShrink with data type of float16 or float32.
          Any number of additional dimensions.

    Outputs:
        Tensor, has the same shape and data type as `input_x`.

    Raises:
        TypeError: If `lambd` is not a float.
        TypeError: If `input_x` is not a Tensor.
        TypeError: If dtype of `input_x` is neither float16 nor float32.
        ValueError: If `lambd` is less than 0.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Tensor(np.array([[0.5297, 0.7871, 1.1754], [0.7836, 0.6218, -1.1542]]), mstype.float16)
        >>> softshrink = ops.SoftShrink()
        >>> output = softshrink(input_x)
        >>> print(output)
        [[ 0.02979  0.287    0.676  ]
         [ 0.2837   0.1216  -0.6543 ]]
    """

    @prim_attr_register
    def __init__(self, lambd=0.5):
        """Initialize SoftShrink"""
        validator.check_value_type("lambd", lambd, [float], self.name)
        validator.check_number("lambd", lambd, 0, Rel.GE, self.name)
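The `lambd` checks above run at primitive construction time, so an invalid value fails fast rather than at kernel launch. A quick hedged check of that behavior (assumes a MindSpore build with this change applied; illustration only, not part of the diff):

```python
import mindspore.ops as ops

# lambd must be a float no less than zero; both violations raise at construction.
try:
    ops.SoftShrink(lambd=-0.1)
except ValueError as err:
    print("negative lambd rejected:", err)

try:
    ops.SoftShrink(lambd=1)  # an int, not a float
except TypeError as err:
    print("non-float lambd rejected:", err)
```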
@@ -2145,6 +2145,16 @@ test_case_nn_ops = [
                    Tensor(np.zeros((1, 1, 2, 2)), mstype.uint16)],
        'desc_bprop': [],
        'skip': ['backward']}),
    ('SoftShrink', {
        'block': P.SoftShrink(),
        'desc_inputs': [Tensor(np.array([[0.5297, 0.7871, 1.1754], [0.7836, 0.6218, -1.1542]]), mstype.float32)],
        'desc_bprop': [Tensor(np.array([[0, 0.4, 1], [1, 2, 4]]), mstype.float32)]}),
    ('SoftShrinkGrad', {
        'block': G.SoftShrinkGrad(),
        'desc_inputs': [Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]), mstype.float16),
                        Tensor(np.array([[-3, -2, 0], [1, 2, 4]]), mstype.float16)],
        'desc_bprop': [],
        'skip': ['backward']}),
]

test_case_array_ops = [