
!2810 Add operator adaptation in ME for Softsign

Merge pull request !2810 from zhangzheng/softsign
tags/v0.6.0-beta
mindspore-ci-bot committed 5 years ago
commit 2cd9649b9e
6 changed files with 94 additions and 1 deletion
  1. mindspore/ops/_grad/grad_nn_ops.py (+15 -0)
  2. mindspore/ops/_op_impl/tbe/__init__.py (+1 -0)
  3. mindspore/ops/_op_impl/tbe/softsign.py (+37 -0)
  4. mindspore/ops/operations/__init__.py (+2 -1)
  5. mindspore/ops/operations/nn_ops.py (+35 -0)
  6. tests/ut/python/ops/test_ops.py (+4 -0)

mindspore/ops/_grad/grad_nn_ops.py (+15 -0)

@@ -336,6 +336,21 @@ def get_bprop_softplus(self):
    return bprop


@bprop_getters.register(P.Softsign)
def get_bprop_softsign(self):
    """Grad definition for `Softsign` operation."""
    mul = P.Mul()
    absolute = P.Abs()
    div = P.Div()
    square = P.Square()

    def bprop(x, out, dout):
        dx = mul(dout, div(1, square(1 + absolute(x))))
        return (dx,)

    return bprop


@bprop_getters.register(P.Tanh)
def get_bprop_tanh(self):
    """Grad definition for `Tanh` operation."""


mindspore/ops/_op_impl/tbe/__init__.py (+1 -0)

@@ -122,6 +122,7 @@ from .round import _round_tbe
from .tanh import _tanh_tbe
from .tanh_grad import _tanh_grad_tbe
from .softmax import _softmax_tbe
from .softsign import _softsign_tbe
from .softplus import _softplus_tbe
from .softplus_grad import _softplus_grad_tbe
from .softmax_grad_ext import _softmax_grad_ext_tbe


mindspore/ops/_op_impl/tbe/softsign.py (+37 -0)

@@ -0,0 +1,37 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""Softsign op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

softsign_op_info = TBERegOp("Softsign") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("softsign.so") \
    .compute_cost(10) \
    .kernel_name("softsign") \
    .partial_flag(True) \
    .op_pattern("formatAgnostic") \
    .input(0, "x", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()


@op_info_register(softsign_op_info)
def _softsign_tbe():
    """Softsign TBE register"""
    return
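
The dtype_format table above registers only float16 and float32 kernels for the TBE (Ascend) backend. A rough NumPy reference of the computation the kernel is expected to perform, as an illustrative model rather than the actual TBE implementation:

import numpy as np

def softsign_reference(x):
    # mirror the dtype_format table: only float16/float32 are registered
    if x.dtype not in (np.float16, np.float32):
        raise TypeError("Softsign is registered for float16/float32 only")
    return x / (1 + np.abs(x))

print(softsign_reference(np.array([0, -1, 2, 30, -30], dtype=np.float32)))
# -> [ 0.  -0.5  0.6666667  0.9677419  -0.9677419]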

mindspore/ops/operations/__init__.py (+2 -1)

@@ -68,7 +68,7 @@ from .nn_ops import (LSTM, SGD, Adam, SparseApplyAdam, SparseApplyLazyAdam, Appl
                     MaxPoolWithArgmax, OneHot, Pad, MirrorPad, PReLU, ReLU, ReLU6, ReLUV2, HSwish, HSigmoid,
                     ResizeBilinear, Sigmoid,
                     SigmoidCrossEntropyWithLogits,
-                    SmoothL1Loss, Softmax, Softplus, LRN,
+                    SmoothL1Loss, Softmax, Softsign, Softplus, LRN,
                     SoftmaxCrossEntropyWithLogits, ROIAlign,
                     SparseSoftmaxCrossEntropyWithLogits, Tanh,
                     TopK, BinaryCrossEntropy, SparseApplyAdagrad, LARSUpdate, ApplyFtrl, SparseApplyFtrl,
@@ -115,6 +115,7 @@ __all__ = [
    'SparseApplyLazyAdam',
    'Softplus',
    'Softmax',
+   'Softsign',
    'LogSoftmax',
    'SoftmaxCrossEntropyWithLogits',
    'ROIAlign',


mindspore/ops/operations/nn_ops.py (+35 -0)

@@ -224,6 +224,41 @@ class Softplus(PrimitiveWithInfer):
        return input_x


class Softsign(PrimitiveWithInfer):
    r"""
    Softsign activation function.

    The function is shown as follows:

    .. math::
        \text{output} = \frac{\text{input_x}}{1 + \left| \text{input_x} \right|},

    Inputs:
        - **input_x** (Tensor) - The input tensor whose data type should be float.

    Outputs:
        Tensor, with the same type and shape as the `input_x`.

    Examples:
        >>> input_x = Tensor(np.array([0, -1, 2, 30, -30]), mindspore.float32)
        >>> softsign = P.Softsign()
        >>> softsign(input_x)
        [0. -0.5 0.6666667 0.9677419 -0.9677419]
    """

    @prim_attr_register
    def __init__(self):
        """init Softsign"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, input_x):
        return input_x

    def infer_dtype(self, input_x):
        validator.check_tensor_type_same({'input_x': input_x}, mstype.float_type, self.name)
        return input_x


class ReLU(PrimitiveWithInfer):
    r"""
    Computes ReLU (Rectified Linear Unit) of input tensor element-wise.
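
A minimal usage sketch of the new primitive, assuming a MindSpore build that includes this change and a suitably configured device target; the expected values follow the docstring example above:

import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops import operations as P

softsign = P.Softsign()
output = softsign(Tensor(np.array([0, -1, 2, 30, -30]), mindspore.float32))
# expected (per the docstring): [0. -0.5 0.6666667 0.9677419 -0.9677419]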


tests/ut/python/ops/test_ops.py (+4 -0)

@@ -1376,6 +1376,10 @@ test_case_nn_ops = [
        'block': P.Softmax(),
        'desc_inputs': [[5, 5]],
        'desc_bprop': [[5, 5]]}),
    ('Softsign', {
        'block': P.Softsign(),
        'desc_inputs': [[5, 5]],
        'desc_bprop': [[5, 5]]}),
    ('DepthwiseConv2dNative_1', {
        'block': P.DepthwiseConv2dNative(3, (3, 3), pad_mode="pad", pad=1, stride=2),
        'desc_inputs': [[10, 32, 32, 32], [1, 32, 3, 3]],
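
Roughly, 'desc_inputs' and 'desc_bprop' give the shapes of the forward input and of the gradient sense tensor fed to the registered bprop. An illustrative NumPy analogue of what the new table entry exercises (the real harness drives P.Softsign and its bprop through MindSpore):

import numpy as np

x = np.random.randn(5, 5).astype(np.float32)     # desc_inputs: [[5, 5]]
dout = np.random.randn(5, 5).astype(np.float32)  # desc_bprop: [[5, 5]]

out = x / (1 + np.abs(x))             # forward
dx = dout / np.square(1 + np.abs(x))  # backward rule from grad_nn_ops.py
assert out.shape == dx.shape == (5, 5)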

