Browse Source

!841 Fix bugs in Acosh bprop and Elu bprop

Merge pull request !841 from zhangbuxue/adapt_ExtractImagePatches_and_fix_EluGrad
tags/v0.3.0-alpha
mindspore-ci-bot Gitee 5 years ago
parent
commit
728345fdd0
5 changed files with 7 additions and 5 deletions
  1. +1
    -1
      mindspore/_checkparam.py
  2. +1
    -1
      mindspore/ops/_grad/grad_math_ops.py
  3. +1
    -1
      mindspore/ops/_grad/grad_nn_ops.py
  4. +3
    -1
      mindspore/ops/operations/nn_ops.py
  5. +1
    -1
      tests/ut/python/ops/test_nn_ops_check.py

+ 1
- 1
mindspore/_checkparam.py View File

@@ -128,7 +128,7 @@ class Validator:


     @staticmethod
     def check_number(arg_name, arg_value, value, rel, prim_name):
-        """Integer value judgment."""
+        """Number value judgment."""
         rel_fn = Rel.get_fns(rel)
         if not rel_fn(arg_value, value):
             rel_str = Rel.get_strs(rel).format(value)


+ 1
- 1
mindspore/ops/_grad/grad_math_ops.py View File

@@ -727,7 +727,7 @@ def get_bprop_acosh(self):
     input_grad = G.AcoshGrad()

     def bprop(x, out, dout):
-        dx = input_grad(x, dout)
+        dx = input_grad(out, dout)
         return (dx,)
     return bprop




+ 1
- 1
mindspore/ops/_grad/grad_nn_ops.py View File

@@ -281,7 +281,7 @@ def get_bprop_elu(self):
     input_grad = G.EluGrad()

     def bprop(x, out, dout):
-        dx = input_grad(dout, x)
+        dx = input_grad(dout, out)
         return (dx,)

     return bprop


+ 3
- 1
mindspore/ops/operations/nn_ops.py View File

@@ -308,7 +308,8 @@ class Elu(PrimitiveWithInfer):
     The data type of input tensor should be float.

     Args:
-        alpha (float): The coefficient of negative factor whose type is float. Default: 1.0.
+        alpha (float): The coefficient of negative factor whose type is float,
+            only support '1.0' currently. Default: 1.0.

     Inputs:
         - **input_x** (Tensor) - The input tensor whose data type should be float.
@@ -328,6 +329,7 @@ class Elu(PrimitiveWithInfer):
     def __init__(self, alpha=1.0):
         """Init Elu"""
         validator.check_value_type("alpha", alpha, [float], self.name)
+        validator.check_number("alpha", alpha, 1.0, Rel.EQ, self.name)

     def infer_shape(self, input_x):
         return input_x


+ 1
- 1
tests/ut/python/ops/test_nn_ops_check.py View File

@@ -123,7 +123,7 @@ raise_set = [
         'skip': ['backward']}),
     # input is Tensor(int32)
     ('Elu1', {
-        'block': (P.Elu(alpha=0.9), {'exception': TypeError, 'error_keywords': ['Elu']}),
+        'block': (P.Elu(), {'exception': TypeError, 'error_keywords': ['Elu']}),
         'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32))],
         'skip': ['backward']}),




Loading…
Cancel
Save