| @@ -232,6 +232,21 @@ def get_bprop_div(self): | |||
| return bprop | |||
@bprop_getters.register(P.DivNoNan)
def get_bprop_div_no_nan(self):
    """Grad definition for `DivNoNan` operation."""
    divide = P.DivNoNan()
    negate = P.Neg()
    multiply = P.Mul()

    def bprop(x, y, out, dout):
        # d(x/y)/dx = 1/y; using DivNoNan here masks the gradient to 0
        # wherever y == 0, matching the forward op's behavior.
        grad_x = divide(dout, y)
        # d(x/y)/dy = -x/y^2 = -(x/y) * (dout/y); reuse out and grad_x.
        grad_y = negate(multiply(grad_x, out))
        return binop_grad_common(x, y, grad_x, grad_y)

    return bprop
| @bprop_getters.register(P.Floor) | |||
| def get_bprop_floor(self): | |||
| """Grad definition for `floor` operation.""" | |||
| @@ -225,6 +225,7 @@ from .asin import _asin_tbe | |||
| from .asin_grad import _asin_grad_tbe | |||
| from .asinh import _asinh_tbe | |||
| from .asinh_grad import _asinh_grad_tbe | |||
| from .div_no_nan import _div_no_nan_tbe | |||
| from .atan import _atan_tbe | |||
| from .atan_grad import _atan_grad_tbe | |||
| from .atanh import _atanh_tbe | |||
| @@ -0,0 +1,45 @@ | |||
| # Copyright 2020 Huawei Technologies Co., Ltd | |||
| # | |||
| # Licensed under the Apache License, Version 2.0 (the "License"); | |||
| # you may not use this file except in compliance with the License. | |||
| # You may obtain a copy of the License at | |||
| # | |||
| # http://www.apache.org/licenses/LICENSE-2.0 | |||
| # | |||
| # Unless required by applicable law or agreed to in writing, software | |||
| # distributed under the License is distributed on an "AS IS" BASIS, | |||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| # See the License for the specific language governing permissions and | |||
| # limitations under the License. | |||
| # ============================================================================ | |||
| """DivNoNan op""" | |||
| from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType | |||
# TBE registration info for DivNoNan: elementwise x1 / x2 that yields 0
# wherever x2 is 0. Each .dtype_format(...) line declares one supported
# (x1, x2, y) dtype/layout triple; "_5HD" presumably refers to the 5-D
# NC1HWC0 layout used by the Ascend backend — confirm against DataType.
div_no_nan_op_info = TBERegOp("DivNoNan") \
    .fusion_type("ELEMWISE") \
    .async_flag(False) \
    .binfile_name("div_no_nan.so") \
    .compute_cost(10) \
    .kernel_name("div_no_nan") \
    .partial_flag(True) \
    .input(0, "x1", False, "required", "all") \
    .input(1, "x2", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.I8_Default, DataType.I8_Default, DataType.I8_Default) \
    .dtype_format(DataType.I8_5HD, DataType.I8_5HD, DataType.I8_5HD) \
    .dtype_format(DataType.U8_Default, DataType.U8_Default, DataType.U8_Default) \
    .dtype_format(DataType.U8_5HD, DataType.U8_5HD, DataType.U8_5HD) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
    .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \
    .get_op_info()


# Decorator attaches the registration info above; the function body itself
# is intentionally empty — it exists only as a registration hook.
@op_info_register(div_no_nan_op_info)
def _div_no_nan_tbe():
    """DivNoNan TBE register"""
    return
| @@ -43,7 +43,7 @@ from .inner_ops import ScalarCast | |||
| from .math_ops import (Abs, ACos, Asin, Asinh, AddN, AssignAdd, AssignSub, Atan2, BatchMatMul, BitwiseAnd, BitwiseOr, | |||
| BitwiseXor, | |||
| ReduceMax, ReduceMin, ReduceMean, ReduceSum, ReduceAll, ReduceProd, CumProd, | |||
| Cos, Div, Equal, EqualCount, Exp, Expm1, Erf, Erfc, Floor, FloorDiv, FloorMod, Acosh, | |||
| Cos, Div, DivNoNan, Equal, EqualCount, Exp, Expm1, Erf, Erfc, Floor, FloorDiv, FloorMod, Acosh, | |||
| Greater, GreaterEqual, Less, LessEqual, Log, Log1p, LogicalAnd, | |||
| LogicalNot, LogicalOr, MatMul, Maximum, | |||
| Minimum, Mul, Neg, NMSWithMask, NotEqual, | |||
| @@ -141,6 +141,7 @@ __all__ = [ | |||
| 'LessEqual', | |||
| 'RealDiv', | |||
| 'Div', | |||
| 'DivNoNan', | |||
| 'TruncatedNormal', | |||
| 'Fill', | |||
| 'OnesLike', | |||
| @@ -1217,8 +1217,8 @@ class RealDiv(_MathBinaryOp): | |||
| and the type of the scalar is the same as the data type of the tensor. | |||
| Inputs: | |||
| - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number. | |||
| - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or | |||
| - **input_x** (Union[Tensor, Number]) - The first input is a number or a tensor whose data type is number. | |||
| - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is the same as 'input_x' or | |||
| a number. | |||
| Outputs: | |||
| @@ -1253,12 +1253,12 @@ class Div(_MathBinaryOp): | |||
| and the type of the scalar is the same as the data type of the tensor. | |||
| Inputs: | |||
| - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number. | |||
| - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or | |||
| - **input_x** (Union[Tensor, Number]) - The first input is a number or a tensor whose data type is number. | |||
| - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is the same as 'input_x' or | |||
| a number. | |||
| Outputs: | |||
| Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'. | |||
| Tensor, the shape is same as the shape after broadcasting, and the data type is the same as 'input_x'. | |||
| Raises: | |||
| ValueError: When `input_x` and `input_y` are not the same dtype. | |||
| @@ -1278,6 +1278,46 @@ class Div(_MathBinaryOp): | |||
| return None | |||
class DivNoNan(_MathBinaryOp):
    """
    Computes the first input divided by the second input elementwise,
    returning 0 wherever the divisor is zero.

    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors, the shapes of them could be broadcast,
    and the data types of them should be same.
    When the inputs are one tensor and one scalar, the scalar cannot be a parameter,
    only can be a constant, and the type of the scalar is the same as the data type
    of the tensor.

    Inputs:
        - **input_x** (Union[Tensor, Number]) - The first input is a number or a tensor whose data type is number.
        - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is the same as
          'input_x' or a number.

    Outputs:
        Tensor, the shape is the same as the shape after broadcasting,
        and the data type is the same as 'input_x'.

    Raises:
        ValueError: When `input_x` and `input_y` are not the same dtype.

    Examples:
        >>> input_x = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
        >>> input_y = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32)
        >>> div_no_nan = P.DivNoNan()
        >>> div_no_nan(input_x, input_y)
        [0., 0., 0., 2.5, 2.0]
    """

    def infer_value(self, x, y):
        """Constant-fold the op when both inputs are known at compile time; otherwise return None."""
        if x is None or y is None:
            return None
        dividend = x.asnumpy()
        divisor = y.asnumpy()
        with np.errstate(divide='ignore', invalid='ignore'):
            quotient = np.true_divide(dividend, divisor)
            # Results that are not finite (inf/-inf from division by zero,
            # nan from 0/0) are mapped to 0, per the DivNoNan contract.
            quotient[~np.isfinite(quotient)] = 0
        return quotient
| class FloorDiv(_MathBinaryOp): | |||
| """ | |||
| Divide the first input tensor by the second input tensor element-wise and rounds down to the closest integer. | |||