From ad4996d528e8dfbeb36df7c40bb89e001c019e1f Mon Sep 17 00:00:00 2001
From: liuwenhao4
Date: Mon, 1 Jun 2020 19:14:05 +0800
Subject: [PATCH] Register forward and backward operations of Ceil

---
 mindspore/ops/_grad/grad_math_ops.py   | 14 ++++++++++++++
 mindspore/ops/_op_impl/tbe/__init__.py |  1 +
 mindspore/ops/_op_impl/tbe/ceil.py     | 36 ++++++++++++++++++++++++++++++++++++
 mindspore/ops/operations/__init__.py   |  5 +++--
 mindspore/ops/operations/math_ops.py   | 29 +++++++++++++++++++++++++
 tests/ut/python/ops/test_ops.py        |  5 +++++
 6 files changed, 88 insertions(+), 2 deletions(-)
 create mode 100644 mindspore/ops/_op_impl/tbe/ceil.py

diff --git a/mindspore/ops/_grad/grad_math_ops.py b/mindspore/ops/_grad/grad_math_ops.py
index 07bfb2c7af..6a484ae012 100755
--- a/mindspore/ops/_grad/grad_math_ops.py
+++ b/mindspore/ops/_grad/grad_math_ops.py
@@ -254,6 +254,20 @@ def get_bprop_floor(self):
     shape_ = P.Shape()
     dtype_ = P.DType()
 
+    def bprop(x, out, dout):
+        bc_x = fill_(dtype_(x), shape_(x), 0.)
+        return (bc_x,)
+
+    return bprop
+
+
+@bprop_getters.register(P.Ceil)
+def get_bprop_ceil(self):
+    """Grad definition for `ceil` operation."""
+    fill_ = P.Fill()
+    shape_ = P.Shape()
+    dtype_ = P.DType()
+
     def bprop(x, out, dout):
         bc_x = fill_(dtype_(x), shape_(x), 0.)
         return (bc_x,)
diff --git a/mindspore/ops/_op_impl/tbe/__init__.py b/mindspore/ops/_op_impl/tbe/__init__.py
index d7694f820f..12e95cf781 100644
--- a/mindspore/ops/_op_impl/tbe/__init__.py
+++ b/mindspore/ops/_op_impl/tbe/__init__.py
@@ -178,6 +178,7 @@ from .space_to_batch import _space_to_batch_tbe
 from .depth_to_space import _depth_to_space_tbe
 from .space_to_depth import _space_to_depth_tbe
 from .floor import _floor_tbe
+from .ceil import _ceil_tbe
 from .log1p import _log1p_tbe
 from .resize_bilinear import _resize_bilinear_tbe
 from .resize_bilinear_grad import _resize_bilinear_grad_tbe
diff --git a/mindspore/ops/_op_impl/tbe/ceil.py b/mindspore/ops/_op_impl/tbe/ceil.py
new file mode 100644
index 0000000000..d9a127603f
--- /dev/null
+++ b/mindspore/ops/_op_impl/tbe/ceil.py
@@ -0,0 +1,36 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================

+"""Ceil op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+ceil_op_info = TBERegOp("Ceil") \
+    .fusion_type("ELEMWISE") \
+    .async_flag(False) \
+    .binfile_name("ceil.so") \
+    .compute_cost(10) \
+    .kernel_name("ceil") \
+    .partial_flag(True) \
+    .input(0, "x", False, "required", "all") \
+    .output(0, "y", False, "required", "all") \
+    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
+    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
+    .get_op_info()
+
+
+@op_info_register(ceil_op_info)
+def _ceil_tbe():
+    """Ceil TBE register"""
+    return
diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py
index f00ddb90a0..15e33c6823 100644
--- a/mindspore/ops/operations/__init__.py
+++ b/mindspore/ops/operations/__init__.py
@@ -43,8 +43,8 @@ from .inner_ops import ScalarCast
 from .math_ops import (Abs, ACos, Asin, Asinh, AddN, AssignAdd, AssignSub, Atan2, BatchMatMul, BitwiseAnd, BitwiseOr,
                        BitwiseXor,
                        ReduceMax, ReduceMin, ReduceMean, ReduceSum, ReduceAll, ReduceProd, CumProd,
-                       Cos, Div, DivNoNan, Equal, EqualCount, Exp, Expm1, Erf, Erfc, Floor, FloorDiv, FloorMod, Acosh,
-                       Greater, GreaterEqual, Less, LessEqual, Log, Log1p, LogicalAnd,
+                       Cos, Div, DivNoNan, Equal, EqualCount, Exp, Expm1, Erf, Erfc, Floor, FloorDiv, FloorMod, Ceil,
+                       Acosh, Greater, GreaterEqual, Less, LessEqual, Log, Log1p, LogicalAnd,
                        LogicalNot, LogicalOr, MatMul, Maximum,
                        Minimum, Mul, Neg, NMSWithMask, NotEqual,
                        NPUAllocFloatStatus, NPUClearFloatStatus,
@@ -243,6 +243,7 @@ __all__ = [
     'SigmoidCrossEntropyWithLogits',
     'FloorDiv',
     'FloorMod',
+    'Ceil',
     'Acosh',
     'Asinh',
     "PReLU",
diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py
index c2d3e9678a..8322ae2007 100644
--- a/mindspore/ops/operations/math_ops.py
+++ b/mindspore/ops/operations/math_ops.py
@@ -1401,6 +1401,35 @@ class FloorMod(_MathBinaryOp):
     """
 
 
+class Ceil(PrimitiveWithInfer):
+    """
+    Round a tensor up to the closest integer element-wise.
+
+    Inputs:
+        - **input_x** (Tensor) - The input tensor. Its element data type must be float.
+
+    Outputs:
+        Tensor, has the same shape as `input_x`.
+
+    Examples:
+        >>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
+        >>> ceil_op = P.Ceil()
+        >>> ceil_op(input_x)
+        [2.0, 3.0, -1.0]
+    """
+
+    @prim_attr_register
+    def __init__(self):
+        self.init_prim_io_names(inputs=['x'], outputs=['y'])
+
+    def infer_shape(self, x_shape):
+        return x_shape
+
+    def infer_dtype(self, x_dtype):
+        validator.check_tensor_type_same({"x": x_dtype}, mstype.float_type, self.name)
+        return x_dtype
+
+
 class Acosh(PrimitiveWithInfer):
     """
     Compute inverse hyperbolic cosine of x element-wise.
diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py
index 91315f692e..f9b7ee6483 100755
--- a/tests/ut/python/ops/test_ops.py
+++ b/tests/ut/python/ops/test_ops.py
@@ -402,6 +402,11 @@ test_case_math_ops = [
         'desc_inputs': [[2, 512, 56, 56]],
         'desc_bprop': [[2, 512, 56, 56]],
         'skip': ['backward']}),
+    ('Ceil', {
+        'block': P.Ceil(),
+        'desc_inputs': [[2, 512, 56, 56]],
+        'desc_bprop': [[2, 512, 56, 56]],
+        'skip': ['backward']}),
     ('ACos', {
         'block': P.ACos(),
         'desc_inputs': [Tensor(np.array([2., 3.]).astype(np.float32))],
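
Usage note (editor's addition, not part of the patch): the sketch below shows how the
registrations above fit together. The forward `P.Ceil` primitive comes from math_ops.py;
the backward pass comes from `get_bprop_ceil`, which returns a zero-filled tensor because
ceil is a step function, piecewise constant with derivative zero almost everywhere. The
unit test above skips the backward pass, so this exercises it directly. This is a minimal
sketch against the 2020-era MindSpore API, where `C.GradOperation` took a name argument;
the cell names `CeilNet` and `GradNet` are illustrative, not from the patch.

    import numpy as np
    import mindspore
    from mindspore import Tensor, nn
    from mindspore.ops import composite as C
    from mindspore.ops import operations as P


    class CeilNet(nn.Cell):
        """Wraps the new Ceil primitive so it can be differentiated."""

        def __init__(self):
            super(CeilNet, self).__init__()
            self.ceil = P.Ceil()

        def construct(self, x):
            return self.ceil(x)


    class GradNet(nn.Cell):
        """Differentiates the wrapped network w.r.t. its input."""

        def __init__(self, network):
            super(GradNet, self).__init__()
            self.grad = C.GradOperation('grad')
            self.network = network

        def construct(self, x):
            return self.grad(self.network)(x)


    net = CeilNet()
    x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
    print(net(x))           # forward: [2. 3. -1.]
    print(GradNet(net)(x))  # backward: [0. 0. 0.], from get_bprop_ceil

Defining the gradient as all zeros mirrors the existing `Floor` registration: the true
derivative is zero everywhere except at integer inputs, where ceil is non-differentiable,
so a zero bprop is the standard convention for this family of rounding ops.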