@@ -254,6 +254,21 @@ def get_bprop_floor(self):
     shape_ = P.Shape()
     dtype_ = P.DType()
 
+    def bprop(x, out, dout):
+        bc_x = fill_(dtype_(x), shape_(x), 0.)
+        return (bc_x,)
+
+    return bprop
+
+
+@bprop_getters.register(P.Ceil)
+def get_bprop_ceil(self):
+    """Grad definition for `ceil` operation."""
+    fill_ = P.Fill()
+    shape_ = P.Shape()
+    dtype_ = P.DType()
+
     def bprop(x, out, dout):
         bc_x = fill_(dtype_(x), shape_(x), 0.)
         return (bc_x,)
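Note on the backward rule above: ceil, like floor, is piecewise constant, so its derivative is zero wherever it is defined, and the bprop simply propagates a zero-filled tensor with the input's shape and dtype. A minimal NumPy sketch of the same rule (plain NumPy with hypothetical helper names, not the MindSpore registration machinery):

```python
import numpy as np

def ceil_forward(x):
    # Forward: round every element up to the nearest integer.
    return np.ceil(x)

def ceil_backward(x, dout):
    # ceil is flat between integers, so its gradient is zero almost
    # everywhere; the incoming gradient `dout` is ignored and zeros of
    # the input's shape/dtype are returned, mirroring
    # fill_(dtype_(x), shape_(x), 0.) in the bprop above.
    return np.zeros_like(x)

x = np.array([1.1, 2.5, -1.5], dtype=np.float32)
print(ceil_forward(x))                    # [ 2.  3. -1.]
print(ceil_backward(x, np.ones_like(x)))  # [0. 0. 0.]
```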
@@ -178,6 +178,7 @@ from .space_to_batch import _space_to_batch_tbe
 from .depth_to_space import _depth_to_space_tbe
 from .space_to_depth import _space_to_depth_tbe
 from .floor import _floor_tbe
+from .ceil import _ceil_tbe
 from .log1p import _log1p_tbe
 from .resize_bilinear import _resize_bilinear_tbe
 from .resize_bilinear_grad import _resize_bilinear_grad_tbe
@@ -0,0 +1,36 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Ceil op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+ceil_op_info = TBERegOp("Ceil") \
+    .fusion_type("ELEMWISE") \
+    .async_flag(False) \
+    .binfile_name("ceil.so") \
+    .compute_cost(10) \
+    .kernel_name("ceil") \
+    .partial_flag(True) \
+    .input(0, "x", False, "required", "all") \
+    .output(0, "y", False, "required", "all") \
+    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
+    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
+    .get_op_info()
+
+
+@op_info_register(ceil_op_info)
+def _ceil_tbe():
+    """Ceil TBE register"""
+    return
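The new file follows the standard TBE registration pattern: TBERegOp is a chained builder that records which kernel binary (`ceil.so`) and kernel name back the Ceil primitive on Ascend, along with its required input `x`, output `y`, and the supported float16/float32 dtype formats; `@op_info_register` then attaches that record to the stub function. As a rough illustration of the builder idea only (a toy stand-in, not MindSpore's TBERegOp implementation):

```python
# Toy builder in the spirit of the chained TBERegOp calls above.
# Hypothetical class, for illustration only.
class ToyRegOp:
    def __init__(self, op_name):
        self._info = {"op_name": op_name, "inputs": [], "outputs": [], "dtypes": []}

    def kernel_name(self, name):
        self._info["kernel_name"] = name
        return self  # returning self is what allows the call chain

    def input(self, index, name):
        self._info["inputs"].append((index, name))
        return self

    def output(self, index, name):
        self._info["outputs"].append((index, name))
        return self

    def dtype_format(self, *formats):
        self._info["dtypes"].append(formats)
        return self

    def get_op_info(self):
        return self._info

info = (ToyRegOp("Ceil")
        .kernel_name("ceil")
        .input(0, "x")
        .output(0, "y")
        .dtype_format("float16", "float16")
        .dtype_format("float32", "float32")
        .get_op_info())
print(info["kernel_name"], len(info["dtypes"]))  # ceil 2
```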
@@ -43,8 +43,8 @@ from .inner_ops import ScalarCast
 from .math_ops import (Abs, ACos, Asin, Asinh, AddN, AssignAdd, AssignSub, Atan2, BatchMatMul, BitwiseAnd, BitwiseOr,
                        BitwiseXor,
                        ReduceMax, ReduceMin, ReduceMean, ReduceSum, ReduceAll, ReduceProd, CumProd,
-                       Cos, Div, DivNoNan, Equal, EqualCount, Exp, Expm1, Erf, Erfc, Floor, FloorDiv, FloorMod, Acosh,
-                       Greater, GreaterEqual, Less, LessEqual, Log, Log1p, LogicalAnd,
+                       Cos, Div, DivNoNan, Equal, EqualCount, Exp, Expm1, Erf, Erfc, Floor, FloorDiv, FloorMod, Ceil,
+                       Acosh, Greater, GreaterEqual, Less, LessEqual, Log, Log1p, LogicalAnd,
                        LogicalNot, LogicalOr, MatMul, Maximum,
                        Minimum, Mul, Neg, NMSWithMask, NotEqual,
                        NPUAllocFloatStatus, NPUClearFloatStatus,
@@ -243,6 +243,7 @@ __all__ = [
     'SigmoidCrossEntropyWithLogits',
     'FloorDiv',
     'FloorMod',
+    'Ceil',
     'Acosh',
     'Asinh',
     "PReLU",
@@ -1401,6 +1401,35 @@ class FloorMod(_MathBinaryOp):
     """
 
 
+class Ceil(PrimitiveWithInfer):
+    """
+    Round a tensor up to the closest integer element-wise.
+
+    Inputs:
+        - **input_x** (Tensor) - The input tensor. Its element data type must be float.
+
+    Outputs:
+        Tensor, has the same shape as `input_x`.
+
+    Examples:
+        >>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
+        >>> ceil_op = P.Ceil()
+        >>> ceil_op(input_x)
+        [2.0, 3.0, -1.0]
+    """
+
+    @prim_attr_register
+    def __init__(self):
+        self.init_prim_io_names(inputs=['x'], outputs=['y'])
+
+    def infer_shape(self, x_shape):
+        return x_shape
+
+    def infer_dtype(self, x_dtype):
+        validator.check_tensor_type_same({"x": x_dtype}, mstype.float_type, self.name)
+        return x_dtype
+
+
 class Acosh(PrimitiveWithInfer):
     """
     Compute inverse hyperbolic cosine of x element-wise.
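Putting the pieces together, the primitive is used like any other element-wise math op. The snippet below just restates the docstring example and assumes a MindSpore build whose backend ships the registered Ceil kernel; note that `infer_dtype` checks the input against `mstype.float_type`, so integer tensors are rejected at graph-construction time.

```python
import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops import operations as P

# Same values as the docstring example above.
input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
ceil_op = P.Ceil()
output = ceil_op(input_x)
print(output)  # expected: [ 2.  3. -1.]
```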
@@ -402,6 +402,11 @@ test_case_math_ops = [
         'desc_inputs': [[2, 512, 56, 56]],
         'desc_bprop': [[2, 512, 56, 56]],
         'skip': ['backward']}),
+    ('Ceil', {
+        'block': P.Ceil(),
+        'desc_inputs': [[2, 512, 56, 56]],
+        'desc_bprop': [[2, 512, 56, 56]],
+        'skip': ['backward']}),
     ('ACos', {
        'block': P.ACos(),
        'desc_inputs': [Tensor(np.array([2., 3.]).astype(np.float32))],
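For the test entry, the shape lists in 'desc_inputs'/'desc_bprop' describe the tensors the ops-test pipeline builds for the forward and backward checks, and 'skip': ['backward'] leaves the gradient stage out (the zero gradient is already defined by the bprop above). If I read the harness correctly, the forward part of the entry amounts to roughly the following call, shown here with a hypothetical random input as an approximation of what the pipeline does:

```python
import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

# Approximate forward check: a random float32 tensor with the declared shape.
x = Tensor(np.random.rand(2, 512, 56, 56).astype(np.float32))
out = P.Ceil()(x)
print(out.shape)  # (2, 512, 56, 56)
```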