Merge pull request !1508 from zhaozhenlong/op/bitwise-and-or-xor
@@ -111,6 +111,7 @@ number_type = (int8,
                float64,)
 int_type = (int8, int16, int32, int64,)
+uint_type = (uint8, uint16, uint32, uint64)
 float_type = (float16, float32, float64,)

 _simple_types = {
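The added `uint_type` tuple is what lets the bitwise primitives further down build their allowed-dtype set as `mstype.int_type + mstype.uint_type`. A minimal sketch of that allow-list pattern, using string stand-ins for the real MindSpore dtype objects (illustrative only):

# Illustrative stand-ins for the dtype tuples; the real entries are
# mstype.int8, mstype.uint8, ... rather than strings.
int_type = ("int8", "int16", "int32", "int64")
uint_type = ("uint8", "uint16", "uint32", "uint64")

# The bitwise ops validate inputs against the concatenation of both tuples.
valid_types = int_type + uint_type

def is_valid_bitwise_dtype(dtype_name):
    """Return True if dtype_name is an integer type accepted by the bitwise ops."""
    return dtype_name in valid_types

assert is_valid_bitwise_dtype("uint16")
assert not is_valid_bitwise_dtype("float32")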
@@ -202,3 +202,6 @@ from .scatter_add import _scatter_add_tbe
 from .atan2 import _atan2_tbe
 from .batch_to_space_nd import _batch_to_space_nd_tbe
 from .space_to_batch_nd import _space_to_batch_nd_tbe
+from .bitwise_and import bitwise_and_op_info
+from .bitwise_or import bitwise_or_op_info
+from .bitwise_xor import bitwise_xor_op_info
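These imports matter because each new module runs its `@op_info_register` decorator at import time (see the new files below); without the import, the TBE kernel descriptions would never be registered. A simplified, hypothetical sketch of decorator-based registration at import time — not the actual MindSpore implementation:

# Hypothetical, simplified registry; the real op_info_register in
# mindspore.ops.op_info_register does more than store a dict entry.
_OP_INFO_REGISTRY = {}

def op_info_register(op_info):
    """Record op_info when the decorated stub is defined, i.e. at module import."""
    def decorator(func):
        _OP_INFO_REGISTRY[func.__name__] = op_info
        return func
    return decorator

@op_info_register({"op_name": "BitwiseAnd", "binfile_name": "bitwise_and.so"})
def _bitwise_and_tbe():
    """BitwiseAnd TBE register stub"""
    return

assert "_bitwise_and_tbe" in _OP_INFO_REGISTRY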
@@ -0,0 +1,37 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""BitwiseAnd op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

bitwise_and_op_info = TBERegOp("BitwiseAnd") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("bitwise_and.so") \
    .compute_cost(10) \
    .kernel_name("bitwise_and") \
    .partial_flag(True) \
    .input(0, "x1", False, "required", "all") \
    .input(1, "x2", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.I16_Default, DataType.I16_Default, DataType.I16_Default) \
    .dtype_format(DataType.U16_Default, DataType.U16_Default, DataType.U16_Default) \
    .get_op_info()


@op_info_register(bitwise_and_op_info)
def _bitwise_and_tbe():
    """BitwiseAnd TBE register"""
    return
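The registration above declares only int16 and uint16 input/output formats, matching the two `dtype_format` lines. The BitwiseOr and BitwiseXor files below are identical apart from the op name, the kernel binary, and the kernel name; a hypothetical helper (not part of this PR) could factor out the shared builder chain:

# Hypothetical helper that builds the common TBERegOp chain shared by the
# bitwise_and/or/xor registrations; only the op and kernel names differ.
from mindspore.ops.op_info_register import TBERegOp, DataType

def _bitwise_reg_op(op_name, kernel_name):
    """Build the op info shared by the three bitwise kernel registrations."""
    return TBERegOp(op_name) \
        .fusion_type("OPAQUE") \
        .async_flag(False) \
        .binfile_name(kernel_name + ".so") \
        .compute_cost(10) \
        .kernel_name(kernel_name) \
        .partial_flag(True) \
        .input(0, "x1", False, "required", "all") \
        .input(1, "x2", False, "required", "all") \
        .output(0, "y", False, "required", "all") \
        .dtype_format(DataType.I16_Default, DataType.I16_Default, DataType.I16_Default) \
        .dtype_format(DataType.U16_Default, DataType.U16_Default, DataType.U16_Default) \
        .get_op_info()

bitwise_and_op_info = _bitwise_reg_op("BitwiseAnd", "bitwise_and")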
@@ -0,0 +1,37 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""BitwiseOr op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

bitwise_or_op_info = TBERegOp("BitwiseOr") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("bitwise_or.so") \
    .compute_cost(10) \
    .kernel_name("bitwise_or") \
    .partial_flag(True) \
    .input(0, "x1", False, "required", "all") \
    .input(1, "x2", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.I16_Default, DataType.I16_Default, DataType.I16_Default) \
    .dtype_format(DataType.U16_Default, DataType.U16_Default, DataType.U16_Default) \
    .get_op_info()


@op_info_register(bitwise_or_op_info)
def _bitwise_or_tbe():
    """BitwiseOr TBE register"""
    return
@@ -0,0 +1,37 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""BitwiseXor op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

bitwise_xor_op_info = TBERegOp("BitwiseXor") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("bitwise_xor.so") \
    .compute_cost(10) \
    .kernel_name("bitwise_xor") \
    .partial_flag(True) \
    .input(0, "x1", False, "required", "all") \
    .input(1, "x2", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.I16_Default, DataType.I16_Default, DataType.I16_Default) \
    .dtype_format(DataType.U16_Default, DataType.U16_Default, DataType.U16_Default) \
    .get_op_info()


@op_info_register(bitwise_xor_op_info)
def _bitwise_xor_tbe():
    """BitwiseXor TBE register"""
    return
@@ -39,7 +39,7 @@ from .debug_ops import (ImageSummary, InsertGradientOf, HookBackward, ScalarSumm
                          TensorSummary, HistogramSummary, Print)
 from .control_ops import ControlDepend, GeSwitch, Merge
 from .inner_ops import ScalarCast
-from .math_ops import (Abs, ACos, AddN, AssignAdd, AssignSub, Atan2, BatchMatMul,
+from .math_ops import (Abs, ACos, AddN, AssignAdd, AssignSub, Atan2, BatchMatMul, BitwiseAnd, BitwiseOr, BitwiseXor,
                        ReduceMax, ReduceMin, ReduceMean, ReduceSum, ReduceAll, ReduceProd, CumProd,
                        Cos, Div, Equal, EqualCount, Exp, Erf, Erfc, Floor, FloorDiv, FloorMod, Acosh,
                        Greater, GreaterEqual, Less, LessEqual, Log, Log1p, LogicalAnd,
@@ -267,7 +267,10 @@ __all__ = [
     "ApplyCenteredRMSProp",
     "SpaceToBatchND",
     "BatchToSpaceND",
-    "SquareSumAll"
+    "SquareSumAll",
+    "BitwiseAnd",
+    "BitwiseOr",
+    "BitwiseXor"
 ]

 __all__.extend(_quant_ops.__all__)
@@ -71,7 +71,7 @@ class _BinaryOp(PrimitiveWithInfer):
     @prim_attr_register
     def __init__(self):
-        """init _MathBinaryOp"""
+        """init _BinaryOp"""
         self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])

     def infer_shape(self, x_shape, y_shape):
@@ -93,6 +93,27 @@ class _MathBinaryOp(_BinaryOp):
         return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type, self.name)
+
+
+class _BitwiseBinaryOp(_MathBinaryOp):
+    """
+    Define bitwise binary operators.
+    """
+
+    @prim_attr_register
+    def __init__(self):
+        """init _BitwiseBinaryOp"""
+        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
+
+    @staticmethod
+    def _check_bitwise_op_input_type(x1_type, x2_type, prim):
+        args = {'x1': x1_type, 'x2': x2_type}
+        valid_types = mstype.int_type + mstype.uint_type
+        validator.check_tensor_type_same(args, valid_types, prim)
+        return x1_type
+
+    def infer_dtype(self, x1_type, x2_type):
+        return _BitwiseBinaryOp._check_bitwise_op_input_type(x1_type, x2_type, self.name)


 class TensorAdd(_MathBinaryOp):
     """
     Adds two input tensors element-wise.
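`infer_dtype` requires both inputs to be tensors of one shared integer (int or uint) type and propagates that type to the output. A standalone sketch of the same rule using NumPy dtypes instead of the MindSpore validator (illustrative only):

# Standalone illustration of the dtype rule enforced by _BitwiseBinaryOp.infer_dtype.
import numpy as np

_VALID_BITWISE_DTYPES = (np.int8, np.int16, np.int32, np.int64,
                         np.uint8, np.uint16, np.uint32, np.uint64)

def check_bitwise_dtypes(x1, x2, prim_name="BitwiseOp"):
    """Both inputs must share one integer dtype; the result keeps that dtype."""
    if x1.dtype != x2.dtype:
        raise TypeError(f"For '{prim_name}', x1 and x2 must have the same dtype.")
    if x1.dtype.type not in _VALID_BITWISE_DTYPES:
        raise TypeError(f"For '{prim_name}', dtype {x1.dtype} is not an int or uint type.")
    return x1.dtype

check_bitwise_dtypes(np.array([1], np.int16), np.array([2], np.int16))  # passes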
@@ -2183,3 +2204,63 @@ class SquareSumAll(PrimitiveWithInfer):
         validator.check_tensor_type_same({'x1_type': x_type}, [mstype.float16, mstype.float32], self.name)
         validator.check_tensor_type_same({'x2_type': y_type}, [mstype.float16, mstype.float32], self.name)
         return x_type, y_type
+
+
+class BitwiseAnd(_BitwiseBinaryOp):
+    """
+    Returns bitwise `and` of two tensors element-wise.
+
+    Inputs:
+        - **input_x1** (Tensor) - The input tensor with int or uint type.
+        - **input_x2** (Tensor) - The input tensor with the same type as `input_x1`.
+
+    Outputs:
+        - **y** (Tensor) - Tensor, has the same type as `input_x1`.
+
+    Examples:
+        >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
+        >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)
+        >>> bitwise_and = P.BitwiseAnd()
+        >>> bitwise_and(input_x1, input_x2)
+        [0, 0, 1, -1, 1, 0, 1]
+    """
+
+
+class BitwiseOr(_BitwiseBinaryOp):
+    """
+    Returns bitwise `or` of two tensors element-wise.
+
+    Inputs:
+        - **input_x1** (Tensor) - The input tensor with int or uint type.
+        - **input_x2** (Tensor) - The input tensor with the same type as `input_x1`.
+
+    Outputs:
+        - **y** (Tensor) - Tensor, has the same type as `input_x1`.
+
+    Examples:
+        >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
+        >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)
+        >>> bitwise_or = P.BitwiseOr()
+        >>> bitwise_or(input_x1, input_x2)
+        [0, 1, 1, -1, -1, 3, 3]
+    """
+
+
+class BitwiseXor(_BitwiseBinaryOp):
+    """
+    Returns bitwise `xor` of two tensors element-wise.
+
+    Inputs:
+        - **input_x1** (Tensor) - The input tensor with int or uint type.
+        - **input_x2** (Tensor) - The input tensor with the same type as `input_x1`.
+
+    Outputs:
+        - **y** (Tensor) - Tensor, has the same type as `input_x1`.
+
+    Examples:
+        >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
+        >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)
+        >>> bitwise_xor = P.BitwiseXor()
+        >>> bitwise_xor(input_x1, input_x2)
+        [0, 1, 0, 0, -2, 3, 2]
+    """
@@ -242,6 +242,36 @@ class ApplyRMSNet(nn.Cell):
         return out


 test_case_math_ops = [
+    ('BitwiseAnd', {
+        'block': P.BitwiseAnd(),
+        'desc_inputs': [Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16),
+                        Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)],
+        'skip': ['backward']}),
+    ('BitwiseAnd_1', {
+        'block': P.BitwiseAnd(),
+        'desc_inputs': [Tensor(np.array([[1, 2, 3], [-1, -2, -3]]), mstype.int16),
+                        Tensor(np.array([1, 1, 1]), mstype.int16)],
+        'skip': ['backward']}),
+    ('BitwiseOr', {
+        'block': P.BitwiseOr(),
+        'desc_inputs': [Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16),
+                        Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)],
+        'skip': ['backward']}),
+    ('BitwiseOr_1', {
+        'block': P.BitwiseOr(),
+        'desc_inputs': [Tensor(np.array([[1, 2, 3], [-1, -2, -3]]), mstype.int16),
+                        Tensor(np.array([1, 1, 1]), mstype.int16)],
+        'skip': ['backward']}),
+    ('BitwiseXor', {
+        'block': P.BitwiseXor(),
+        'desc_inputs': [Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16),
+                        Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)],
+        'skip': ['backward']}),
+    ('BitwiseXor_1', {
+        'block': P.BitwiseXor(),
+        'desc_inputs': [Tensor(np.array([[1, 2, 3], [-1, -2, -3]]), mstype.int16),
+                        Tensor(np.array([1, 1, 1]), mstype.int16)],
+        'skip': ['backward']}),
     ('Neg', {
         'block': P.Neg(),
         'desc_inputs': [[1, 3, 4, 4]],
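The `*_1` test cases above pair a (2, 3) tensor with a length-3 tensor, so they exercise broadcasting in the forward pass (the backward pass is skipped via 'skip': ['backward']). The expected broadcast result for the BitwiseAnd case, checked with NumPy:

# Expected broadcast behavior for the BitwiseAnd_1 inputs, verified with NumPy.
import numpy as np

a = np.array([[1, 2, 3], [-1, -2, -3]], dtype=np.int16)
b = np.array([1, 1, 1], dtype=np.int16)

# b is broadcast across both rows of a.
assert np.array_equal(np.bitwise_and(a, b), np.array([[1, 0, 1], [1, 0, 1]], dtype=np.int16))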