| @@ -0,0 +1,100 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "backend/kernel_compiler/cpu/equal_cpu_kernel.h" | |||
| #include "runtime/device/cpu/cpu_device_address.h" | |||
| namespace mindspore { | |||
| namespace kernel { | |||
| void EqualCPUKernel::InitKernel(const CNodePtr &kernel_node) { | |||
| CheckParam(kernel_node); | |||
| dtype_ = AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 0); | |||
| if (dtype_ != AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 1)) { | |||
| MS_LOG(EXCEPTION) << "Input0 and input1 must has the same data type"; | |||
| } | |||
| } | |||
| bool EqualCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs, | |||
| const std::vector<kernel::AddressPtr> & /*workspace*/, | |||
| const std::vector<kernel::AddressPtr> &outputs) { | |||
| if (dtype_ == kNumberTypeBool) { | |||
| LaunchKernel<bool>(inputs, outputs); | |||
| } else if (dtype_ == kNumberTypeInt8) { | |||
| LaunchKernel<int8_t>(inputs, outputs); | |||
| } else if (dtype_ == kNumberTypeInt16) { | |||
| LaunchKernel<int16_t>(inputs, outputs); | |||
| } else if (dtype_ == kNumberTypeInt32 || dtype_ == kNumberTypeInt) { | |||
| LaunchKernel<int32_t>(inputs, outputs); | |||
| } else if (dtype_ == kNumberTypeInt64) { | |||
| LaunchKernel<int64_t>(inputs, outputs); | |||
| } else if (dtype_ == kNumberTypeUInt8) { | |||
| LaunchKernel<uint8_t>(inputs, outputs); | |||
| } else if (dtype_ == kNumberTypeUInt16) { | |||
| LaunchKernel<uint16_t>(inputs, outputs); | |||
| } else if (dtype_ == kNumberTypeUInt32 || dtype_ == kNumberTypeUInt) { | |||
| LaunchKernel<uint32_t>(inputs, outputs); | |||
| } else if (dtype_ == kNumberTypeUInt64) { | |||
| LaunchKernel<uint64_t>(inputs, outputs); | |||
| } else if (dtype_ == kNumberTypeFloat16) { | |||
| LaunchKernel<float16>(inputs, outputs); | |||
| } else if (dtype_ == kNumberTypeFloat32 || dtype_ == kNumberTypeFloat) { | |||
| LaunchKernel<float>(inputs, outputs); | |||
| } else if (dtype_ == kNumberTypeFloat64) { | |||
| LaunchKernel<double>(inputs, outputs); | |||
| } else { | |||
| MS_LOG(EXCEPTION) << "Only support bool, int, uint, float, but actual data type is " << TypeIdLabel(dtype_); | |||
| } | |||
| return true; | |||
| } | |||
| template <typename T> | |||
| void EqualCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &outputs) { | |||
| T *left = reinterpret_cast<T *>(inputs[0]->addr); | |||
| T *right = reinterpret_cast<T *>(inputs[1]->addr); | |||
| bool *output = reinterpret_cast<bool *>(outputs[0]->addr); | |||
| size_t elem_num = inputs[0]->size / sizeof(T); | |||
| for (size_t i = 0; i < elem_num; i++) { | |||
| if (left[i] == right[i]) { | |||
| output[i] = true; | |||
| } else { | |||
| output[i] = false; | |||
| } | |||
| } | |||
| } | |||
| void EqualCPUKernel::CheckParam(const CNodePtr &kernel_node) { | |||
| size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); | |||
| if (input_num != 2) { | |||
| MS_LOG(EXCEPTION) << "Input number is " << input_num << ", but EqualCPUKernel needs 2 inputs."; | |||
| } | |||
| size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node); | |||
| if (output_num != 1) { | |||
| MS_LOG(EXCEPTION) << "Output number is " << output_num << ", but EqualCPUKernel needs 1 output."; | |||
| } | |||
| auto input_shape0 = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); | |||
| auto input_shape1 = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); | |||
| if (input_shape0.size() != input_shape1.size()) { | |||
| MS_LOG(EXCEPTION) << "Input0 and Input1 must have the same shape"; | |||
| } | |||
| for (size_t i = 0; i < input_shape0.size(); ++i) { | |||
| if (input_shape0[i] != input_shape1[i]) { | |||
| MS_LOG(EXCEPTION) << "Input0 and Input1 must have the same shape"; | |||
| } | |||
| } | |||
| } | |||
| } // namespace kernel | |||
| } // namespace mindspore | |||
| @@ -0,0 +1,75 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_EQUAL_CPU_KERNEL_H_ | |||
| #define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_EQUAL_CPU_KERNEL_H_ | |||
| #include <vector> | |||
| #include <memory> | |||
| #include "backend/kernel_compiler/cpu/cpu_kernel.h" | |||
| #include "backend/kernel_compiler/cpu/cpu_kernel_factory.h" | |||
| namespace mindspore { | |||
| namespace kernel { | |||
// CPU kernel for the `Equal` operator: compares two inputs of the same shape
// and dtype element-wise and writes a bool result per element.
class EqualCPUKernel : public CPUKernel {
 public:
  EqualCPUKernel() = default;
  ~EqualCPUKernel() override = default;
  // Validates the node (via CheckParam) and caches the input dtype for Launch.
  void InitKernel(const CNodePtr &kernel_node) override;
  // Dispatches on the cached dtype to the typed LaunchKernel; workspace unused.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override;
  // Typed implementation: outputs[0][i] = (inputs[0][i] == inputs[1][i]).
  template <typename T>
  void LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &outputs);
 private:
  // Throws unless the node has exactly 2 inputs, 1 output, and matching shapes.
  void CheckParam(const CNodePtr &kernel_node);
  // Shared dtype of both inputs, set in InitKernel.
  TypeId dtype_{kTypeUnknown};
};
| MS_REG_CPU_KERNEL( | |||
| Equal, KernelAttr().AddInputAttr(kNumberTypeBool).AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeBool), | |||
| EqualCPUKernel); | |||
| MS_REG_CPU_KERNEL( | |||
| Equal, KernelAttr().AddInputAttr(kNumberTypeInt8).AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeBool), | |||
| EqualCPUKernel); | |||
| MS_REG_CPU_KERNEL( | |||
| Equal, KernelAttr().AddInputAttr(kNumberTypeInt16).AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeBool), | |||
| EqualCPUKernel); | |||
| MS_REG_CPU_KERNEL( | |||
| Equal, KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeBool), | |||
| EqualCPUKernel); | |||
| MS_REG_CPU_KERNEL( | |||
| Equal, KernelAttr().AddInputAttr(kNumberTypeUInt8).AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeBool), | |||
| EqualCPUKernel); | |||
| MS_REG_CPU_KERNEL( | |||
| Equal, KernelAttr().AddInputAttr(kNumberTypeUInt16).AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeBool), | |||
| EqualCPUKernel); | |||
| MS_REG_CPU_KERNEL( | |||
| Equal, KernelAttr().AddInputAttr(kNumberTypeUInt32).AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeBool), | |||
| EqualCPUKernel); | |||
| MS_REG_CPU_KERNEL( | |||
| Equal, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeBool), | |||
| EqualCPUKernel); | |||
| MS_REG_CPU_KERNEL( | |||
| Equal, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeBool), | |||
| EqualCPUKernel); | |||
| MS_REG_CPU_KERNEL( | |||
| Equal, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeBool), | |||
| EqualCPUKernel); | |||
| } // namespace kernel | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_EQUAL_CPU_KERNEL_H_ | |||
| @@ -0,0 +1,101 @@ | |||
| # Copyright 2020 Huawei Technologies Co., Ltd | |||
| # | |||
| # Licensed under the Apache License, Version 2.0 (the "License"); | |||
| # you may not use this file except in compliance with the License. | |||
| # You may obtain a copy of the License at | |||
| # | |||
| # http://www.apache.org/licenses/LICENSE-2.0 | |||
| # | |||
| # Unless required by applicable law or agreed to in writing, software | |||
| # distributed under the License is distributed on an "AS IS" BASIS, | |||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| # See the License for the specific language governing permissions and | |||
| # limitations under the License. | |||
| # ============================================================================ | |||
| import numpy as np | |||
| import pytest | |||
| import mindspore.context as context | |||
| import mindspore.nn as nn | |||
| from mindspore import Tensor | |||
| from mindspore.common.initializer import initializer | |||
| from mindspore.common.parameter import Parameter | |||
| from mindspore.ops import operations as P | |||
| context.set_context(mode=context.GRAPH_MODE, device_target="CPU") | |||
class NetEqualBool(nn.Cell):
    """Cell wrapping P.Equal over two fixed bool parameters.

    Uses np.bool_ instead of the deprecated np.bool alias, which was
    removed in NumPy 1.24 and raised a DeprecationWarning before that.
    """

    def __init__(self):
        super(NetEqualBool, self).__init__()
        self.equal = P.Equal()
        x = Tensor(np.array([True, True, False]).astype(np.bool_))
        y = Tensor(np.array([True, False, True]).astype(np.bool_))
        self.x = Parameter(initializer(x, x.shape), name="x")
        self.y = Parameter(initializer(y, y.shape), name="y")

    def construct(self):
        return self.equal(self.x, self.y)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_equal_bool():
    """Equal on bool tensors: only positions where both parameters agree are True.

    Uses np.bool_ instead of the deprecated np.bool alias (removed in NumPy 1.24).
    """
    Equal = NetEqualBool()
    output = Equal()
    print("================================")
    expect = np.array([True, False, False]).astype(np.bool_)
    print(output)
    assert (output.asnumpy() == expect).all()
class NetEqualInt(nn.Cell):
    """Cell wrapping P.Equal over two fixed int32 parameters."""

    def __init__(self):
        super(NetEqualInt, self).__init__()
        self.equal = P.Equal()
        lhs = Tensor(np.array([1, 20, 5], dtype=np.int32))
        rhs = Tensor(np.array([2, 20, 5], dtype=np.int32))
        self.x = Parameter(initializer(lhs, lhs.shape), name="x")
        self.y = Parameter(initializer(rhs, rhs.shape), name="y")

    def construct(self):
        return self.equal(self.x, self.y)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_equal_int():
    """Equal on int32 tensors: [1,20,5] vs [2,20,5] -> [False, True, True].

    Uses np.bool_ instead of the deprecated np.bool alias (removed in NumPy 1.24).
    """
    Equal = NetEqualInt()
    output = Equal()
    print("================================")
    expect = np.array([False, True, True]).astype(np.bool_)
    print(output)
    assert (output.asnumpy() == expect).all()
class NetEqualFloat(nn.Cell):
    """Cell wrapping P.Equal over two fixed float32 parameters."""

    def __init__(self):
        super(NetEqualFloat, self).__init__()
        self.equal = P.Equal()
        lhs = Tensor(np.array([1.2, 10.4, 5.5], dtype=np.float32))
        rhs = Tensor(np.array([1.2, 10.3, 5.4], dtype=np.float32))
        self.x = Parameter(initializer(lhs, lhs.shape), name="x")
        self.y = Parameter(initializer(rhs, rhs.shape), name="y")

    def construct(self):
        return self.equal(self.x, self.y)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_equal_float():
    """Equal on float32 tensors: only the first pair (1.2 == 1.2) matches.

    Uses np.bool_ instead of the deprecated np.bool alias (removed in NumPy 1.24).
    """
    Equal = NetEqualFloat()
    output = Equal()
    print("================================")
    expect = np.array([True, False, False]).astype(np.bool_)
    print(output)
    assert (output.asnumpy() == expect).all()