| @@ -0,0 +1,95 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
#include "backend/kernel_compiler/cpu/isfinite_cpu_kernel.h"
#include <algorithm>
#include <cmath>
#include "abstract/utils.h"
#include "runtime/device/cpu/cpu_device_address.h"
| namespace mindspore { | |||
| namespace kernel { | |||
| void IsFiniteCPUKernel::InitKernel(const CNodePtr &kernelNode) { | |||
| MS_EXCEPTION_IF_NULL(kernelNode); | |||
| size_t input_num = AnfAlgo::GetInputTensorNum(kernelNode); | |||
| if (input_num != 1) { | |||
| MS_LOG(EXCEPTION) << "Input number is " << input_num << ", but IsFiniteCPUKernel needs 1 inputs."; | |||
| } | |||
| size_t output_num = AnfAlgo::GetOutputTensorNum(kernelNode); | |||
| if (output_num != 1) { | |||
| MS_LOG(EXCEPTION) << "Output number is " << output_num << ", but IsFiniteCPUKernel needs 1 output."; | |||
| } | |||
| input_dtype_ = AnfAlgo::GetPrevNodeOutputInferDataType(kernelNode, 0); | |||
| if (dtype_map_.find(input_dtype_) == dtype_map_.end()) { | |||
| MS_LOG(EXCEPTION) << "Unsupported input type found."; | |||
| } | |||
| } | |||
| bool IsFiniteCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs, | |||
| const std::vector<kernel::AddressPtr> & /*workspace*/, | |||
| const std::vector<kernel::AddressPtr> &outputs) { | |||
| if (input_dtype_ == kNumberTypeFloat16) { | |||
| LaunchKernelFloat16(inputs, outputs); | |||
| } else if (input_dtype_ == kNumberTypeFloat32 || input_dtype_ == kNumberTypeFloat) { | |||
| LaunchKernelFloat<float>(inputs, outputs); | |||
| } else if (input_dtype_ == kNumberTypeFloat64) { | |||
| LaunchKernelFloat<double>(inputs, outputs); | |||
| } else if (dtype_map_.find(input_dtype_) != dtype_map_.end()) { | |||
| LaunchKernelOther(inputs, outputs); | |||
| } else { | |||
| MS_LOG(EXCEPTION) << "Only support bool, int, uint, float, but actual data type is " << TypeIdLabel(input_dtype_); | |||
| } | |||
| return true; | |||
| } | |||
| void IsFiniteCPUKernel::LaunchKernelFloat16(const std::vector<AddressPtr> &inputs, | |||
| const std::vector<kernel::AddressPtr> &outputs) { | |||
| float16 *input = reinterpret_cast<float16 *>(inputs[0]->addr); | |||
| bool *output = reinterpret_cast<bool *>(outputs[0]->addr); | |||
| size_t elem_num = inputs[0]->size / sizeof(float16); | |||
| for (size_t i = 0; i < elem_num; i++) { | |||
| float temp_num = static_cast<float>(input[i]); | |||
| output[i] = !std::isinf(temp_num) && !std::isnan(temp_num); | |||
| } | |||
| } | |||
| template <typename T> | |||
| void IsFiniteCPUKernel::LaunchKernelFloat(const std::vector<AddressPtr> &inputs, | |||
| const std::vector<kernel::AddressPtr> &outputs) { | |||
| T *input = reinterpret_cast<T *>(inputs[0]->addr); | |||
| bool *output = reinterpret_cast<bool *>(outputs[0]->addr); | |||
| size_t elem_num = inputs[0]->size / sizeof(T); | |||
| for (size_t i = 0; i < elem_num; i++) { | |||
| output[i] = !std::isinf(input[i]) && !std::isnan(input[i]); | |||
| } | |||
| } | |||
| void IsFiniteCPUKernel::LaunchKernelOther(const std::vector<AddressPtr> &inputs, | |||
| const std::vector<kernel::AddressPtr> &outputs) { | |||
| bool *output = reinterpret_cast<bool *>(outputs[0]->addr); | |||
| auto type_iter = dtype_map_.find(input_dtype_); | |||
| size_t elem_num = inputs[0]->size / (type_iter->second); | |||
| for (size_t i = 0; i < elem_num; i++) { | |||
| output[i] = true; | |||
| } | |||
| } | |||
| } // namespace kernel | |||
| } // namespace mindspore | |||
| @@ -0,0 +1,93 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_ISFINITE_CPU_KERNEL_H_ | |||
| #define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_ISFINITE_CPU_KERNEL_H_ | |||
| #include <vector> | |||
| #include <map> | |||
| #include "backend/kernel_compiler/cpu/cpu_kernel.h" | |||
| #include "backend/kernel_compiler/cpu/cpu_kernel_factory.h" | |||
| namespace mindspore { | |||
| namespace kernel { | |||
| class IsFiniteCPUKernel : public CPUKernel { | |||
| public: | |||
| IsFiniteCPUKernel() = default; | |||
| ~IsFiniteCPUKernel() override = default; | |||
| void InitKernel(const CNodePtr &kernelNode) override; | |||
| bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace, | |||
| const std::vector<AddressPtr> &outputs) override; | |||
| private: | |||
| template <typename T> | |||
| void LaunchKernelFloat(const std::vector<AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs); | |||
| void LaunchKernelOther(const std::vector<AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs); | |||
| void LaunchKernelFloat16(const std::vector<AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs); | |||
| private: | |||
| std::map<TypeId, size_t> dtype_map_ = {{kNumberTypeBool, sizeof(bool)}, {kNumberTypeInt8, sizeof(int8_t)}, | |||
| {kNumberTypeInt16, sizeof(int16_t)}, {kNumberTypeInt32, sizeof(int32_t)}, | |||
| {kNumberTypeInt64, sizeof(int64_t)}, {kNumberTypeFloat16, sizeof(float16)}, | |||
| {kNumberTypeFloat32, sizeof(float)}, {kNumberTypeFloat64, sizeof(double)}, | |||
| {kNumberTypeUInt8, sizeof(uint8_t)}, {kNumberTypeUInt16, sizeof(uint16_t)}, | |||
| {kNumberTypeUInt32, sizeof(uint32_t)}, {kNumberTypeUInt64, sizeof(uint64_t)}}; | |||
| TypeId input_dtype_{kTypeUnknown}; | |||
| }; | |||
| MS_REG_CPU_KERNEL(IsFinite, KernelAttr().AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeBool), | |||
| IsFiniteCPUKernel); | |||
| MS_REG_CPU_KERNEL(IsFinite, KernelAttr().AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeBool), | |||
| IsFiniteCPUKernel); | |||
| MS_REG_CPU_KERNEL(IsFinite, KernelAttr().AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeBool), | |||
| IsFiniteCPUKernel); | |||
| MS_REG_CPU_KERNEL(IsFinite, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeBool), | |||
| IsFiniteCPUKernel); | |||
| MS_REG_CPU_KERNEL(IsFinite, KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeBool), | |||
| IsFiniteCPUKernel); | |||
| MS_REG_CPU_KERNEL(IsFinite, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeBool), | |||
| IsFiniteCPUKernel); | |||
| MS_REG_CPU_KERNEL(IsFinite, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeBool), | |||
| IsFiniteCPUKernel); | |||
| MS_REG_CPU_KERNEL(IsFinite, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeBool), | |||
| IsFiniteCPUKernel); | |||
| MS_REG_CPU_KERNEL(IsFinite, KernelAttr().AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeBool), | |||
| IsFiniteCPUKernel); | |||
| MS_REG_CPU_KERNEL(IsFinite, KernelAttr().AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeBool), | |||
| IsFiniteCPUKernel); | |||
| MS_REG_CPU_KERNEL(IsFinite, KernelAttr().AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeBool), | |||
| IsFiniteCPUKernel); | |||
| MS_REG_CPU_KERNEL(IsFinite, KernelAttr().AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeBool), | |||
| IsFiniteCPUKernel); | |||
| } // namespace kernel | |||
| } // namespace mindspore | |||
| #endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_ISFINITE_CPU_KERNEL_H_ | |||
| @@ -3007,7 +3007,7 @@ class IsFinite(PrimitiveWithInfer): | |||
| Tensor, has the same shape of input, and the dtype is bool. | |||
| Supported Platforms: | |||
| ``Ascend`` ``GPU`` | |||
| ``Ascend`` ``GPU`` ``CPU`` | |||
| Examples: | |||
| >>> is_finite = ops.IsFinite() | |||
| @@ -0,0 +1,100 @@ | |||
| # Copyright 2020 Huawei Technologies Co., Ltd | |||
| # | |||
| # Licensed under the Apache License, Version 2.0 (the "License"); | |||
| # you may not use this file except in compliance with the License. | |||
| # You may obtain a copy of the License at | |||
| # | |||
| # http://www.apache.org/licenses/LICENSE-2.0 | |||
| # | |||
| # Unless required by applicable law or agreed to in writing, software | |||
| # distributed under the License is distributed on an "AS IS" BASIS, | |||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| # See the License for the specific language governing permissions and | |||
| # limitations under the License. | |||
| # ============================================================================ | |||
| import numpy as np | |||
| import pytest | |||
| import mindspore.context as context | |||
| import mindspore.nn as nn | |||
| from mindspore import Tensor | |||
| from mindspore.ops import operations as P | |||
| context.set_context(mode=context.GRAPH_MODE, device_target='CPU') | |||
class Net(nn.Cell):
    """Minimal cell that applies the IsFinite primitive to its input."""

    def __init__(self):
        super(Net, self).__init__()
        self.is_finite = P.IsFinite()

    def construct(self, x):
        return self.is_finite(x)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu_training
@pytest.mark.env_onecard
def test_net():
    """Check IsFinite on CPU for every supported input dtype.

    Float inputs contain nan (log(-1)), a finite value, and inf (log(0));
    integer and bool inputs are always finite, so the mask is all True.
    """
    net = Net()

    float_values = np.array([np.log(-1), 0.4, np.log(0)])
    for float_type in (np.float16, np.float32, np.float64):
        out = net(Tensor(float_values.astype(float_type))).asnumpy()
        assert np.all(out == [False, True, False])

    int_values = np.array([4, 1, -5])
    int_types = (np.int8, np.int16, np.int32, np.int64,
                 np.uint8, np.uint16, np.uint32, np.uint64)
    for int_type in int_types:
        out = net(Tensor(int_values.astype(int_type))).asnumpy()
        assert np.all(out == [True, True, True])

    bool_input = Tensor(np.array([False, True, False]).astype(np.bool_))
    out = net(bool_input).asnumpy()
    assert np.all(out == [True, True, True])