diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/topk_split.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/topk_split.cc
index 8e18560260..d9b0f4616e 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/topk_split.cc
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/topk_split.cc
@@ -46,13 +46,13 @@ tensor::TensorPtr CreateTensor(const AnfNodePtr &node) {
   // 2 set value of tensor
   auto data_ptr = indices_tensor->data_c();
   MS_EXCEPTION_IF_NULL(data_ptr);
-  std::vector<Eigen::half> half_data;
+  std::vector<float16> half_data;
   for (size_t i = 0; i < last_dim; ++i) {
-    half_data.emplace_back(Eigen::half(static_cast<float>(i)));
+    half_data.emplace_back(float16(static_cast<float>(i)));
   }
   for (size_t i = 0; i < last_dim; ++i) {
-    auto gap = static_cast<int>(i) - static_cast<int>(Eigen::half(static_cast<float>(i)));
-    half_data.emplace_back(Eigen::half(static_cast<float>(gap)));
+    auto gap = static_cast<int>(i) - static_cast<int>(float16(static_cast<float>(i)));
+    half_data.emplace_back(float16(static_cast<float>(gap)));
   }
   auto elem_num = last_dim * kFloat16Len * 2;
   auto ret_code = memcpy_s(data_ptr, static_cast<size_t>(indices_tensor->data().nbytes()), half_data.data(), elem_num);
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/fused_batch_norm_fusion.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/fused_batch_norm_fusion.cc
index b4a2af2bd1..06fc06535c 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/fused_batch_norm_fusion.cc
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/fused_batch_norm_fusion.cc
@@ -91,9 +91,9 @@ ValuePtr FusedBatchNormFusion::GetFactor(const EquivPtr &equiv) const {
   auto tensor_ptr = value->cast<tensor::TensorPtr>();
   MS_EXCEPTION_IF_NULL(tensor_ptr);
   if (tensor_ptr->data_type() == kNumberTypeFloat16) {
-    auto *half_data = static_cast<Eigen::half *>(tensor_ptr->data_c());
+    auto *half_data = static_cast<float16 *>(tensor_ptr->data_c());
     MS_EXCEPTION_IF_NULL(half_data);
-    float float_data = Eigen::half_impl::half_to_float(half_data[0]);
+    float float_data = half_to_float(half_data[0]);
     return MakeValue(float_data);
   } else if (tensor_ptr->data_type() == kNumberTypeFloat32) {
     auto *tensor_data = static_cast<float *>(tensor_ptr->data_c());
diff --git a/mindspore/ccsrc/common/trans.cc b/mindspore/ccsrc/common/trans.cc
index 1b10a7d2f7..3996ab7816 100644
--- a/mindspore/ccsrc/common/trans.cc
+++ b/mindspore/ccsrc/common/trans.cc
@@ -138,9 +138,9 @@ template <typename T>
 void TransDataSrc2Fp16(const TypeIdArgs &args, void *dst, const size_t data_size) {
   CheckMemSize(args);
   auto src_data = static_cast<const T *>(args.data);
-  auto half_data = static_cast<Eigen::half *>(dst);
+  auto half_data = static_cast<float16 *>(dst);
   for (size_t i = 0; i < data_size; i++) {
-    half_data[i] = Eigen::half(src_data[i]);
+    half_data[i] = float16(src_data[i]);
   }
 }
diff --git a/mindspore/ccsrc/minddata/dataset/core/data_type.h b/mindspore/ccsrc/minddata/dataset/core/data_type.h
index ab48c3fc78..2e17218d5d 100644
--- a/mindspore/ccsrc/minddata/dataset/core/data_type.h
+++ b/mindspore/ccsrc/minddata/dataset/core/data_type.h
@@ -25,8 +25,7 @@
 #include "minddata/dataset/core/pybind_support.h"
 namespace py = pybind11;
 #else
-#include "Eigen/Core"
-using float16 = Eigen::half;
+#include "base/float16.h"
 #endif
 #include "minddata/dataset/core/constants.h"
 namespace mindspore {
diff --git a/mindspore/ccsrc/minddata/dataset/core/pybind_support.h b/mindspore/ccsrc/minddata/dataset/core/pybind_support.h
index 01c39987c1..7a553b9fef 100644
--- a/mindspore/ccsrc/minddata/dataset/core/pybind_support.h
+++ b/mindspore/ccsrc/minddata/dataset/core/pybind_support.h
@@ -21,10 +21,9 @@

 #include "pybind11/numpy.h"
#include "pybind11/pybind11.h" -#include "Eigen/Core" +#include "base/float16.h" namespace py = pybind11; -using float16 = Eigen::half; namespace pybind11 { namespace detail { diff --git a/mindspore/ccsrc/minddata/dataset/engine/datasetops/batch_op.cc b/mindspore/ccsrc/minddata/dataset/engine/datasetops/batch_op.cc index 6a681c1660..e907d2cac5 100644 --- a/mindspore/ccsrc/minddata/dataset/engine/datasetops/batch_op.cc +++ b/mindspore/ccsrc/minddata/dataset/engine/datasetops/batch_op.cc @@ -27,8 +27,6 @@ #include "minddata/dataset/engine/opt/pass.h" #include "minddata/dataset/kernels/data/data_utils.h" -using float16 = Eigen::half; - namespace mindspore { namespace dataset { BatchOp::Builder::Builder(int32_t batch_size) : builder_drop_(false), builder_pad_(false), builder_pad_map_({}) { diff --git a/mindspore/ccsrc/minddata/dataset/kernels/data/data_utils.cc b/mindspore/ccsrc/minddata/dataset/kernels/data/data_utils.cc index 29fd5ada8b..d0731fcea5 100644 --- a/mindspore/ccsrc/minddata/dataset/kernels/data/data_utils.cc +++ b/mindspore/ccsrc/minddata/dataset/kernels/data/data_utils.cc @@ -345,14 +345,14 @@ Status ToFloat16(const std::shared_ptr &input, std::shared_ptr * for (; out_itr != out_end; in_itr++, out_itr++) { float element = *in_itr; - float float16_max = static_cast(std::numeric_limits::max()); - float float16_min = static_cast(std::numeric_limits::lowest()); + float float16_max = static_cast(std::numeric_limits::max()); + float float16_min = static_cast(std::numeric_limits::lowest()); if (element > float16_max || element < float16_min) { RETURN_STATUS_UNEXPECTED("Value " + std::to_string(element) + " is outside of valid float16 range [" + std::to_string(float16_max) + ", " + std::to_string(float16_min) + "]."); } - *out_itr = Eigen::half(*in_itr); + *out_itr = float16(*in_itr); } return Status::OK(); diff --git a/mindspore/ccsrc/runtime/device/ascend/ascend_device_address.cc b/mindspore/ccsrc/runtime/device/ascend/ascend_device_address.cc index bfd8de81b0..8021c36c0c 100644 --- a/mindspore/ccsrc/runtime/device/ascend/ascend_device_address.cc +++ b/mindspore/ccsrc/runtime/device/ascend/ascend_device_address.cc @@ -113,7 +113,7 @@ bool FloatToHalfAndSyncHostToDevice(void *dst, size_t dst_size, const void *src, MS_EXCEPTION(ArgumentError) << "FloatToHalf failed. size not match src_size[" << src_size << "], dst_size[" << dst_size << "]"; } - std::vector half_data(elem_num); + std::vector half_data(elem_num); FloatToHalf(half_data.data(), src, elem_num); SyncMemory(dst, half_data.data(), dst_size, RT_MEMCPY_HOST_TO_DEVICE); return true; @@ -136,7 +136,7 @@ bool SyncDeviceToHostAndHalfToFloat(void *dst, size_t dst_size, const void *src, MS_EXCEPTION(ArgumentError) << "HalfToFloat failed. 
                                 << dst_size << "]";
   }
-  std::vector<Eigen::half> half_data(elem_num);
+  std::vector<float16> half_data(elem_num);
   SyncMemory(half_data.data(), src, src_size, RT_MEMCPY_DEVICE_TO_HOST);
   HalfToFloat(dst, half_data.data(), elem_num);
   return true;
diff --git a/mindspore/ccsrc/runtime/device/convert_tensor_utils.cc b/mindspore/ccsrc/runtime/device/convert_tensor_utils.cc
index cfd9b0fbdf..45be09a38d 100644
--- a/mindspore/ccsrc/runtime/device/convert_tensor_utils.cc
+++ b/mindspore/ccsrc/runtime/device/convert_tensor_utils.cc
@@ -18,19 +18,19 @@
 namespace mindspore {
 namespace device {
 void HalfToFloat(void *dst, const void *src, size_t elem_num) {
-  auto half_data = static_cast<const Eigen::half *>(src);
+  auto half_data = static_cast<const float16 *>(src);
   auto float_data = static_cast<float *>(dst);
   for (size_t i = 0; i < elem_num; ++i) {
-    float tmp = Eigen::half_impl::half_to_float(half_data[i]);
+    float tmp = half_to_float(half_data[i]);
     float_data[i] = tmp;
   }
 }

 void FloatToHalf(void *dst, const void *src, size_t elem_num) {
   auto float_data = static_cast<const float *>(src);
-  auto half_data = static_cast<Eigen::half *>(dst);
+  auto half_data = static_cast<float16 *>(dst);
   for (size_t i = 0; i < elem_num; ++i) {
-    half_data[i] = Eigen::half(float_data[i]);
+    half_data[i] = float16(float_data[i]);
   }
 }
diff --git a/mindspore/ccsrc/utils/load_onnx/anf_model_parser.h b/mindspore/ccsrc/utils/load_onnx/anf_model_parser.h
index 58fbd1bc70..25152687f9 100644
--- a/mindspore/ccsrc/utils/load_onnx/anf_model_parser.h
+++ b/mindspore/ccsrc/utils/load_onnx/anf_model_parser.h
@@ -29,7 +29,6 @@ namespace lite {
 using int32 = int32_t;
 using int64 = int64_t;
 using uint64 = uint64_t;
-using float16 = Eigen::half;
 class MSANFModelParser {
  public:
  MSANFModelParser() : producer_name_(""), model_version_(0), ir_version_(0) {}
diff --git a/mindspore/core/base/float16.h b/mindspore/core/base/float16.h
new file mode 100644
index 0000000000..9c23e0395a
--- /dev/null
+++ b/mindspore/core/base/float16.h
@@ -0,0 +1,33 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_CORE_BASE_FLOAT16_H_
+#define MINDSPORE_CORE_BASE_FLOAT16_H_
+
+#if defined(ENABLE_ARM32) || defined(ENABLE_ARM64)
+// Built for lite and ARM
+#include <arm_neon.h>
+
+using float16 = float16_t;
+inline float half_to_float(float16 h) { return static_cast<float>(h); }
+#else
+#include <functional>
+#include "Eigen/Core"
+
+using float16 = Eigen::half;
+using HalfToFloat = std::function<float(float16)>;
+const inline HalfToFloat half_to_float = Eigen::half_impl::half_to_float;
+#endif
+#endif  // MINDSPORE_CORE_BASE_FLOAT16_H_
diff --git a/mindspore/core/ir/tensor.h b/mindspore/core/ir/tensor.h
index c61add5a23..ede3d28e2e 100644
--- a/mindspore/core/ir/tensor.h
+++ b/mindspore/core/ir/tensor.h
@@ -22,12 +22,10 @@
 #include <vector>
 #include <numeric>

-#include "Eigen/Core"
 #include "ir/device_sync.h"
 #include "ir/meta_tensor.h"
 #include "utils/log_adapter.h"
-
-using float16 = Eigen::half;
+#include "base/float16.h"

 // brief mindspore namespace.
 //
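
Below is a minimal standalone sketch of how a call site consumes the new base/float16.h shim after this patch: float16 replaces the direct Eigen::half spelling, and half_to_float replaces Eigen::half_impl::half_to_float, so the same source builds against Eigen on x86 and against the native float16_t on ARM. The main() driver and sample values are illustrative only and not part of the patch; the numeric_limits<float16> range check mirrors ToFloat16() in data_utils.cc and assumes the Eigen (non-ARM) path, where Eigen provides that specialization.

#include <iostream>
#include <limits>
#include <vector>

#include "base/float16.h"  // the new shim: Eigen::half on x86, float16_t on ARM

int main() {
  // Reject values outside the fp16 range before narrowing, as ToFloat16()
  // in data_utils.cc does (numeric_limits<float16> is supplied by Eigen here).
  const float f16_max = static_cast<float>(std::numeric_limits<float16>::max());
  const float f16_min = static_cast<float>(std::numeric_limits<float16>::lowest());

  const std::vector<float> src = {0.5f, 1.5f, 2.5f, 65504.0f};
  std::vector<float16> half_data;
  for (float v : src) {
    if (v > f16_max || v < f16_min) {
      std::cerr << "value " << v << " is outside the float16 range\n";
      return 1;
    }
    half_data.emplace_back(float16(v));  // narrow through the platform-neutral alias
  }

  // Widen back through the shim's single conversion entry point, as
  // HalfToFloat() in convert_tensor_utils.cc now does.
  for (const float16 &h : half_data) {
    std::cout << half_to_float(h) << '\n';
  }
  return 0;
}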