diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc
index e4c79952c9..5b6cd6588e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc
@@ -60,6 +60,7 @@ int ArithmeticCPUKernel::ReSize() {
       outside_ *= param_->out_shape_[i];
     }
   }
+  data_type_len_ = lite::DataTypeSize(in_tensors_.at(0)->data_type());
   int ret = RET_OK;
   if (!IsScalarClac() && !IsBatchScalarCalc() && !IsBiasCalc()) {
     ret = ConstTensorBroadCast();
diff --git a/mindspore/lite/tools/benchmark/benchmark.cc b/mindspore/lite/tools/benchmark/benchmark.cc
index a9d1454d3d..e9a74debe5 100644
--- a/mindspore/lite/tools/benchmark/benchmark.cc
+++ b/mindspore/lite/tools/benchmark/benchmark.cc
@@ -374,8 +374,12 @@ int Benchmark::CompareDataGetTotalBiasAndSize(const std::string &name, tensor::M
       bias = CompareData<float, int64_t>(name, tensor->shape(), mutableData);
       break;
     }
+    case TypeId::kNumberTypeBool: {
+      bias = CompareData<bool, int64_t>(name, tensor->shape(), mutableData);
+      break;
+    }
     default:
-      MS_LOG(ERROR) << "Datatype " << msCalibDataType << " is not supported.";
+      MS_LOG(ERROR) << "Datatype " << tensor->data_type() << " is not supported.";
       return RET_ERROR;
   }
   if (bias < 0) {
@@ -529,6 +533,8 @@ int Benchmark::PrintInputData() {
       std::cout << static_cast<int *>(in_data)[j] << " ";
     } else if (tensor_data_type == TypeId::kNumberTypeInt64) {
       std::cout << static_cast<int64_t *>(in_data)[j] << " ";
+    } else if (tensor_data_type == TypeId::kNumberTypeBool) {
+      std::cout << static_cast<bool *>(in_data)[j] << " ";
     } else {
       MS_LOG(ERROR) << "Datatype: " << tensor_data_type << " is not supported.";
       return RET_ERROR;