diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc
index 905d024f5e..d4137c09b6 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc
@@ -41,15 +41,7 @@ int ConcatFp16CPUKernel::Init() {
   return ReSize();
 }
 
-int ConcatFp16CPUKernel::ReSize() {
-  FreeTmpBuffer();
-  auto ret = MallocTmpBuffer();
-  if (ret != RET_OK) {
-    FreeTmpBuffer();
-    return ret;
-  }
-  return ConcatBaseCPUKernel::ReSize();
-}
+int ConcatFp16CPUKernel::ReSize() { return ConcatBaseCPUKernel::ReSize(); }
 
 int ConcatFp16CPUKernel::MallocTmpBuffer() {
   for (const auto &in_tensor : in_tensors_) {
@@ -105,6 +97,13 @@ int ConcatFp16CPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
     return prepare_ret;
   }
+
+  auto ret = MallocTmpBuffer();
+  if (ret != RET_OK) {
+    FreeTmpBuffer();
+    return ret;
+  }
+
   auto input_num = in_tensors_.size();
   std::vector<int *> inputs_output_shape(input_num + 1, nullptr);
 
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc
index 8ba4cb05f1..c17baab5a2 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc
@@ -58,17 +58,7 @@ int ReduceFp16CPUKernel::Init() {
 }
 
 int ReduceFp16CPUKernel::ReSize() {
-  FreeTmpBuffer();
-  auto ret = ReduceBaseCPUKernel::ReSize();
-  if (ret != RET_OK) {
-    return ret;
-  }
-  ret = MallocTmpBuffer();
-  if (ret != RET_OK) {
-    FreeTmpBuffer();
-    return ret;
-  }
-  return RET_OK;
+  return ReduceBaseCPUKernel::ReSize();
 }
 
 int ReduceFp16CPUKernel::CallReduceUnit(int task_id) {
@@ -94,6 +84,12 @@ int ReduceFp16CPUKernel::Run() {
     return prepare_ret;
   }
 
+  auto ret = MallocTmpBuffer();
+  if (ret != RET_OK) {
+    FreeTmpBuffer();
+    return ret;
+  }
+
   tmp_shape_ = in_tensors_.at(0)->shape();
   auto in_tensor = in_tensors_.at(0);
   if (in_tensor->data_type() == kNumberTypeFloat32 || in_tensor->data_type() == kNumberTypeFloat) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc
index 3c0433a509..efd45f5213 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc
@@ -59,12 +59,6 @@ int TransposeFp16CPUKernel::ReSize() {
     param->out_strides_[i] = out_shape[i + 1] * param->out_strides_[i + 1];
   }
 
-  FreeFp16Buffer();
-  auto ret = MallocFp16Buffer();
-  if (ret != RET_OK) {
-    FreeFp16Buffer();
-    return ret;
-  }
   return RET_OK;
 }
 
@@ -149,10 +143,16 @@ int TransposeFp16CPUKernel::Run() {
   auto &out_tensor = out_tensors_.front();
   if (in_tensor == nullptr || out_tensor == nullptr) {
     MS_LOG(ERROR) << "null pointer referencing.";
-    FreeFp16Buffer();
     return RET_ERROR;
   }
 
+  // malloc when Run
+  ret = MallocFp16Buffer();
+  if (ret != RET_OK) {
+    FreeFp16Buffer();
+    return ret;
+  }
+
   if (in_tensor->data_type() == kNumberTypeFloat || in_tensor->data_type() == kNumberTypeFloat32) {
     in_data_ = reinterpret_cast<float *>(in_tensor->Data());
     Float32ToFloat16(in_data_, fp16_in_data_, in_tensor->ElementsNum());
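
The change applies the same pattern to all three fp16 kernels: temporary-buffer allocation is removed from ReSize() and deferred to Run(), with the buffer freed if allocation fails. A minimal sketch of that flow is below; it uses a hypothetical `Fp16KernelExample` class and plain malloc/free, not the actual MindSpore kernel interface, purely to illustrate the allocate-in-Run idea under those assumptions.

```cpp
// Illustrative sketch only: hypothetical kernel, not the MindSpore Lite API.
// Shows the ownership change: the scratch buffer is no longer allocated in
// ReSize(), but lazily in Run(), and released if allocation fails.
#include <cstdint>
#include <cstdlib>

constexpr int RET_OK = 0;
constexpr int RET_ERROR = -1;

class Fp16KernelExample {
 public:
  int ReSize() {
    // Only shape/stride bookkeeping here; no buffer allocation anymore.
    return RET_OK;
  }

  int Run() {
    // Allocate the scratch buffer at execution time, when the tensor
    // shapes that will actually be used are known.
    int ret = MallocTmpBuffer();
    if (ret != RET_OK) {
      FreeTmpBuffer();
      return ret;
    }
    // ... perform the fp16 computation using tmp_buf_ ...
    FreeTmpBuffer();
    return RET_OK;
  }

 private:
  int MallocTmpBuffer() {
    tmp_buf_ = static_cast<uint16_t *>(malloc(element_num_ * sizeof(uint16_t)));
    return tmp_buf_ != nullptr ? RET_OK : RET_ERROR;
  }

  void FreeTmpBuffer() {
    free(tmp_buf_);
    tmp_buf_ = nullptr;
  }

  uint16_t *tmp_buf_ = nullptr;  // fp16 scratch storage
  size_t element_num_ = 1;       // element count, set from input shapes
};
```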