Browse Source

!9301 [MS][LITE]Fix convolution_fp16

From: @gongdaguo
Reviewed-by: @zhang_xue_tong, @hangangqiang
Signed-off-by: @zhang_xue_tong
tags/v1.1.0
mindspore-ci-bot Gitee 5 years ago
parent
commit
69249ddeb1
1 changed file with 2 additions and 2 deletions
  1. +2
    -2
      mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc

+ 2
- 2
mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc View File

@@ -296,7 +296,7 @@ lite::Tensor *CreateOutputTensor(std::vector<int> out_shape, const std::vector<l
MS_LOG(ERROR) << "new tmp_out_tensor failed.";
return nullptr;
}
out_tensor->set_data_type(outputs.at(index)->data_type());
out_tensor->set_data_type(mindspore::kNumberTypeFloat16);
out_tensor->set_format(outputs.at(index)->format());
if (infered_flag) {
out_tensor->set_shape(out_shape);
@@ -356,7 +356,7 @@ kernel::LiteKernel *CpuGroupConvFp16KernelCreator(const std::vector<lite::Tensor
return nullptr;
}
// create new input for each group
auto in_tensor = CreateInputTensor(inputs.front()->data_type(), in_shape, infered_flag);
auto in_tensor = CreateInputTensor(mindspore::kNumberTypeFloat16, in_shape, infered_flag);
if (in_tensor == nullptr) {
delete new_conv_parameter;
FreeMemoryFp16(group_convs, new_inputs, new_outputs);


Loading…
Cancel
Save