From 10a0606ced21baea8c8a7b513fd261831beb937b Mon Sep 17 00:00:00 2001
From: yangruoqi713
Date: Thu, 15 Apr 2021 09:37:56 +0800
Subject: [PATCH] [MSLITE][DEVELOP] fix bug: arm cpu op group conv memory leak

---
 ...nvolution.cc => group_convolution_base.cc} |  58 +++-----
 ...convolution.h => group_convolution_base.h} |  22 +--
 .../arm/base/group_convolution_creator.cc     |  38 +++--
 .../arm/base/group_convolution_creator.h      |   2 +-
 .../kernel/arm/fp16/group_convolution_fp16.cc | 140 +-----------------
 .../kernel/arm/fp16/group_convolution_fp16.h  |  27 +---
 .../arm/fp32/convolution_delegate_fp32.cc     |   8 +-
 .../kernel/arm/fp32/group_convolution_fp32.cc |  53 +++++++
 .../kernel/arm/fp32/group_convolution_fp32.h  |  40 +++++
 .../kernel/arm/int8/group_convolution_int8.cc |  10 +-
 .../kernel/arm/int8/group_convolution_int8.h  |  12 +-
 11 files changed, 174 insertions(+), 236 deletions(-)
 rename mindspore/lite/src/runtime/kernel/arm/base/{group_convolution.cc => group_convolution_base.cc} (71%)
 rename mindspore/lite/src/runtime/kernel/arm/base/{group_convolution.h => group_convolution_base.h} (73%)
 create mode 100644 mindspore/lite/src/runtime/kernel/arm/fp32/group_convolution_fp32.cc
 create mode 100644 mindspore/lite/src/runtime/kernel/arm/fp32/group_convolution_fp32.h
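Note: the heart of this patch is a refactor. The old GroupConvolutionCPUKernel mixed
the group loop with fp32-only SeparateInput/PostConcat copies, so the fp16 kernel
duplicated Init/ReSize/PreProcess/Run wholesale, and failure paths dropped already
built sub-kernels. The patch renames the class to GroupConvolutionBaseCPUKernel,
turns the two copy steps into pure virtual hooks, and gives fp32/fp16/int8 thin
subclasses. A minimal sketch of the resulting shape (simplified, hypothetical types;
the real classes also carry tensors, conv parameters and a context):

    #include <vector>

    struct SubKernel { int Run() { return 0; } };  // stand-in for kernel::LiteKernel

    class GroupConvolutionBase {
     public:
      virtual ~GroupConvolutionBase() = default;  // the real base frees sub-kernels here
      int Run() {
        for (int i = 0; i < static_cast<int>(subs_.size()); ++i) {
          if (SeparateInput(i) != 0) return -1;  // slice group i out of the fused input
          if (subs_[i]->Run() != 0) return -1;   // run the per-group convolution
          if (PostConcat(i) != 0) return -1;     // write group i into the fused output
        }
        return 0;
      }

     protected:
      virtual int SeparateInput(int group_id) = 0;  // fp32 / fp16 / int8 override these
      virtual int PostConcat(int group_id) = 0;
      std::vector<SubKernel *> subs_;
    };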
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/group_convolution.cc b/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_base.cc
similarity index 71%
rename from mindspore/lite/src/runtime/kernel/arm/base/group_convolution.cc
rename to mindspore/lite/src/runtime/kernel/arm/base/group_convolution_base.cc
index 02f3342d95..44e8a1a3fc 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/group_convolution.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_base.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include "src/runtime/kernel/arm/base/group_convolution.h"
+#include "src/runtime/kernel/arm/base/group_convolution_base.h"
 #include "src/runtime/infer_manager.h"
 #include "include/errorcode.h"
 
@@ -22,7 +22,7 @@ using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
 
 namespace mindspore::kernel {
-int GroupConvolutionCPUKernel::Init() {
+int GroupConvolutionBaseCPUKernel::Init() {
   for (int i = 0; i < group_num_; ++i) {
     auto sub_conv = group_convs_.at(i);
     if (sub_conv == nullptr) {
@@ -39,7 +39,7 @@ int GroupConvolutionCPUKernel::Init() {
   return RET_OK;
 }
 
-int GroupConvolutionCPUKernel::ReSize() {
+int GroupConvolutionBaseCPUKernel::ReSize() {
   for (int i = 0; i < group_num_; ++i) {
     auto ret = group_convs_.at(i)->ReSize();
     if (ret != RET_OK) {
@@ -52,7 +52,7 @@
 
-void GroupConvolutionCPUKernel::FreeSubKernel() {
+void GroupConvolutionBaseCPUKernel::FreeSubKernel() {
   for (auto &sub_conv : group_convs_) {
     // free sub conv input tensors / output tensors manually
     auto sub_in_tensors = sub_conv->in_tensors();
@@ -72,7 +72,7 @@ void GroupConvolutionCPUKernel::FreeSubKernel() {
   }
 }
 
-int GroupConvolutionCPUKernel::PreProcess() {
+int GroupConvolutionBaseCPUKernel::PreProcess() {
   if (!InferShapeDone()) {
     op_parameter_->infer_flag_ = true;
 
@@ -133,50 +133,28 @@ int GroupConvolutionCPUKernel::PreProcess() {
   return RET_OK;
 }
 
-void GroupConvolutionCPUKernel::SeparateInput(int group_id) {
-  auto in_tensor = in_tensors_.front();
-  int in_plane = in_tensor->Height() * in_tensor->Width() * in_tensor->Batch();
-  int sub_in_channel = conv_param_->input_channel_;
-  int ori_in_channel = sub_in_channel * group_num_;
-  auto sub_in_data = reinterpret_cast<float *>(group_convs_.at(group_id)->in_tensors().front()->data_c());
-  float *src_ptr = reinterpret_cast<float *>(ori_in_data_) + group_id * sub_in_channel;
-  float *dst_ptr = sub_in_data;
-  for (int i = 0; i < in_plane; ++i) {
-    memcpy(dst_ptr, src_ptr, sub_in_channel * sizeof(float));
-    src_ptr += ori_in_channel;
-    dst_ptr += sub_in_channel;
-  }
-}
-
-void GroupConvolutionCPUKernel::PostConcat(int group_id) {
-  auto out_tensor = out_tensors_.front();
-  int out_plane = out_tensor->Height() * out_tensor->Width() * out_tensor->Batch();
-  int sub_out_channel = conv_param_->output_channel_;
-  int ori_out_channel = sub_out_channel * group_num_;
-  auto sub_out_data = reinterpret_cast<float *>(group_convs_.at(group_id)->out_tensors().front()->data_c());
-  float *src_ptr = sub_out_data;
-  float *dst_ptr = reinterpret_cast<float *>(ori_out_data_) + group_id * sub_out_channel;
-  for (int i = 0; i < out_plane; ++i) {
-    memcpy(dst_ptr, src_ptr, sub_out_channel * sizeof(float));
-    src_ptr += sub_out_channel;
-    dst_ptr += ori_out_channel;
-  }
-}
-
-int GroupConvolutionCPUKernel::Run() {
+int GroupConvolutionBaseCPUKernel::Run() {
   ori_in_data_ = in_tensors().front()->data_c();
   ori_out_data_ = out_tensors().front()->data_c();
   for (int i = 0; i < group_num_; ++i) {
     // first, separate group conv input into several parts. This step must be in runtime stage.
-    SeparateInput(i);
+    auto ret = SeparateInput(i);
+    if (ret != RET_OK) {
+      MS_LOG(ERROR) << "Separate input failed.";
+      return ret;
+    }
     // sub kernels run
-    auto ret = group_convs_.at(i)->Run();
+    ret = group_convs_.at(i)->Run();
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "sub kernel " << i << " execute failed.";
       return ret;
     }
     // post process, concat all outputs of sub-kernels into one output
-    PostConcat(i);
+    ret = PostConcat(i);
+    if (ret != RET_OK) {
+      MS_LOG(ERROR) << "Concat output failed.";
+      return ret;
+    }
   }
   return RET_OK;
 }
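Note: every SeparateInput/PostConcat override implements the same NHWC stripe copy.
In the fused tensor, the channels of group g occupy the [g * sub_channel,
(g + 1) * sub_channel) slice of each pixel, so splitting is one memcpy per pixel
with a strided source. A standalone demo of the indexing (hypothetical free
function, fp32, 2 groups x 2 channels, 2 pixels):

    #include <cstdio>
    #include <cstring>

    // Copy group `g` out of an interleaved NHWC buffer, one memcpy per pixel.
    void SeparateInput(const float *src, float *dst, int plane, int sub_ch, int groups, int g) {
      const float *src_ptr = src + g * sub_ch;  // this group's stripe in pixel 0
      for (int i = 0; i < plane; ++i) {
        memcpy(dst, src_ptr, sub_ch * sizeof(float));
        src_ptr += sub_ch * groups;  // same stripe, next pixel
        dst += sub_ch;               // sub-kernel input is dense
      }
    }

    int main() {
      // plane = N*H*W = 2; channels of group 0 are {0,1,...}, of group 1 {10,11,...}
      const float fused[8] = {0, 1, 10, 11, 2, 3, 12, 13};
      float group1[4];
      SeparateInput(fused, group1, 2, 2, 2, 1);
      for (float v : group1) printf("%g ", v);  // prints: 10 11 12 13
      return 0;
    }

PostConcat is the mirror image: dense source, strided destination.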
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/group_convolution.h b/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_base.h
similarity index 73%
rename from mindspore/lite/src/runtime/kernel/arm/base/group_convolution.h
rename to mindspore/lite/src/runtime/kernel/arm/base/group_convolution_base.h
index 165ca94a5d..e6d8ff593c 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/group_convolution.h
+++ b/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_base.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_GROUP_CONVOLUTION_H_
-#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_GROUP_CONVOLUTION_H_
+#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_GROUP_CONVOLUTION_BASE_H_
+#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_GROUP_CONVOLUTION_BASE_H_
 
 #include <utility>
 #include <vector>
@@ -25,23 +25,23 @@
 #include "nnacl/fp32/conv_common_fp32.h"
 
 namespace mindspore::kernel {
-class GroupConvolutionCPUKernel : public ConvolutionBaseCPUKernel {
+class GroupConvolutionBaseCPUKernel : public ConvolutionBaseCPUKernel {
  public:
-  GroupConvolutionCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                            const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                            std::vector<kernel::LiteKernel *> group_convs, const int group_num)
+  GroupConvolutionBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                                const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
+                                std::vector<kernel::LiteKernel *> group_convs, const int group_num)
       : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx),
         group_convs_(std::move(group_convs)),
        group_num_(group_num) {}  // opParameter(in channel, out channel) in this kernel has been split to groups, if
   // you want to get real params, multiply in channel / out channel with group num
-  ~GroupConvolutionCPUKernel() override { FreeSubKernel(); }
+  ~GroupConvolutionBaseCPUKernel() override { FreeSubKernel(); }
 
   int Init() override;
   int ReSize() override;
   int Run() override;
   int PreProcess() override;
-  virtual void SeparateInput(int group_id);
-  virtual void PostConcat(int group_id);
+  virtual int SeparateInput(int group_id) = 0;
+  virtual int PostConcat(int group_id) = 0;
   void FreeSubKernel();
 
 protected:
@@ -52,4 +52,4 @@ class GroupConvolutionCPUKernel : public ConvolutionBaseCPUKernel {
 };
 }  // namespace mindspore::kernel
 
-#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_GROUP_CONVOLUTION_H_
+#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_GROUP_CONVOLUTION_BASE_H_
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_creator.cc b/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_creator.cc
index 23895fddd1..e69cb1c49e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_creator.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_creator.cc
@@ -33,7 +33,11 @@ ConvParameter *CreateNewConvParameter(ConvParameter *parameter) {
   return conv_parameter;
 }
 
-void FreeMemory(const std::vector<lite::Tensor *> *new_inputs, const std::vector<lite::Tensor *> *new_outputs) {
+void FreeCurrentConv(ConvParameter *conv_param, const std::vector<lite::Tensor *> *new_inputs,
+                     const std::vector<lite::Tensor *> *new_outputs) {
+  if (conv_param != nullptr) {
+    free(conv_param);
+  }
   for (auto &in_tensor : *new_inputs) {
     delete in_tensor;
   }
@@ -99,20 +103,22 @@ void GroupConvCreator::CopyQuantParam(std::vector<lite::Tensor *> *tensors) {
   }
 }
 
-bool GroupConvCreator::CheckIfValidPoint(void *ptr) {
-  if (ptr == nullptr) {
-    for (auto &sub_conv : group_convs_) {
-      delete sub_conv;
+void GroupConvCreator::FreeGroupConvs() {
+  for (auto &sub_conv : group_convs_) {
+    for (auto &in_tensor : sub_conv->in_tensors()) {
+      delete in_tensor;
+    }
+    for (auto &out_tensor : sub_conv->out_tensors()) {
+      delete out_tensor;
     }
-    return false;
+    delete sub_conv;
  }
-  return true;
 }
 
 int GroupConvCreator::NewInputTensor(std::vector<lite::Tensor *> *tensors) {
   auto in_tensor = CreateVarTensor({input_shape_, schema::Format_NHWC, data_type_, lite::Tensor::Category::VAR, true},
                                    infered_);
-  if (!CheckIfValidPoint(in_tensor)) {
+  if (in_tensor == nullptr) {
     return lite::RET_ERROR;
   }
   tensors->emplace_back(in_tensor);
@@ -121,7 +127,7 @@ int GroupConvCreator::NewInputTensor(std::vector<lite::Tensor *> *tensors) {
 
 int GroupConvCreator::NewOutputTensor(std::vector<lite::Tensor *> *tensors, lite::Tensor *output) {
   auto out_tensor = CreateVarTensor({output_shape_, output->format(), data_type_, output->category(), false}, infered_);
-  if (!CheckIfValidPoint(out_tensor)) {
+  if (out_tensor == nullptr) {
     return lite::RET_ERROR;
   }
   if (is_quant_) {
@@ -138,7 +144,7 @@ int GroupConvCreator::NewConstTensor(std::vector<lite::Tensor *> *tensors, int group_id) {
   }
   for (auto &info : const_tensor_list) {
     auto const_tensor = CreateConstTensor(origin_inputs_.at(info.first), info.second, group_id);
-    if (!CheckIfValidPoint(const_tensor)) {
+    if (const_tensor == nullptr) {
       return lite::RET_ERROR;
     }
     tensors->emplace_back(const_tensor);
@@ -171,26 +177,30 @@ void GroupConvCreator::SetShapeOfTensors() {
 
 int GroupConvCreator::GetSingleConvParam(ConvParameter *conv_param, std::vector<lite::Tensor *> *new_inputs,
                                          std::vector<lite::Tensor *> *new_outputs, int group_id) {
-  if (!CheckIfValidPoint(conv_param)) {
+  if (conv_param == nullptr) {
+    FreeGroupConvs();
     return lite::RET_ERROR;
   }
   // create new input for each group
   if (NewInputTensor(new_inputs) != lite::RET_OK) {
     MS_LOG(ERROR) << "new input tensor failed.";
-    FreeMemory(new_inputs, {});
+    FreeGroupConvs();
+    FreeCurrentConv(conv_param, new_inputs, {});
     return lite::RET_ERROR;
   }
   // const tensor
   if (NewConstTensor(new_inputs, group_id) != lite::RET_OK) {
     MS_LOG(ERROR) << "new const tensor failed.";
-    FreeMemory(new_inputs, {});
+    FreeGroupConvs();
+    FreeCurrentConv(conv_param, new_inputs, {});
     return lite::RET_ERROR;
   }
   // create new output tensor
   for (auto &output : origin_outputs_) {
     if (NewOutputTensor(new_outputs, output) != lite::RET_OK) {
       MS_LOG(ERROR) << "new output tensor failed.";
-      FreeMemory(new_inputs, new_outputs);
+      FreeGroupConvs();
+      FreeCurrentConv(conv_param, new_inputs, new_outputs);
       return lite::RET_ERROR;
     }
   }
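Note: this hunk is the actual leak fix. CheckIfValidPoint() deleted only the bare
sub-kernel objects, and the old FreeMemory() only the tensors of the group being
built, so sub-kernels completed for earlier groups, together with their tensors,
were dropped when a later group failed. FreeGroupConvs() plus FreeCurrentConv()
now unwind both. The shape of the pattern, with hypothetical simplified types
(tensors are deleted before the kernel that refers to them, as in FreeGroupConvs
above):

    #include <vector>

    struct Tensor {};
    struct SubConv { std::vector<Tensor *> tensors; };  // does not own its tensors

    class Creator {
     public:
      // Build one group; on failure, unwind everything built so far.
      bool BuildGroup(bool fail) {
        auto *conv = new SubConv;
        conv->tensors.push_back(new Tensor);
        if (fail) {
          FreeGroupConvs();   // groups completed in earlier iterations
          FreeCurrent(conv);  // the half-built current group
          return false;
        }
        group_convs_.push_back(conv);
        return true;
      }

     private:
      void FreeGroupConvs() {
        for (auto *conv : group_convs_) {
          for (auto *t : conv->tensors) delete t;  // tensors first...
          delete conv;                             // ...then the kernel itself
        }
        group_convs_.clear();
      }
      static void FreeCurrent(SubConv *conv) {
        for (auto *t : conv->tensors) delete t;
        delete conv;
      }
      std::vector<SubConv *> group_convs_;
    };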
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_creator.h b/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_creator.h
index c0f844626f..2849bf4a7f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_creator.h
+++ b/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_creator.h
@@ -57,7 +57,7 @@ class GroupConvCreator {
   void set_output_shape(const std::vector<int> &shape) { output_shape_ = shape; }
   void set_filter_shape(const std::vector<int> &shape) { filter_shape_ = shape; }
   void set_bias_shape(const std::vector<int> &shape) { bias_shape_ = shape; }
-  bool CheckIfValidPoint(void *ptr);
+  void FreeGroupConvs();
   int NewInputTensor(std::vector<lite::Tensor *> *tensors);
   int NewConstTensor(std::vector<lite::Tensor *> *tensors, int group_id);
   int NewOutputTensor(std::vector<lite::Tensor *> *tensors, lite::Tensor *output);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.cc
index da773850ee..4410c1bf9e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,123 +15,11 @@
  */
 
 #include "src/runtime/kernel/arm/fp16/group_convolution_fp16.h"
-#include "schema/model_generated.h"
-#include "src/kernel_registry.h"
-#include "include/errorcode.h"
-#include "src/runtime/infer_manager.h"
 
-using mindspore::kernel::KERNEL_ARCH::kCPU;
-using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
 
 namespace mindspore::kernel {
-int GroupConvolutionFP16CPUKernel::Init() {
-  for (int i = 0; i < group_num_; ++i) {
-    auto ret = group_convs_.at(i)->Init();
-    if (ret != RET_OK) {
-      MS_LOG(ERROR) << "Sub kernel init failed.";
-      return ret;
-    }
-  }
-  // if infer shape is done, resize func will be invoked in sub kernels
-  return RET_OK;
-}
-
-int GroupConvolutionFP16CPUKernel::ReSize() {
-  for (int i = 0; i < group_num_; ++i) {
-    auto ret = group_convs_.at(i)->ReSize();
-    if (ret != RET_OK) {
-      MS_LOG(ERROR) << "Sub kernel resize failed.";
-      return RET_ERROR;
-    }
-  }
-  conv_param_->input_channel_ /= group_num_;
-  conv_param_->output_channel_ /= group_num_;
-  return RET_OK;
-}
-
-void GroupConvolutionFP16CPUKernel::FreeSubKernel() {
-  for (auto &sub_conv : group_convs_) {
-    // free sub conv input tensors / output tensors manually
-    auto sub_in_tensors = sub_conv->in_tensors();
-    auto sub_in_tensor_num = sub_in_tensors.size();
-    for (size_t i = 0; i < sub_in_tensor_num; ++i) {
-      delete sub_in_tensors[i];
-      sub_in_tensors[i] = nullptr;
-    }
-    auto sub_out_tensors = sub_conv->out_tensors();
-    auto sub_out_tensor_num = sub_out_tensors.size();
-    for (size_t i = 0; i < sub_out_tensor_num; ++i) {
-      delete sub_out_tensors[i];
-      sub_out_tensors[i] = nullptr;
-    }
-    delete sub_conv;
-    sub_conv = nullptr;
-  }
-}
-
-int GroupConvolutionFP16CPUKernel::PreProcess() {
-  if (!InferShapeDone()) {
-    op_parameter_->infer_flag_ = true;
-
-    auto ret = lite::KernelInferShape(in_tensors_, &out_tensors_, op_parameter_);
-    if (ret != 0) {
-      op_parameter_->infer_flag_ = false;
-      MS_LOG(ERROR) << "InferShape fail!";
-      return ret;
-    }
-
-    // if infershape func is called in runtime stage, we should malloc memory and set shape info for outputs of sub
-    // kernels here.
-    std::vector<int> in_shape;
-    std::vector<int> out_shape;
-    for (int i = 0; i < group_num_; ++i) {
-      // in
-      auto in_tensor = in_tensors_.front();
-      in_shape = {in_tensor->Batch(), in_tensor->Height(), in_tensor->Width(), conv_param_->input_channel_};
-      auto sub_kernel_in_tensor = group_convs_.at(i)->in_tensors().front();
-      sub_kernel_in_tensor->set_shape(in_shape);
-      ret = sub_kernel_in_tensor->MallocData();
-      if (ret != RET_OK) {
-        FreeSubKernel();
-        MS_LOG(ERROR) << "sub kernel in tensor malloc data failed.";
-        return ret;
-      }
-      // out
-      auto out_tensor = out_tensors_.front();
-      out_shape = {out_tensor->Batch(), out_tensor->Height(), out_tensor->Width(), conv_param_->output_channel_};
-      auto sub_kernel_out_tensors = group_convs_[i]->out_tensors();
-      for (auto tensor : sub_kernel_out_tensors) {
-        tensor->set_shape(out_shape);
-        ret = tensor->MallocData();
-        if (ret != RET_OK) {
-          FreeSubKernel();
-          MS_LOG(ERROR) << "sub kernel out tensor malloc data failed.";
-          return ret;
-        }
-      }
-    }
-    ret = ReSize();
-    if (ret != RET_OK) {
-      MS_LOG(ERROR) << "ReSize fail!ret: " << ret;
-      return ret;
-    }
-  }
-
-  auto outputs = this->out_tensors();
-  for (auto *output : outputs) {
-    MS_ASSERT(output != nullptr);
-    auto ret = output->MallocData();
-    if (ret != RET_OK) {
-      FreeSubKernel();
-      MS_LOG(ERROR) << "fp16 group conv out tensor malloc data failed.";
-      return ret;
-    }
-  }
-  return RET_OK;
-}
-
 int GroupConvolutionFP16CPUKernel::SeparateInput(int group_id) {
   // input may either be float32 or float16
   auto in_tensor = in_tensors_.front();
@@ -173,7 +61,7 @@ int GroupConvolutionFP16CPUKernel::SeparateInput(int group_id) {
   return RET_OK;
 }
 
-void GroupConvolutionFP16CPUKernel::PostConcat(int group_id) {
+int GroupConvolutionFP16CPUKernel::PostConcat(int group_id) {
   // output must be float16 data type
   auto out_tensor = out_tensors_.front();
   int out_plane = out_tensor->Height() * out_tensor->Width() * out_tensor->Batch();
@@ -182,34 +70,12 @@ int GroupConvolutionFP16CPUKernel::PostConcat(int group_id) {
   auto sub_out_data = reinterpret_cast<float16_t *>(group_convs_.at(group_id)->out_tensors().front()->data_c());
   MS_ASSERT(sub_out_data);
   float16_t *src_ptr = sub_out_data;
-  float16_t *dst_ptr = ori_out_data_ + group_id * sub_out_channel;
+  float16_t *dst_ptr = reinterpret_cast<float16_t *>(ori_out_data_) + group_id * sub_out_channel;
   for (int i = 0; i < out_plane; ++i) {
     memcpy(dst_ptr, src_ptr, sub_out_channel * sizeof(float16_t));
     src_ptr += sub_out_channel;
     dst_ptr += ori_out_channel;
   }
-}
-
-int GroupConvolutionFP16CPUKernel::Run() {
-  ori_in_data_ = in_tensors().front()->data_c();
-  ori_out_data_ = reinterpret_cast<float16_t *>(out_tensors().front()->data_c());
-  MS_ASSERT(ori_out_data_);
-  for (int i = 0; i < group_num_; ++i) {
-    // first, separate group conv input into several parts. This step must be in runtime stage.
-    auto ret = SeparateInput(i);
-    if (ret != RET_OK) {
-      MS_LOG(ERROR) << "Separate input failed.";
-      return ret;
-    }
-    // sub kernels run
-    ret = group_convs_.at(i)->Run();
-    if (ret != RET_OK) {
-      MS_LOG(ERROR) << "sub kernel " << i << " execute failed.";
-      return ret;
-    }
-    // post process, concat all outputs of sub-kernels into one output
-    PostConcat(i);
-  }
   return RET_OK;
 }
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.h
index 010bc249da..8cf449cd47 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -21,34 +21,21 @@
 #include <utility>
 #include "src/lite_kernel.h"
 #include "nnacl/op_base.h"
-#include "src/runtime/kernel/arm/base/convolution_base.h"
+#include "src/runtime/kernel/arm/base/group_convolution_base.h"
 #include "nnacl/fp16/conv_fp16.h"
 
 namespace mindspore::kernel {
-class GroupConvolutionFP16CPUKernel : public ConvolutionBaseCPUKernel {
+class GroupConvolutionFP16CPUKernel : public GroupConvolutionBaseCPUKernel {
  public:
   GroupConvolutionFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                                 const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
                                 std::vector<kernel::LiteKernel *> group_convs, const int group_num)
-      : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx),
-        group_convs_(std::move(group_convs)),
-        group_num_(group_num) {}  // opParameter(in channel, out channel) in this kernel has been split to groups, if
+      : GroupConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, std::move(group_convs), group_num) {
+  }  // opParameter(in channel, out channel) in this kernel has been split to groups, if
   // you want to get real params, multiply in channel / out channel with group num
-  ~GroupConvolutionFP16CPUKernel() override { FreeSubKernel(); }
 
-  int Init() override;
-  int ReSize() override;
-  int Run() override;
-  int PreProcess() override;
-  int SeparateInput(int group_id);
-  void PostConcat(int group_id);
-  void FreeSubKernel();
-
- private:
-  std::vector<kernel::LiteKernel *> group_convs_;
-  void *ori_in_data_ = nullptr;        // do not free
-  float16_t *ori_out_data_ = nullptr;  // do not free
-  const int group_num_;
+  int SeparateInput(int group_id) override;
+  int PostConcat(int group_id) override;
 };
 }  // namespace mindspore::kernel
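Note: after the refactor the fp16 kernel keeps only what is genuinely fp16-specific:
SeparateInput() must accept either fp32 or fp16 input and convert while splitting,
and PostConcat() copies float16_t stripes. Otherwise the three overrides share one
strided-copy skeleton, which could also be written as a template (sketch; the patch
deliberately uses per-type classes instead, since the fp16 path converts element by
element rather than memcpy-ing):

    #include <cstring>

    template <typename T>
    void ConcatOutput(const T *sub_out, T *fused_out, int plane, int sub_ch, int groups, int group_id) {
      T *dst = fused_out + group_id * sub_ch;  // this group's stripe in the fused output
      for (int i = 0; i < plane; ++i) {
        memcpy(dst, sub_out, sub_ch * sizeof(T));
        sub_out += sub_ch;       // sub-kernel output is dense
        dst += sub_ch * groups;  // fused output interleaves all groups
      }
    }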
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_delegate_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_delegate_fp32.cc
index 0564bd29ea..4591506dc7 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_delegate_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_delegate_fp32.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@
 #include "src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.h"
 #include "src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.h"
 #include "src/runtime/kernel/arm/base/group_convolution_creator.h"
-#include "src/runtime/kernel/arm/base/group_convolution.h"
+#include "src/runtime/kernel/arm/fp32/group_convolution_fp32.h"
 #include "schema/model_generated.h"
 #include "include/errorcode.h"
 
@@ -207,8 +207,8 @@ kernel::LiteKernel *CpuGroupConvFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
       reinterpret_cast<ConvParameter *>(new_conv_param), new_inputs, new_outputs, ctx));
   }
   return new (std::nothrow)
-    GroupConvolutionCPUKernel(op_parameter, inputs, outputs, ctx, *(group_conv_creator.get_group_conv()),
-                              reinterpret_cast<ConvParameter *>(op_parameter)->group_);
+    GroupConvolutionFp32CPUKernel(op_parameter, inputs, outputs, ctx, *(group_conv_creator.get_group_conv()),
+                                  reinterpret_cast<ConvParameter *>(op_parameter)->group_);
 }
 
 /* creator func */
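Note: the delegate now instantiates the fp32 subclass defined below. The sub-kernel
vector is passed by value and std::move()d into the base member (see the
constructors above), so ownership transfers exactly once and the group kernel's
destructor remains the single release point. In miniature (hypothetical simplified
types):

    #include <utility>
    #include <vector>

    struct Kernel {};

    class GroupKernel {
     public:
      explicit GroupKernel(std::vector<Kernel *> subs) : subs_(std::move(subs)) {}
      ~GroupKernel() {
        for (auto *k : subs_) delete k;  // single release point for sub-kernels
      }

     private:
      std::vector<Kernel *> subs_;
    };

    int main() {
      std::vector<Kernel *> subs = {new Kernel, new Kernel};
      GroupKernel gk(std::move(subs));  // gk now owns the sub-kernels
      return 0;                         // ~GroupKernel frees them exactly once
    }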
+ */ + +#include "src/runtime/kernel/arm/fp32/group_convolution_fp32.h" + +using mindspore::lite::RET_OK; + +namespace mindspore::kernel { +int GroupConvolutionFp32CPUKernel::SeparateInput(int group_id) { + auto in_tensor = in_tensors_.front(); + int in_plane = in_tensor->Height() * in_tensor->Width() * in_tensor->Batch(); + int sub_in_channel = conv_param_->input_channel_; + int ori_in_channel = sub_in_channel * group_num_; + auto sub_in_data = reinterpret_cast(group_convs_.at(group_id)->in_tensors().front()->data_c()); + float *src_ptr = reinterpret_cast(ori_in_data_) + group_id * sub_in_channel; + float *dst_ptr = sub_in_data; + for (int i = 0; i < in_plane; ++i) { + memcpy(dst_ptr, src_ptr, sub_in_channel * sizeof(float)); + src_ptr += ori_in_channel; + dst_ptr += sub_in_channel; + } + return RET_OK; +} + +int GroupConvolutionFp32CPUKernel::PostConcat(int group_id) { + auto out_tensor = out_tensors_.front(); + int out_plane = out_tensor->Height() * out_tensor->Width() * out_tensor->Batch(); + int sub_out_channel = conv_param_->output_channel_; + int ori_out_channel = sub_out_channel * group_num_; + auto sub_out_data = reinterpret_cast(group_convs_.at(group_id)->out_tensors().front()->data_c()); + float *src_ptr = sub_out_data; + float *dst_ptr = reinterpret_cast(ori_out_data_) + group_id * sub_out_channel; + for (int i = 0; i < out_plane; ++i) { + memcpy(dst_ptr, src_ptr, sub_out_channel * sizeof(float)); + src_ptr += sub_out_channel; + dst_ptr += ori_out_channel; + } + return RET_OK; +} +} // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/group_convolution_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/group_convolution_fp32.h new file mode 100644 index 0000000000..c71400ed80 --- /dev/null +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/group_convolution_fp32.h @@ -0,0 +1,40 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GROUP_CONVOLUTION_FP32_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GROUP_CONVOLUTION_FP32_H_ + +#include +#include +#include "src/lite_kernel.h" +#include "nnacl/op_base.h" +#include "src/runtime/kernel/arm/base/group_convolution_base.h" + +namespace mindspore::kernel { +class GroupConvolutionFp32CPUKernel : public GroupConvolutionBaseCPUKernel { + public: + GroupConvolutionFp32CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::InnerContext *ctx, + std::vector group_convs, const int group_num) + : GroupConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, std::move(group_convs), group_num) { + } // opParameter(in channel, out channel) in this kernel has been split to groups, if + // you want to get real params, multiply in channel / out channel with group num + int SeparateInput(int group_id) override; + int PostConcat(int group_id) override; +}; +} // namespace mindspore::kernel + +#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GROUP_CONVOLUTION_FP32_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/group_convolution_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/group_convolution_int8.cc index 4a423fa7b3..4f9a344ad0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/group_convolution_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/group_convolution_int8.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,10 @@ #include "src/runtime/kernel/arm/int8/group_convolution_int8.h" +using mindspore::lite::RET_OK; + namespace mindspore::kernel { -void GroupConvolutionInt8CPUKernel::SeparateInput(int group_id) { +int GroupConvolutionInt8CPUKernel::SeparateInput(int group_id) { int in_plane = conv_param_->input_h_ * conv_param_->input_w_ * conv_param_->input_batch_; int sub_in_channel = conv_param_->input_channel_; int ori_in_channel = sub_in_channel * group_num_; @@ -29,9 +31,10 @@ void GroupConvolutionInt8CPUKernel::SeparateInput(int group_id) { src_ptr += ori_in_channel; dst_ptr += sub_in_channel; } + return RET_OK; } -void GroupConvolutionInt8CPUKernel::PostConcat(int group_id) { +int GroupConvolutionInt8CPUKernel::PostConcat(int group_id) { int out_plane = conv_param_->output_h_ * conv_param_->output_w_ * conv_param_->output_batch_; int sub_out_channel = conv_param_->output_channel_; int ori_out_channel = sub_out_channel * group_num_; @@ -43,5 +46,6 @@ void GroupConvolutionInt8CPUKernel::PostConcat(int group_id) { src_ptr += sub_out_channel; dst_ptr += ori_out_channel; } + return RET_OK; } } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/group_convolution_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/group_convolution_int8.h index 53a49cdf42..8fc6e8c56b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/group_convolution_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/group_convolution_int8.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -21,19 +21,19 @@ #include #include "src/lite_kernel.h" #include "nnacl/op_base.h" -#include "src/runtime/kernel/arm/base/group_convolution.h" +#include "src/runtime/kernel/arm/base/group_convolution_base.h" namespace mindspore::kernel { -class GroupConvolutionInt8CPUKernel : public GroupConvolutionCPUKernel { +class GroupConvolutionInt8CPUKernel : public GroupConvolutionBaseCPUKernel { public: GroupConvolutionInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::InnerContext *ctx, std::vector group_convs, const int group_num) - : GroupConvolutionCPUKernel(parameter, inputs, outputs, ctx, std::move(group_convs), group_num) { + : GroupConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, std::move(group_convs), group_num) { } // opParameter(in channel, out channel) in this kernel has been split to groups, if // you want to get real params, multiply in channel / out channel with group num - void SeparateInput(int group_id) override; - void PostConcat(int group_id) override; + int SeparateInput(int group_id) override; + int PostConcat(int group_id) override; }; } // namespace mindspore::kernel