From: @ling_qiao_min
Reviewed-by: @zhang_xue_tong, @hangangqiang
Signed-off-by: @zhang_xue_tong
pull/14586/MERGE
@@ -14,7 +14,7 @@
 * limitations under the License.
 */
#include "src/runtime/kernel/arm/fp32/group_convolution_fp32.h"
#include "src/runtime/kernel/arm/base/group_convolution.h"
#include "src/runtime/infer_manager.h"
#include "include/errorcode.h"
@@ -139,7 +139,7 @@ void GroupConvolutionCPUKernel::SeparateInput(int group_id) {
  int sub_in_channel = conv_param_->input_channel_;
  int ori_in_channel = sub_in_channel * group_num_;
  auto sub_in_data = reinterpret_cast<float *>(group_convs_.at(group_id)->in_tensors().front()->data_c());
  float *src_ptr = ori_in_data_ + group_id * sub_in_channel;
  float *src_ptr = reinterpret_cast<float *>(ori_in_data_) + group_id * sub_in_channel;
  float *dst_ptr = sub_in_data;
  for (int i = 0; i < in_plane; ++i) {
    memcpy(dst_ptr, src_ptr, sub_in_channel * sizeof(float));
@@ -155,7 +155,7 @@ void GroupConvolutionCPUKernel::PostConcat(int group_id) {
  int ori_out_channel = sub_out_channel * group_num_;
  auto sub_out_data = reinterpret_cast<float *>(group_convs_.at(group_id)->out_tensors().front()->data_c());
  float *src_ptr = sub_out_data;
  float *dst_ptr = ori_out_data_ + group_id * sub_out_channel;
  float *dst_ptr = reinterpret_cast<float *>(ori_out_data_) + group_id * sub_out_channel;
  for (int i = 0; i < out_plane; ++i) {
    memcpy(dst_ptr, src_ptr, sub_out_channel * sizeof(float));
    src_ptr += sub_out_channel;
@@ -164,8 +164,8 @@ void GroupConvolutionCPUKernel::PostConcat(int group_id) {
}
int GroupConvolutionCPUKernel::Run() {
  ori_in_data_ = reinterpret_cast<float *>(in_tensors().front()->data_c());
  ori_out_data_ = reinterpret_cast<float *>(out_tensors().front()->data_c());
  ori_in_data_ = in_tensors().front()->data_c();
  ori_out_data_ = out_tensors().front()->data_c();
  for (int i = 0; i < group_num_; ++i) {
    // first, separate group conv input into several parts. This step must be in runtime stage.
    SeparateInput(i);
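Review note: for readers new to this kernel, SeparateInput() and PostConcat() above are a strided channel split and its inverse over NHWC buffers — group g's slice starts at offset g * sub_channel inside each pixel's channel vector, and the loop jumps ori_channel elements per pixel. A self-contained, runnable sketch of that copy pattern under those assumptions (function names and the small main() are illustrative, not the kernel's API):

#include <cstdio>
#include <cstring>
#include <vector>

// Copy group `g`'s channel slice out of an NHWC buffer with `ori_channel`
// channels per pixel into a dense sub-buffer with `sub_channel` channels.
void SplitChannels(const float *ori, float *sub, int plane, int ori_channel, int sub_channel, int g) {
  const float *src = ori + g * sub_channel;
  float *dst = sub;
  for (int i = 0; i < plane; ++i) {
    std::memcpy(dst, src, sub_channel * sizeof(float));
    src += ori_channel;  // same slice in the next pixel
    dst += sub_channel;
  }
}

// Inverse: scatter a dense per-group output back into the fused NHWC output.
void ConcatChannels(const float *sub, float *ori, int plane, int ori_channel, int sub_channel, int g) {
  const float *src = sub;
  float *dst = ori + g * sub_channel;
  for (int i = 0; i < plane; ++i) {
    std::memcpy(dst, src, sub_channel * sizeof(float));
    src += sub_channel;
    dst += ori_channel;
  }
}

int main() {
  // 2 pixels, 4 channels, 2 groups of 2 channels each.
  std::vector<float> ori = {0, 1, 2, 3, 10, 11, 12, 13};
  std::vector<float> sub(4);
  SplitChannels(ori.data(), sub.data(), /*plane=*/2, /*ori_channel=*/4, /*sub_channel=*/2, /*g=*/1);
  for (float v : sub) std::printf("%g ", v);  // prints: 2 3 12 13
  return 0;
}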
@@ -14,8 +14,8 @@
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GROUP_CONVOLUTION_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GROUP_CONVOLUTION_H_
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_GROUP_CONVOLUTION_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_GROUP_CONVOLUTION_H_
#include <utility>
#include <vector>
@@ -47,11 +47,9 @@ class GroupConvolutionCPUKernel : public ConvolutionBaseCPUKernel {
 protected:
  std::vector<kernel::LiteKernel *> group_convs_;
  const int group_num_;

 private:
  float *ori_in_data_ = nullptr;   // do not free
  float *ori_out_data_ = nullptr;  // do not free
  void *ori_in_data_ = nullptr;   // do not free
  void *ori_out_data_ = nullptr;  // do not free
};
}  // namespace mindspore::kernel
#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GROUP_CONVOLUTION_H_
#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_GROUP_CONVOLUTION_H_
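Review note: the float* → void* change above is what lets this header move from fp32/ into base/ and back both the fp32 and int8 kernels: the base class keeps untyped, non-owning pointers, and each dtype-specific subclass reinterpret_casts at the point of use. A minimal sketch of the pattern, with toy classes standing in for the real kernels:

#include <cstdint>
#include <cstdio>

class BaseKernel {
 protected:
  void *ori_in_data_ = nullptr;  // dtype-agnostic, not owned -- do not free
};

class Fp32Kernel : public BaseKernel {
 public:
  explicit Fp32Kernel(float *p) { ori_in_data_ = p; }
  float First() const { return *reinterpret_cast<float *>(ori_in_data_); }  // cast at use site
};

class Int8Kernel : public BaseKernel {
 public:
  explicit Int8Kernel(int8_t *p) { ori_in_data_ = p; }
  int First() const { return *reinterpret_cast<int8_t *>(ori_in_data_); }
};

int main() {
  float f = 1.5f;
  int8_t q = 7;
  std::printf("%g %d\n", Fp32Kernel(&f).First(), Int8Kernel(&q).First());
  return 0;
}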
@@ -0,0 +1,246 @@
/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "src/runtime/kernel/arm/base/group_convolution_creator.h"
#include "src/runtime/kernel/arm/base/group_convolution.h"
#include "src/runtime/kernel/arm/int8/convolution_int8_creator.h"
#include "src/runtime/kernel/arm/fp32/convolution_delegate_fp32.h"
#include "src/runtime/kernel/arm/int8/group_convolution_int8.h"
namespace mindspore::kernel {
void CopyTensorQuantParam(lite::Tensor *dst, lite::Tensor *src) {
  for (size_t i = 0; i < src->quant_params().size(); i++) {
    dst->AddQuantParam(src->quant_params().at(i));
  }
}
ConvParameter *CreateNewConvParameter(ConvParameter *parameter) {
  auto conv_parameter = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter)));
  if (conv_parameter == nullptr) {
    MS_LOG(ERROR) << "Malloc new conv parameter failed.";
    return nullptr;
  }
  memcpy(conv_parameter, parameter, sizeof(ConvParameter));
  return conv_parameter;
}
void FreeMemory(ConvParameter *conv_param, const std::vector<lite::Tensor *> &new_inputs,
                const std::vector<lite::Tensor *> &new_outputs) {
  if (conv_param != nullptr) {
    free(conv_param);
  }
  for (auto &in_tensor : new_inputs) {
    delete in_tensor;
  }
  for (auto &out_tensor : new_outputs) {
    delete out_tensor;
  }
}
static inline lite::Tensor *TensorMalloc(lite::Tensor *tensor) {
  if (tensor->MallocData() != lite::RET_OK) {
    delete tensor;
    MS_LOG(ERROR) << "malloc tensor data failed.";
    return nullptr;
  }
  return tensor;
}
lite::Tensor *CreateConstTensor(lite::Tensor *tensor, const std::vector<int> &shape, const int index) {
  auto new_tensor = new (std::nothrow)
    lite::Tensor(tensor->data_type(), shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
  if (new_tensor == nullptr) {
    MS_LOG(ERROR) << "Create new_tensor failed.";
    return nullptr;
  }
  auto ret = new_tensor->MallocData();
  if (ret != lite::RET_OK) {
    delete new_tensor;
    MS_LOG(ERROR) << "Malloc new_tensor failed.";
    return nullptr;
  }
  memcpy(new_tensor->data_c(), reinterpret_cast<char *>(tensor->data_c()) + index * new_tensor->Size(),
         new_tensor->Size());
  return new_tensor;
}
lite::Tensor *CreateVarTensor(const TensorInfo &tensor_info, bool inferred) {
  auto tensor = new (std::nothrow) lite::Tensor();
  if (!tensor) {
    MS_LOG(ERROR) << "new tensor failed.";
    return nullptr;
  }
  tensor->set_data_type(tensor_info.data_type_);
  tensor->set_format(tensor_info.format_);
  tensor->set_category(tensor_info.tensor_type_);
  if (tensor_info.is_in_) {
    tensor->set_shape(tensor_info.shape_);
  }
  if (inferred) {
    // set shape of out tensor
    if (!tensor_info.is_in_) {
      tensor->set_shape(tensor_info.shape_);
    }
    return TensorMalloc(tensor);
  }
  return tensor;
}
/* Class GroupConv Creator Implement Part */
void GroupConvCreator::CopyQuantParam(std::vector<lite::Tensor *> *tensors) {
  for (size_t j = 0; j < origin_inputs_.size(); ++j) {
    CopyTensorQuantParam(tensors->at(j), origin_inputs_.at(j));
  }
}
bool GroupConvCreator::CheckIfValidPoint(void *ptr) {
  if (ptr == nullptr) {
    for (auto &sub_conv : group_convs_) {
      delete sub_conv;
    }
    return false;
  }
  return true;
}
int GroupConvCreator::NewInputTensor(std::vector<lite::Tensor *> *tensors) {
  auto in_tensor = CreateVarTensor(
    {input_shape_, schema::Format_NHWC, origin_inputs_.at(0)->data_type(), lite::Tensor::Category::VAR, true},
    infered_);
  if (!CheckIfValidPoint(in_tensor)) {
    return lite::RET_ERROR;
  }
  tensors->emplace_back(in_tensor);
  return lite::RET_OK;
}
int GroupConvCreator::NewOutputTensor(std::vector<lite::Tensor *> *tensors, lite::Tensor *output) {
  auto out_tensor =
    CreateVarTensor({output_shape_, output->format(), output->data_type(), output->category(), false}, infered_);
  if (!CheckIfValidPoint(out_tensor)) {
    return lite::RET_ERROR;
  }
  if (is_quant_) {
    CopyTensorQuantParam(out_tensor, output);
  }
  tensors->emplace_back(out_tensor);
  return lite::RET_OK;
}
int GroupConvCreator::NewConstTensor(std::vector<lite::Tensor *> *tensors, int group_id) {
  std::vector<std::pair<int, std::vector<int>>> const_tensor_list{std::make_pair(kWeightIndex, filter_shape_)};
  if (origin_inputs_.size() == 3) {
    const_tensor_list.emplace_back(std::make_pair(kBiasIndex, bias_shape_));
  }
  for (auto &info : const_tensor_list) {
    auto const_tensor = CreateConstTensor(origin_inputs_.at(info.first), info.second, group_id);
    if (!CheckIfValidPoint(const_tensor)) {
      return lite::RET_ERROR;
    }
    tensors->emplace_back(const_tensor);
  }
  return lite::RET_OK;
}
void GroupConvCreator::SetShapeOfTensors() {
  int new_in_channel = origin_inputs_.at(kWeightIndex)->Channel();
  int new_out_channel;
  if (conv_param_->group_ == 0) {
    MS_LOG(ERROR) << "Divisor 'group' cannot be 0.";
    return;
  } else {
    new_out_channel = origin_inputs_.at(kWeightIndex)->Batch() / conv_param_->group_;
  }
  /* set shape */
  set_filter_shape({new_out_channel, conv_param_->kernel_h_, conv_param_->kernel_w_, new_in_channel});
  set_bias_shape({new_out_channel});
  if (infered_) {
    conv_param_->input_channel_ = new_in_channel;
    conv_param_->output_channel_ = new_out_channel;
    set_input_shape({origin_inputs_.front()->Batch(), origin_inputs_.front()->Height(),
                     origin_inputs_.front()->Width(), new_in_channel});
    set_output_shape({origin_inputs_.front()->Batch(), origin_outputs_.front()->Height(),
                      origin_outputs_.front()->Width(), new_out_channel});
  }
}
int GroupConvCreator::CreatGroupConv() {
  for (int i = 0; i < conv_param_->group_; ++i) {
    auto new_conv_parameter = CreateNewConvParameter(conv_param_);
    if (!CheckIfValidPoint(new_conv_parameter)) {
      return lite::RET_ERROR;
    }
    // create new input for each group
    std::vector<lite::Tensor *> new_inputs;
    if (NewInputTensor(&new_inputs) != lite::RET_OK) {
      MS_LOG(ERROR) << "new input tensor failed.";
      FreeMemory(new_conv_parameter, new_inputs, {});
      return lite::RET_ERROR;
    }
    // const tensor
    if (NewConstTensor(&new_inputs, i) != lite::RET_OK) {
      MS_LOG(ERROR) << "new const tensor failed.";
      FreeMemory(new_conv_parameter, new_inputs, {});
      return lite::RET_ERROR;
    }
    // create new output tensor
    std::vector<lite::Tensor *> new_outputs;
    for (auto &output : origin_outputs_) {
      if (NewOutputTensor(&new_outputs, output) != lite::RET_OK) {
        MS_LOG(ERROR) << "new output tensor failed.";
        FreeMemory(new_conv_parameter, new_inputs, new_outputs);
        return lite::RET_ERROR;
      }
    }
    if (is_quant_) {
      CopyQuantParam(&new_inputs);
      group_convs_.emplace_back(CpuConvInt8KernelSelect(new_inputs, new_outputs,
                                                        reinterpret_cast<OpParameter *>(new_conv_parameter),
                                                        context_));
    } else {
      group_convs_.emplace_back(new (std::nothrow) kernel::ConvolutionDelegateCPUKernel(
        reinterpret_cast<OpParameter *>(new_conv_parameter), new_inputs, new_outputs, context_));
    }
  }
  return lite::RET_OK;
}
kernel::LiteKernel *CpuGroupConvFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                  const std::vector<lite::Tensor *> &outputs,
                                                  OpParameter *op_parameter, const lite::InnerContext *ctx) {
  GroupConvCreator group_conv_creator(inputs, outputs, op_parameter, ctx, false);
  group_conv_creator.SetShapeOfTensors();
  if (group_conv_creator.CreatGroupConv() != lite::RET_OK) {
    MS_LOG(ERROR) << "Create fp32 group conv failed.";
    return nullptr;
  }
  return new (std::nothrow)
    GroupConvolutionCPUKernel(op_parameter, inputs, outputs, ctx, group_conv_creator.get_group_conv(),
                              reinterpret_cast<ConvParameter *>(op_parameter)->group_);
}
kernel::LiteKernel *CpuGroupConvInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                  const std::vector<lite::Tensor *> &outputs,
                                                  OpParameter *op_parameter, const lite::InnerContext *ctx,
                                                  int group) {
  GroupConvCreator group_conv_creator(inputs, outputs, op_parameter, ctx, true);
  group_conv_creator.SetShapeOfTensors();
  if (group_conv_creator.CreatGroupConv() != lite::RET_OK) {
    MS_LOG(ERROR) << "Create int8 group conv failed.";
    return nullptr;
  }
  return new (std::nothrow)
    GroupConvolutionInt8CPUKernel(op_parameter, inputs, outputs, ctx, group_conv_creator.get_group_conv(), group);
}
}  // namespace mindspore::kernel
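Review note: a worked example of the arithmetic in SetShapeOfTensors() and CreateConstTensor() may help; the dimensions below are made up. With an OHWI weight {16, 3, 3, 4} and group_ = 2, new_in_channel = Channel() = 4 and new_out_channel = Batch() / group = 8, so each sub-conv gets filter {8, 3, 3, 4} and bias {8}. Because the split runs along the outermost axis, group i's filters are one contiguous block, which is why CreateConstTensor() can slice with a single memcpy at byte offset index * Size(). A runnable miniature of that slicing:

#include <cstdio>
#include <cstring>
#include <vector>

int main() {
  // Hypothetical weight: OHWI {4, 1, 1, 2}, group = 2 -> per-group shape {2, 1, 1, 2}.
  std::vector<float> weight = {0, 1, 2, 3, 4, 5, 6, 7};  // 4 filters, 2 values each
  const int group = 2;
  const size_t sub_elems = weight.size() / group;  // element count of one group's const tensor
  for (int g = 0; g < group; ++g) {
    std::vector<float> sub(sub_elems);
    // Same arithmetic as CreateConstTensor(): base + index * sub-tensor size.
    std::memcpy(sub.data(), weight.data() + g * sub_elems, sub_elems * sizeof(float));
    std::printf("group %d first filter value: %g\n", g, sub[0]);  // prints 0, then 4
  }
  return 0;
}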
@@ -0,0 +1,88 @@
/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_GROUP_CONVOLUTION_CREATOR_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_GROUP_CONVOLUTION_CREATOR_H_
#include <utility>
#include <vector>
#include "src/lite_kernel.h"
#include "nnacl/conv_parameter.h"
namespace mindspore::kernel {
struct TensorInfo {
  std::vector<int> shape_;
  schema::Format format_;
  TypeId data_type_;
  lite::Tensor::Category tensor_type_;
  bool is_in_;
};
class GroupConvCreator {
 public:
  GroupConvCreator(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs, OpParameter *op_parameter,
                   const lite::InnerContext *ctx, bool is_quant)
      : origin_inputs_(std::move(inputs)),
        origin_outputs_(std::move(outputs)),
        context_(ctx),
        infered_(op_parameter->infer_flag_),
        is_quant_(is_quant) {
    conv_param_ = reinterpret_cast<ConvParameter *>(op_parameter);
  }
  ~GroupConvCreator() = default;

 public:
  void SetShapeOfTensors();
  int CreatGroupConv();
  std::vector<kernel::LiteKernel *> get_group_conv() { return group_convs_; }

 protected:
  void set_input_shape(const std::vector<int> &shape) { input_shape_ = shape; }
  void set_output_shape(const std::vector<int> &shape) { output_shape_ = shape; }
  void set_filter_shape(const std::vector<int> &shape) { filter_shape_ = shape; }
  void set_bias_shape(const std::vector<int> &shape) { bias_shape_ = shape; }
  void CopyQuantParam(std::vector<lite::Tensor *> *tensors);
  bool CheckIfValidPoint(void *ptr);
  int NewInputTensor(std::vector<lite::Tensor *> *tensors);
  int NewConstTensor(std::vector<lite::Tensor *> *tensors, int group_id);
  int NewOutputTensor(std::vector<lite::Tensor *> *tensors, lite::Tensor *output);

 private:
  std::vector<lite::Tensor *> origin_inputs_;
  std::vector<lite::Tensor *> origin_outputs_;
  std::vector<kernel::LiteKernel *> group_convs_;
  std::vector<int> input_shape_;
  std::vector<int> output_shape_;
  std::vector<int> filter_shape_;
  std::vector<int> bias_shape_;
  const lite::InnerContext *context_;
  ConvParameter *conv_param_;
  bool infered_;
  bool is_quant_;
};
LiteKernel *CpuGroupConvFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                          const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
                                          const lite::InnerContext *ctx);
LiteKernel *CpuGroupConvInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                          const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
                                          const lite::InnerContext *ctx, int group);
}  // namespace mindspore::kernel
#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_GROUP_CONVOLUTION_CREATOR_H_
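Review note: one detail worth flagging in the constructor above — the tensor vectors are taken by value and then moved into origin_inputs_/origin_outputs_, the usual pass-by-value-then-std::move idiom: callers with lvalues pay one copy, callers with temporaries pay none. An isolated illustration of the idiom (toy Holder type, nothing from this header):

#include <cstdio>
#include <utility>
#include <vector>

class Holder {
 public:
  // Pass by value + std::move: one copy for lvalue args, zero for rvalue args.
  explicit Holder(std::vector<int> v) : v_(std::move(v)) {}
  size_t size() const { return v_.size(); }

 private:
  std::vector<int> v_;
};

int main() {
  std::vector<int> a = {1, 2, 3};
  Holder h1(a);                       // `a` is copied into the parameter, then moved
  Holder h2(std::vector<int>{4, 5});  // the temporary is moved all the way through
  std::printf("%zu %zu\n", h1.size(), h2.size());
  return 0;
}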
@@ -16,7 +16,6 @@
#include "src/runtime/kernel/arm/fp16/convolution_delegate_fp16.h"
#include <vector>
#include "src/runtime/kernel/arm/fp32/convolution_delegate_fp32.h"
#include "src/runtime/kernel/arm/fp16/convolution_fp16.h"
#include "src/runtime/kernel/arm/fp16/convolution_winograd_fp16.h"
#include "src/runtime/kernel/arm/fp16/convolution_1x1_fp16.h"
@@ -1,212 +0,0 @@
/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vector>
#include "src/runtime/kernel/arm/fp32/convolution_creator_manager.h"
#include "src/runtime/kernel/arm/fp32/convolution_delegate_fp32.h"
#include "src/runtime/kernel/arm/fp32/group_convolution_fp32.h"
#include "src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.h"
#include "src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.h"
#include "src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.h"
#include "src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.h"
#include "src/runtime/kernel/arm/int8/convolution_int8.h"
#include "src/runtime/kernel/arm/int8/convolution_1x1_int8.h"
#include "src/runtime/kernel/arm/int8/convolution_3x3_int8.h"
#include "nnacl/conv_parameter.h"
namespace mindspore::lite {
using mindspore::lite::Format::Format_NHWC;
static inline lite::Tensor *TensorMalloc(lite::Tensor *tensor) {
  if (tensor->MallocData() != RET_OK) {
    delete tensor;
    MS_LOG(ERROR) << "malloc tensor data failed.";
    return nullptr;
  }
  return tensor;
}
lite::Tensor *CreateConstTensor(lite::Tensor *tensor, const std::vector<int> &shape, const int index) {
  auto new_tensor =
    new (std::nothrow) lite::Tensor(tensor->data_type(), shape, Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
  if (new_tensor == nullptr) {
    MS_LOG(ERROR) << "Create new_tensor failed.";
    return nullptr;
  }
  auto ret = new_tensor->MallocData();
  if (ret != RET_OK) {
    delete new_tensor;
    MS_LOG(ERROR) << "Malloc new_tensor failed.";
    return nullptr;
  }
  memcpy(new_tensor->data_c(), reinterpret_cast<char *>(tensor->data_c()) + index * new_tensor->Size(),
         new_tensor->Size());
  return new_tensor;
}
lite::Tensor *CreateVarTensor(const TensorInfo &tensor_info, bool inferred) {
  auto tensor = new (std::nothrow) lite::Tensor();
  if (!tensor) {
    MS_LOG(ERROR) << "new tensor failed.";
    return nullptr;
  }
  tensor->set_data_type(tensor_info.data_type_);
  tensor->set_format(tensor_info.format_);
  tensor->set_category(tensor_info.tensor_type_);
  if (tensor_info.is_in_) {
    tensor->set_shape(tensor_info.shape_);
  }
  if (inferred) {
    // set shape of out tensor
    if (!tensor_info.is_in_) {
      tensor->set_shape(tensor_info.shape_);
    }
    return TensorMalloc(tensor);
  }
  return tensor;
}
/* Kernel creator func part */
kernel::LiteKernel *CpuConvInt8KernelSelect(const std::vector<lite::Tensor *> &inputs,
                                            const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
                                            const InnerContext *ctx) {
  auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter);
  kernel::LiteKernel *kernel = nullptr;
  if (conv_param->kernel_h_ == 3 && conv_param->kernel_w_ == 3 && conv_param->stride_h_ == 1 &&
      conv_param->stride_w_ == 1 && conv_param->dilation_h_ == 1 && conv_param->dilation_w_ == 1) {
#ifdef ENABLE_ARM64
    if (mindspore::lite::IsSupportSDot()) {
      kernel = new (std::nothrow) kernel::ConvolutionInt8CPUKernel(op_parameter, inputs, outputs, ctx);
    } else {
      kernel = new (std::nothrow) kernel::Convolution3x3Int8CPUKernel(op_parameter, inputs, outputs, ctx);
    }
#else
    kernel = new (std::nothrow) kernel::Convolution3x3Int8CPUKernel(op_parameter, inputs, outputs, ctx);
#endif
  } else if (conv_param->kernel_h_ == 1 && conv_param->kernel_w_ == 1) {
    kernel = new (std::nothrow) kernel::Convolution1x1Int8CPUKernel(op_parameter, inputs, outputs, ctx);
  } else {
    kernel = new (std::nothrow) kernel::ConvolutionInt8CPUKernel(op_parameter, inputs, outputs, ctx);
  }
  return kernel;
}
kernel::LiteKernel *DispatchConvDw(const std::vector<lite::Tensor *> &inputs,
                                   const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
                                   const InnerContext *ctx) {
  auto conv_param = reinterpret_cast<ConvParameter *>(opParameter);
  kernel::LiteKernel *kernel = nullptr;
  if (opParameter != nullptr && opParameter->infer_flag_) {
#if defined(ENABLE_ARM) || (defined(ENABLE_SSE) && !defined(ENABLE_AVX))
    if (CheckConvDw1DWinograd(conv_param, ctx->thread_num_)) {
      kernel = new (std::nothrow) kernel::ConvolutionDepthwise3x3CPUKernel(opParameter, inputs, outputs, ctx);
    }
#endif
#if defined(ENABLE_ARM64) || defined(ENABLE_AVX)
    if (kernel == nullptr && CheckConvDwUseIndirectBuffer(conv_param)) {
      kernel = new (std::nothrow) kernel::ConvolutionDepthwiseIndirectCPUKernel(opParameter, inputs, outputs, ctx);
    }
#endif
    if (kernel == nullptr && conv_param->input_channel_ < 32) {
      kernel = new (std::nothrow) kernel::ConvolutionDepthwiseSWCPUKernel(opParameter, inputs, outputs, ctx);
    }
  }
  if (kernel == nullptr) {
    kernel = new (std::nothrow) kernel::ConvolutionDepthwiseCPUKernel(opParameter, inputs, outputs, ctx);
  }
  return kernel;
}
kernel::LiteKernel *DispatchGroupConv(const std::vector<lite::Tensor *> &inputs,
                                      const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
                                      const InnerContext *ctx) {
  GroupConvCreator group_conv_creator(inputs, outputs, op_parameter, ctx, false);
  group_conv_creator.SetShapeOfTensors();
  if (group_conv_creator.CreatGroupConv() != RET_OK) {
    MS_LOG(ERROR) << "Create group conv failed.";
    return nullptr;
  }
  return new (std::nothrow)
    kernel::GroupConvolutionCPUKernel(op_parameter, inputs, outputs, ctx, group_conv_creator.get_group_conv(),
                                      reinterpret_cast<ConvParameter *>(op_parameter)->group_);
}
/* Class GroupConv Creator Implement Part */
void GroupConvCreator::SetShapeOfTensors() {
  int new_in_channel = origin_inputs_.at(kWeightIndex)->Channel();
  int new_out_channel;
  if (conv_param_->group_ == 0) {
    MS_LOG(ERROR) << "Divisor 'group' cannot be 0.";
    return;
  } else {
    new_out_channel = origin_inputs_.at(kWeightIndex)->Batch() / conv_param_->group_;
  }
  /* set shape */
  set_filter_shape({new_out_channel, conv_param_->kernel_h_, conv_param_->kernel_w_, new_in_channel});
  set_bias_shape({new_out_channel});
  if (infered_) {
    conv_param_->input_channel_ = new_in_channel;
    conv_param_->output_channel_ = new_out_channel;
    set_input_shape({origin_inputs_.front()->Batch(), origin_inputs_.front()->Height(),
                     origin_inputs_.front()->Width(), new_in_channel});
    set_output_shape({origin_inputs_.front()->Batch(), origin_outputs_.front()->Height(),
                      origin_outputs_.front()->Width(), new_out_channel});
  }
}
int GroupConvCreator::CreatGroupConv() {
  for (int i = 0; i < conv_param_->group_; ++i) {
    auto new_conv_parameter = CreateNewConvParameter(conv_param_);
    if (!CheckIfValidPoint(new_conv_parameter)) {
      return RET_ERROR;
    }
    // create new input for each group
    std::vector<lite::Tensor *> new_inputs;
    if (NewInputTensor(&new_inputs) != RET_OK) {
      MS_LOG(ERROR) << "new input tensor failed.";
      FreeMemory(new_conv_parameter, new_inputs, {});
      return RET_ERROR;
    }
    // const tensor
    if (NewConstTensor(&new_inputs, i) != RET_OK) {
      MS_LOG(ERROR) << "new const tensor failed.";
      FreeMemory(new_conv_parameter, new_inputs, {});
      return RET_ERROR;
    }
    // create new output tensor
    std::vector<lite::Tensor *> new_outputs;
    for (auto &output : origin_outputs_) {
      if (NewOutputTensor(&new_outputs, output) != RET_OK) {
        MS_LOG(ERROR) << "new output tensor failed.";
        FreeMemory(new_conv_parameter, new_inputs, new_outputs);
        return RET_ERROR;
      }
    }
    if (is_quant_) {
      CopyQuantParam(&new_inputs);
      group_convs_.emplace_back(CpuConvInt8KernelSelect(new_inputs, new_outputs,
                                                        reinterpret_cast<OpParameter *>(new_conv_parameter),
                                                        context_));
    } else {
      group_convs_.emplace_back(new (std::nothrow) kernel::ConvolutionDelegateCPUKernel(
        reinterpret_cast<OpParameter *>(new_conv_parameter), new_inputs, new_outputs, context_));
    }
  }
  return RET_OK;
}
}  // namespace mindspore::lite
@@ -1,180 +0,0 @@
/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_CONVOLUTION_CREATOR_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_CONVOLUTION_CREATOR_H_
#include <utility>
#include <vector>
#include "src/lite_kernel.h"
#include "nnacl/conv_parameter.h"
namespace mindspore::lite {
using Category = lite::Tensor::Category;
using Format = mindspore::schema::Format;
struct TensorInfo {
  std::vector<int> shape_;
  Format format_;
  TypeId data_type_;
  Category tensor_type_;
  bool is_in_;
};
inline void CopyTensorQuantParam(lite::Tensor *dst, lite::Tensor *src) {
  for (size_t i = 0; i < src->quant_params().size(); i++) {
    dst->AddQuantParam(src->quant_params().at(i));
  }
}
inline ConvParameter *CreateNewConvParameter(ConvParameter *parameter) {
  auto conv_parameter = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter)));
  if (conv_parameter == nullptr) {
    MS_LOG(ERROR) << "Malloc new conv parameter failed.";
    return nullptr;
  }
  memcpy(conv_parameter, parameter, sizeof(ConvParameter));
  return conv_parameter;
}
inline void FreeMemory(ConvParameter *conv_param, const std::vector<lite::Tensor *> &new_inputs,
                       const std::vector<lite::Tensor *> &new_outputs) {
  if (conv_param != nullptr) {
    free(conv_param);
  }
  for (auto &in_tensor : new_inputs) {
    delete in_tensor;
  }
  for (auto &out_tensor : new_outputs) {
    delete out_tensor;
  }
}
lite::Tensor *CreateVarTensor(const TensorInfo &tensor_info, bool inferred);
lite::Tensor *CreateConstTensor(lite::Tensor *tensor, const std::vector<int> &shape, int index);
kernel::LiteKernel *CpuConvInt8KernelSelect(const std::vector<lite::Tensor *> &inputs,
                                            const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
                                            const InnerContext *ctx);
kernel::LiteKernel *DispatchConvDw(const std::vector<lite::Tensor *> &inputs,
                                   const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
                                   const InnerContext *ctx);
kernel::LiteKernel *DispatchGroupConv(const std::vector<lite::Tensor *> &inputs,
                                      const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
                                      const InnerContext *ctx);
class GroupConvCreator {
 public:
  GroupConvCreator(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs, OpParameter *op_parameter,
                   const InnerContext *ctx, bool is_quant)
      : origin_inputs_(std::move(inputs)),
        origin_outputs_(std::move(outputs)),
        context_(ctx),
        infered_(op_parameter->infer_flag_),
        is_quant_(is_quant) {
    conv_param_ = reinterpret_cast<ConvParameter *>(op_parameter);
  }
  ~GroupConvCreator() = default;

 public:
  void SetShapeOfTensors();
  void set_input_shape(const std::vector<int> &shape) { input_shape_ = shape; }
  void set_output_shape(const std::vector<int> &shape) { output_shape_ = shape; }
  void set_filter_shape(const std::vector<int> &shape) { filter_shape_ = shape; }
  void set_bias_shape(const std::vector<int> &shape) { bias_shape_ = shape; }
  std::vector<kernel::LiteKernel *> get_group_conv() { return group_convs_; }
  int CreatGroupConv();

 protected:
  void FreeSubConv() {
    for (auto &sub_conv : group_convs_) {
      delete sub_conv;
    }
  }
  bool CheckIfValidPoint(void *ptr) {
    if (ptr == nullptr) {
      MS_LOG(ERROR) << "pointer is nullptr.";
      FreeSubConv();
      return false;
    }
    return true;
  }
  int NewInputTensor(std::vector<lite::Tensor *> *tensors) {
    auto in_tensor = CreateVarTensor(
      {input_shape_, Format::Format_NHWC, origin_inputs_.at(0)->data_type(), Category::VAR, true}, infered_);
    if (!CheckIfValidPoint(in_tensor)) {
      return RET_ERROR;
    }
    tensors->emplace_back(in_tensor);
    return RET_OK;
  }
  int NewConstTensor(std::vector<lite::Tensor *> *tensors, int group_id) {
    std::vector<std::pair<int, std::vector<int>>> const_tensor_list{std::make_pair(kWeightIndex, filter_shape_)};
    if (origin_inputs_.size() == 3) {
      const_tensor_list.emplace_back(std::make_pair(kBiasIndex, bias_shape_));
    }
    for (auto &info : const_tensor_list) {
      auto const_tensor = CreateConstTensor(origin_inputs_.at(info.first), info.second, group_id);
      if (!CheckIfValidPoint(const_tensor)) {
        return RET_ERROR;
      }
      tensors->emplace_back(const_tensor);
    }
    return RET_OK;
  }
  int NewOutputTensor(std::vector<lite::Tensor *> *tensors, lite::Tensor *output) {
    auto out_tensor =
      CreateVarTensor({output_shape_, output->format(), output->data_type(), output->category(), false}, infered_);
    if (!CheckIfValidPoint(out_tensor)) {
      return RET_ERROR;
    }
    if (is_quant_) {
      CopyTensorQuantParam(out_tensor, output);
    }
    tensors->emplace_back(out_tensor);
    return RET_OK;
  }
  void CopyQuantParam(std::vector<lite::Tensor *> *tensors) {
    for (size_t j = 0; j < origin_inputs_.size(); ++j) {
      CopyTensorQuantParam(tensors->at(j), origin_inputs_.at(j));
    }
  }

 private:
  std::vector<lite::Tensor *> origin_inputs_;
  std::vector<lite::Tensor *> origin_outputs_;
  std::vector<kernel::LiteKernel *> group_convs_;
  std::vector<int> input_shape_;
  std::vector<int> output_shape_;
  std::vector<int> filter_shape_;
  std::vector<int> bias_shape_;
  const InnerContext *context_;
  ConvParameter *conv_param_;
  bool infered_;
  bool is_quant_;
};
}  // namespace mindspore::lite
#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_CONVOLUTION_CREATOR_H_
@@ -14,12 +14,16 @@
 * limitations under the License.
 */
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/fp32/convolution_delegate_fp32.h"
#include "src/runtime/kernel/arm/fp32/convolution_creator_manager.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/fp32/convolution_fp32.h"
#include "src/runtime/kernel/arm/fp32/convolution_1x1_fp32.h"
#include "src/runtime/kernel/arm/fp32/convolution_winograd_fp32.h"
#include "src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.h"
#include "src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.h"
#include "src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.h"
#include "src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.h"
#include "src/runtime/kernel/arm/base/group_convolution_creator.h"
#include "schema/model_generated.h"
#include "include/errorcode.h"
@@ -157,6 +161,32 @@ kernel::LiteKernel *ConvolutionDelegateCPUKernel::CpuConvFp32KernelSelect() {
  return kernel;
}
kernel::LiteKernel *DispatchConvDw(const std::vector<lite::Tensor *> &inputs,
                                   const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
                                   const InnerContext *ctx) {
  auto conv_param = reinterpret_cast<ConvParameter *>(opParameter);
  kernel::LiteKernel *kernel = nullptr;
  if (opParameter != nullptr && opParameter->infer_flag_) {
#if defined(ENABLE_ARM) || (defined(ENABLE_SSE) && !defined(ENABLE_AVX))
    if (CheckConvDw1DWinograd(conv_param, ctx->thread_num_)) {
      kernel = new (std::nothrow) kernel::ConvolutionDepthwise3x3CPUKernel(opParameter, inputs, outputs, ctx);
    }
#endif
#if defined(ENABLE_ARM64) || defined(ENABLE_AVX)
    if (kernel == nullptr && CheckConvDwUseIndirectBuffer(conv_param)) {
      kernel = new (std::nothrow) kernel::ConvolutionDepthwiseIndirectCPUKernel(opParameter, inputs, outputs, ctx);
    }
#endif
    if (kernel == nullptr && conv_param->input_channel_ < 32) {
      kernel = new (std::nothrow) kernel::ConvolutionDepthwiseSWCPUKernel(opParameter, inputs, outputs, ctx);
    }
  }
  if (kernel == nullptr) {
    kernel = new (std::nothrow) kernel::ConvolutionDepthwiseCPUKernel(opParameter, inputs, outputs, ctx);
  }
  return kernel;
}
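Review note: the shape of DispatchConvDw() above is a fallback chain — each specialized depthwise kernel is attempted only under its platform/shape guard, kernel stays nullptr until a guard matches, and the generic kernel is the unconditional catch-all. Reduced to a runnable toy (all names invented), the chain looks like this:

#include <cstdio>

const char *DispatchDw(bool winograd_ok, bool indirect_ok, bool small_channel) {
  const char *kernel = nullptr;
  if (winograd_ok) kernel = "dw_3x3_winograd";                    // most specialized first
  if (kernel == nullptr && indirect_ok) kernel = "dw_indirect";
  if (kernel == nullptr && small_channel) kernel = "dw_slidewindow";
  if (kernel == nullptr) kernel = "dw_generic";                   // unconditional fallback
  return kernel;
}

int main() {
  std::printf("%s\n", DispatchDw(false, false, true));   // dw_slidewindow
  std::printf("%s\n", DispatchDw(false, false, false));  // dw_generic
  return 0;
}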
/* creator func */
kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                             const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
@@ -172,7 +202,7 @@ kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector<lite::Tensor *> &
  } else if (conv_param->group_ == conv_param->input_channel_ && conv_param->group_ == conv_param->output_channel_) {
    kernel = DispatchConvDw(inputs, outputs, op_parameter, ctx);
  } else {
    kernel = DispatchGroupConv(inputs, outputs, op_parameter, ctx);
    kernel = CpuGroupConvFp32KernelCreator(inputs, outputs, op_parameter, ctx);
  }
  if (kernel == nullptr) {
@@ -18,7 +18,6 @@
#include <vector>
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/fp32/convolution_creator_manager.h"
#include "nnacl/conv_parameter.h"
#include "nnacl/op_base.h"
@@ -19,24 +19,13 @@
#include "nnacl/int8/conv_int8.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/fp32/convolution_creator_manager.h"
#include "src/runtime/kernel/arm/int8/convolution_1x1_int8.h"
#include "src/runtime/kernel/arm/int8/convolution_3x3_int8.h"
#include "src/runtime/kernel/arm/int8/group_convolution_int8.h"
#include "src/runtime/kernel/arm/int8/convolution_depthwise_int8.h"
#include "src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.h"
#include "src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.h"
#include "src/runtime/runtime_api.h"
#ifdef ENABLE_ARM64
#include "src/runtime/kernel/arm/int8/opt_op_handler.h"
#endif
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Conv2DFusion;
using mindspore::schema::Format::Format_NHWC;
namespace mindspore::kernel {
void ConvolutionInt8CPUKernel::CheckSupportOptimize() {
@@ -243,73 +232,4 @@ int ConvolutionInt8CPUKernel::Run() {
  FreeTmpBuffer();
  return RET_OK;
}
kernel::LiteKernel *CpuGroupConvInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                  const std::vector<lite::Tensor *> &outputs,
                                                  OpParameter *op_parameter, const InnerContext *ctx, int group) {
  lite::GroupConvCreator group_conv_creator(inputs, outputs, op_parameter, ctx, true);
  group_conv_creator.SetShapeOfTensors();
  if (group_conv_creator.CreatGroupConv() != RET_OK) {
    MS_LOG(ERROR) << "Create group conv failed.";
    return nullptr;
  }
  return new (std::nothrow)
    GroupConvolutionInt8CPUKernel(op_parameter, inputs, outputs, ctx, group_conv_creator.get_group_conv(), group);
}
kernel::LiteKernel *CpuConvDwInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                               const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
                                               const InnerContext *ctx, const kernel::KernelKey &desc) {
  auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter);
  kernel::LiteKernel *kernel = nullptr;
  auto act_quant_size =
    MSMAX(inputs.at(kInputIndex)->quant_params().size(), outputs.at(kOutputIndex)->quant_params().size());
  if (act_quant_size == 1) {  // per tensor
    if (CheckConvDwUse3X3(conv_param) && conv_param->input_channel_ % C8NUM == 0) {
#ifdef ENABLE_ARM64
      kernel = new (std::nothrow) kernel::ConvolutionDepthwise3x3Int8CPUKernel(op_parameter, inputs, outputs, ctx);
#endif
    }
    if (kernel == nullptr) {
      kernel = new (std::nothrow) kernel::ConvolutionDepthwiseInt8CPUKernel(op_parameter, inputs, outputs, ctx);
    }
  } else {  // per channel
    kernel = new (std::nothrow) kernel::ConvolutionDepthwiseSWInt8CPUKernel(op_parameter, inputs, outputs, ctx);
  }
  return kernel;
}
kernel::LiteKernel *CpuConvInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                             const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
                                             const InnerContext *ctx, const kernel::KernelKey &desc) {
  MS_ASSERT(op_parameter != nullptr);
  MS_ASSERT(desc.type == schema::PrimitiveType_Conv2DFusion);
  auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter);
  kernel::LiteKernel *kernel = nullptr;
  if (conv_param->group_ == 1) {
    kernel = CpuConvInt8KernelSelect(inputs, outputs, op_parameter, ctx);
  } else if (conv_param->group_ == conv_param->input_channel_ && conv_param->group_ == conv_param->output_channel_) {
    kernel = CpuConvDwInt8KernelCreator(inputs, outputs, op_parameter, ctx, desc);
  } else {
    MS_ASSERT(conv_param->group_ > 1);
    kernel = CpuGroupConvInt8KernelCreator(inputs, outputs, op_parameter, ctx, conv_param->group_);
  }
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "kernel is nullptr.";
    free(op_parameter);
    return nullptr;
  }
  auto ret = kernel->Init();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
    delete kernel;
    return nullptr;
  }
  return kernel;
}
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Conv2DFusion, CpuConvInt8KernelCreator)
}  // namespace mindspore::kernel
@@ -0,0 +1,118 @@
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "src/runtime/kernel/arm/int8/convolution_int8_creator.h"
#include "src/runtime/kernel/arm/int8/convolution_int8.h"
#include "src/runtime/kernel/arm/int8/convolution_1x1_int8.h"
#include "src/runtime/kernel/arm/int8/convolution_3x3_int8.h"
#include "src/runtime/kernel/arm/int8/convolution_depthwise_int8.h"
#include "src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.h"
#include "src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.h"
#include "src/runtime/kernel/arm/base/group_convolution_creator.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Conv2DFusion;
using mindspore::schema::Format::Format_NHWC;
namespace mindspore::kernel {
kernel::LiteKernel *CpuConvDwInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                               const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
                                               const InnerContext *ctx, const kernel::KernelKey &desc) {
  auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter);
  kernel::LiteKernel *kernel = nullptr;
  auto act_quant_size =
    MSMAX(inputs.at(kInputIndex)->quant_params().size(), outputs.at(kOutputIndex)->quant_params().size());
  if (act_quant_size == 1) {  // per tensor
    if (CheckConvDwUse3X3(conv_param) && conv_param->input_channel_ % C8NUM == 0) {
#ifdef ENABLE_ARM64
      kernel = new (std::nothrow) kernel::ConvolutionDepthwise3x3Int8CPUKernel(op_parameter, inputs, outputs, ctx);
#endif
    }
    if (kernel == nullptr) {
      kernel = new (std::nothrow) kernel::ConvolutionDepthwiseInt8CPUKernel(op_parameter, inputs, outputs, ctx);
    }
  } else {  // per channel
    kernel = new (std::nothrow) kernel::ConvolutionDepthwiseSWInt8CPUKernel(op_parameter, inputs, outputs, ctx);
  }
  return kernel;
}
/* Kernel creator func part */
kernel::LiteKernel *CpuConvInt8KernelSelect(const std::vector<lite::Tensor *> &inputs,
                                            const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
                                            const InnerContext *ctx) {
  auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter);
  kernel::LiteKernel *kernel = nullptr;
  if (conv_param->kernel_h_ == 3 && conv_param->kernel_w_ == 3 && conv_param->stride_h_ == 1 &&
      conv_param->stride_w_ == 1 && conv_param->dilation_h_ == 1 && conv_param->dilation_w_ == 1) {
#ifdef ENABLE_ARM64
    if (mindspore::lite::IsSupportSDot()) {
      kernel = new (std::nothrow) ConvolutionInt8CPUKernel(op_parameter, inputs, outputs, ctx);
    } else {
      kernel = new (std::nothrow) Convolution3x3Int8CPUKernel(op_parameter, inputs, outputs, ctx);
    }
#else
    kernel = new (std::nothrow) kernel::Convolution3x3Int8CPUKernel(op_parameter, inputs, outputs, ctx);
#endif
  } else if (conv_param->kernel_h_ == 1 && conv_param->kernel_w_ == 1) {
    kernel = new (std::nothrow) Convolution1x1Int8CPUKernel(op_parameter, inputs, outputs, ctx);
  } else {
    kernel = new (std::nothrow) ConvolutionInt8CPUKernel(op_parameter, inputs, outputs, ctx);
  }
  return kernel;
}
kernel::LiteKernel *CpuConvInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                             const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
                                             const InnerContext *ctx, const kernel::KernelKey &desc) {
  MS_ASSERT(op_parameter != nullptr);
  MS_ASSERT(desc.type == schema::PrimitiveType_Conv2DFusion);
  auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter);
  kernel::LiteKernel *kernel = nullptr;
  if (conv_param->group_ == 1) {
    kernel = CpuConvInt8KernelSelect(inputs, outputs, op_parameter, ctx);
  } else if (conv_param->group_ == conv_param->input_channel_ && conv_param->group_ == conv_param->output_channel_) {
    kernel = CpuConvDwInt8KernelCreator(inputs, outputs, op_parameter, ctx, desc);
  } else {
    MS_ASSERT(conv_param->group_ > 1);
    kernel = CpuGroupConvInt8KernelCreator(inputs, outputs, op_parameter, ctx, conv_param->group_);
  }
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "kernel is nullptr.";
    free(op_parameter);
    return nullptr;
  }
  auto ret = kernel->Init();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
    delete kernel;
    return nullptr;
  }
  return kernel;
}
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Conv2DFusion, CpuConvInt8KernelCreator)
}  // namespace mindspore::kernel
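Review note: the top-level dispatch in CpuConvInt8KernelCreator() above reduces to a three-way classification of ConvParameter: group_ == 1 selects a single conv, group_ equal to both channel counts selects depthwise, and anything else builds a group convolution from group_ sub-kernels. The predicate in isolation, runnable (enum and function names are illustrative, not from the source):

#include <cstdio>

enum class ConvKind { kPlain, kDepthwise, kGroup };

// Mirrors the dispatch on ConvParameter: group == 1 -> single conv;
// group == in_channel == out_channel -> depthwise; otherwise -> group conv.
ConvKind Classify(int group, int input_channel, int output_channel) {
  if (group == 1) return ConvKind::kPlain;
  if (group == input_channel && group == output_channel) return ConvKind::kDepthwise;
  return ConvKind::kGroup;
}

int main() {
  std::printf("%d\n", static_cast<int>(Classify(1, 8, 16)));  // 0: kPlain
  std::printf("%d\n", static_cast<int>(Classify(8, 8, 8)));   // 1: kDepthwise
  std::printf("%d\n", static_cast<int>(Classify(2, 8, 16)));  // 2: kGroup
  return 0;
}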
@@ -0,0 +1,29 @@
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_CONVOLUTION_INT8_CREATOR_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_CONVOLUTION_INT8_CREATOR_H_
#include <vector>
#include "nnacl/op_base.h"
#include "src/lite_kernel.h"
namespace mindspore::kernel {
LiteKernel *CpuConvInt8KernelSelect(const std::vector<lite::Tensor *> &inputs,
                                    const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
                                    const lite::InnerContext *ctx);
}  // namespace mindspore::kernel
#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_CONVOLUTION_INT8_CREATOR_H_
@@ -15,14 +15,6 @@
 */
#include "src/runtime/kernel/arm/int8/group_convolution_int8.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
namespace mindspore::kernel {
void GroupConvolutionInt8CPUKernel::SeparateInput(int group_id) {
@@ -30,7 +22,7 @@ void GroupConvolutionInt8CPUKernel::SeparateInput(int group_id) {
  int sub_in_channel = conv_param_->input_channel_;
  int ori_in_channel = sub_in_channel * group_num_;
  auto sub_in_data = reinterpret_cast<int8_t *>(group_convs_.at(group_id)->in_tensors().front()->data_c());
  int8_t *src_ptr = ori_in_data_ + group_id * sub_in_channel;
  int8_t *src_ptr = reinterpret_cast<int8_t *>(ori_in_data_) + group_id * sub_in_channel;
  int8_t *dst_ptr = sub_in_data;
  for (int i = 0; i < in_plane; ++i) {
    memcpy(dst_ptr, src_ptr, sub_in_channel * sizeof(int8_t));
@@ -45,29 +37,11 @@ void GroupConvolutionInt8CPUKernel::PostConcat(int group_id) {
  int ori_out_channel = sub_out_channel * group_num_;
  auto sub_out_data = reinterpret_cast<int8_t *>(group_convs_.at(group_id)->out_tensors().front()->data_c());
  int8_t *src_ptr = sub_out_data;
  int8_t *dst_ptr = ori_out_data_ + group_id * sub_out_channel;
  int8_t *dst_ptr = reinterpret_cast<int8_t *>(ori_out_data_) + group_id * sub_out_channel;
  for (int i = 0; i < out_plane; ++i) {
    memcpy(dst_ptr, src_ptr, sub_out_channel * sizeof(int8_t));
    src_ptr += sub_out_channel;
    dst_ptr += ori_out_channel;
  }
}
int GroupConvolutionInt8CPUKernel::Run() {
  ori_in_data_ = reinterpret_cast<int8_t *>(in_tensors().front()->data_c());
  ori_out_data_ = reinterpret_cast<int8_t *>(out_tensors().front()->data_c());
  for (int i = 0; i < group_num_; ++i) {
    // first, separate group conv input into several parts. This step must be in runtime stage.
    SeparateInput(i);
    // sub kernels run
    auto ret = group_convs_.at(i)->Run();
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "sub kernel " << i << " execute failed.";
      return ret;
    }
    // post process, concat all outputs of sub-kernels into one output
    PostConcat(i);
  }
  return RET_OK;
}
}  // namespace mindspore::kernel
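Review note: what makes the Run() deletion above safe is that the shared base-class Run() only touches the buffers through the virtual SeparateInput()/PostConcat() hooks, so the per-dtype casts are confined to the overrides — a textbook template-method split. A toy rendering of that structure (not the actual LiteKernel interface):

#include <cstdio>

class GroupConvBase {
 public:
  virtual ~GroupConvBase() = default;
  // Template method: the group loop is written once, dtype-agnostic.
  int Run(int group_num) {
    for (int i = 0; i < group_num; ++i) {
      SeparateInput(i);
      // ... run sub-kernel i here ...
      PostConcat(i);
    }
    return 0;
  }

 protected:
  virtual void SeparateInput(int group_id) = 0;  // dtype-specific copy-in
  virtual void PostConcat(int group_id) = 0;     // dtype-specific copy-out
};

class Int8Impl : public GroupConvBase {
 protected:
  void SeparateInput(int group_id) override { std::printf("int8 split %d\n", group_id); }
  void PostConcat(int group_id) override { std::printf("int8 concat %d\n", group_id); }
};

int main() {
  Int8Impl k;
  return k.Run(2);  // split 0, concat 0, split 1, concat 1
}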
@@ -21,7 +21,7 @@
#include <vector>
#include "src/lite_kernel.h"
#include "nnacl/op_base.h"
#include "src/runtime/kernel/arm/fp32/group_convolution_fp32.h"
#include "src/runtime/kernel/arm/base/group_convolution.h"
namespace mindspore::kernel {
class GroupConvolutionInt8CPUKernel : public GroupConvolutionCPUKernel {
@@ -32,14 +32,8 @@ class GroupConvolutionInt8CPUKernel : public GroupConvolutionCPUKernel {
      : GroupConvolutionCPUKernel(parameter, inputs, outputs, ctx, std::move(group_convs), group_num) {
  }  // opParameter(in channel, out channel) in this kernel has been split to groups, if
     // you want to get real params, multiply in channel / out channel with group num
  int Run() override;
  void SeparateInput(int group_id) override;
  void PostConcat(int group_id) override;

 private:
  int8_t *ori_in_data_ = nullptr;   // do not free
  int8_t *ori_out_data_ = nullptr;  // do not free
};
}  // namespace mindspore::kernel