
!8216 [MS][LITE][CPU]add fp16 group conv creator

Merge pull request !8216 from fuzhiye/tmp
tags/v1.1.0
mindspore-ci-bot (Gitee) · 5 years ago
parent · commit 83bd4fa812
6 changed files with 614 additions and 39 deletions

1. +218 -9   mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc
2. +213 -0   mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.cc
3. +56 -0    mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.h
4. +83 -8    mindspore/lite/src/runtime/kernel/arm/fp32/convolution_fp32.cc
5. +42 -6    mindspore/lite/src/runtime/kernel/arm/fp32/group_convolution_fp32.cc
6. +2 -16    mindspore/lite/src/runtime/kernel/arm/fp32/group_convolution_fp32.h
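
In short: Conv2D kernels with group > 1 on the fp16 path are now routed through a new CpuGroupConvFp16KernelCreator, which clones the ConvParameter for each group, slices the weight/bias constants at creation time, and builds one sub-convolution kernel per group; the new GroupConvolutionFP16CPUKernel then slices the activation input and concatenates the sub-outputs at run time. The fp32 path picks up matching error handling (checked MallocData calls, FreeSubKernel on the failure paths). A minimal sketch of the per-group shape arithmetic, written as a hypothetical standalone helper (the actual logic is inline in CpuGroupConvFp16KernelCreator below):

#include <vector>

// Sketch only. For a grouped Conv2D the weight tensor is already stored per
// group: Batch() gives the total output channel count, while Channel() gives
// the input channel count of a single group, so only the output channels need
// dividing by `group`.
std::vector<int> PerGroupFilterShape(int weight_batch, int weight_channel,
                                     int kernel_h, int kernel_w, int group) {
  int new_out_channel = weight_batch / group;  // e.g. 128 / 4 -> 32 per group
  int new_in_channel = weight_channel;         // already a per-group count
  // matches filter_shape = {new_out_channel, kernel_h, kernel_w, new_in_channel}
  return {new_out_channel, kernel_h, kernel_w, new_in_channel};
}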

+218 -9  mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc

@@ -18,6 +18,7 @@
#include <vector>
#include "src/runtime/kernel/arm/fp16/convolution_winograd_fp16.h"
#include "src/runtime/kernel/arm/fp16/convolution_1x1_fp16.h"
#include "src/runtime/kernel/arm/fp16/group_convolution_fp16.h"
#include "nnacl/fp16/conv_fp16.h"
#include "nnacl/fp16/cast_fp16.h"
#include "nnacl/fp16/pack_fp16.h"
@@ -34,6 +35,7 @@ using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Conv2D;
+ using mindspore::schema::Format::Format_NHWC;

namespace mindspore::kernel {
int ConvolutionFP16CPUKernel::InitWeightBias() {
@@ -173,6 +175,217 @@ int ConvolutionFP16CPUKernel::Run() {
return RET_OK;
}

ConvParameter *CreateNewConvParameterFp16(ConvParameter *parameter) {
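// ConvParameter is a plain C struct (nnacl), so the byte-wise copy below yields a complete clone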
auto conv_parameter = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter)));
if (conv_parameter == nullptr) {
MS_LOG(ERROR) << "Malloc new conv parameter failed.";
return nullptr;
}
memcpy(conv_parameter, parameter, sizeof(ConvParameter));
return conv_parameter;
}

kernel::LiteKernel *CpuConvFp16KernelSelect(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
const InnerContext *ctx, const mindspore::lite::PrimitiveC *primitive,
bool use_winograd, int out_unit) {
auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter);
if (conv_param->kernel_h_ == 1 && conv_param->kernel_w_ == 1) {
return new (std::nothrow) kernel::Convolution1x1FP16CPUKernel(op_parameter, inputs, outputs, ctx, primitive);
} else if (use_winograd) {
return new (std::nothrow)
kernel::ConvolutionWinogradFP16CPUKernel(op_parameter, inputs, outputs, ctx, primitive, out_unit);
} else {
return new (std::nothrow) kernel::ConvolutionFP16CPUKernel(op_parameter, inputs, outputs, ctx, primitive);
}
return nullptr;
}

void FreeMemoryFp16(std::vector<kernel::LiteKernel *> group_convs, std::vector<lite::Tensor *> new_inputs,
std::vector<lite::Tensor *> new_outputs) {
for (auto sub_conv : group_convs) {
if (sub_conv != nullptr) {
delete sub_conv;
}
}
for (auto in_tensor : new_inputs) {
if (in_tensor != nullptr) {
delete in_tensor;
}
}
for (auto out_tensor : new_outputs) {
if (out_tensor != nullptr) {
delete out_tensor;
}
}
}

kernel::LiteKernel *CpuGroupConvFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
const InnerContext *ctx, const mindspore::lite::PrimitiveC *primitive,
int group) {
std::vector<kernel::LiteKernel *> group_convs;
std::vector<int> in_shape;
std::vector<int> filter_shape;
std::vector<int> bias_shape;
std::vector<int> out_shape;

auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter);
int out_channel = inputs.at(kWeightIndex)->Batch();
int new_in_channel = inputs.at(kWeightIndex)->Channel();
int new_out_channel = 0;
if (group == 0) {
MS_LOG(ERROR) << "Divisor 'group' cannot be 0.";
return nullptr;
} else {
new_out_channel = out_channel / group;
}
int kernel_h = conv_param->kernel_h_;
int kernel_w = conv_param->kernel_w_;
int input_num = inputs.size();
int output_num = outputs.size();
bool has_bias = input_num == 3;
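// conv inputs are {activation, weight} or {activation, weight, bias}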
bool use_winograd = false;
int out_unit;
bool infered_flag = (primitive != nullptr && primitive->GetInferFlag());

if (infered_flag) {
int batch = inputs.front()->Batch();
int in_h = inputs.front()->Height();
int in_w = inputs.front()->Width();
conv_param->input_channel_ = new_in_channel;
conv_param->output_channel_ = new_out_channel;
CheckIfUseWinogradFp16(&use_winograd, &out_unit, conv_param);
in_shape = {batch, in_h, in_w, new_in_channel};
out_shape = {batch, conv_param->output_h_, conv_param->output_w_, new_out_channel};
}

filter_shape = {new_out_channel, kernel_h, kernel_w, new_in_channel};
bias_shape = {new_out_channel};

for (int i = 0; i < group; ++i) {
std::vector<lite::Tensor *> new_inputs;
std::vector<lite::Tensor *> new_outputs;
auto new_conv_parameter = CreateNewConvParameterFp16(conv_param);
if (new_conv_parameter == nullptr) {
FreeMemoryFp16(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "Get new conv parameter failed.";
return nullptr;
}
// get new input for each group
auto in_tensor =
new (std::nothrow) lite::Tensor(inputs.front()->data_type(), in_shape, Format_NHWC, lite::Tensor::Category::VAR);
if (in_tensor == nullptr) {
delete new_conv_parameter;
FreeMemoryFp16(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "new in_tensor failed.";
return nullptr;
}
if (infered_flag) {
auto ret = in_tensor->MallocData();
if (ret != RET_OK) {
delete new_conv_parameter;
delete in_tensor;
FreeMemoryFp16(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "in tensor malloc failed.";
return nullptr;
}
}
new_inputs.emplace_back(in_tensor);

// new weight
auto filter_tensor = new (std::nothrow) lite::Tensor(inputs.at(kWeightIndex)->data_type(), filter_shape,
Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
if (filter_tensor == nullptr) {
delete new_conv_parameter;
FreeMemoryFp16(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "new filter_tensor failed.";
return nullptr;
}
auto ret = filter_tensor->MallocData();
if (ret != RET_OK) {
delete new_conv_parameter;
delete filter_tensor;
FreeMemoryFp16(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "filter_tensor malloc failed.";
return nullptr;
}
int copy_length = kernel_h * kernel_w * new_in_channel * new_out_channel;
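// per-group element count of the {out_c, kernel_h, kernel_w, in_c} filter; groups are contiguous in the origin weight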
auto filter_data_type = inputs.at(kWeightIndex)->data_type();
if (filter_data_type == kNumberTypeFloat16) {
auto *origin_weight = reinterpret_cast<float16_t *>(inputs.at(kWeightIndex)->data_c());
memcpy(filter_tensor->data_c(), origin_weight + i * copy_length, copy_length * sizeof(float16_t));
} else {
MS_ASSERT(filter_data_type == kNumberTypeFloat32);
auto *origin_weight = reinterpret_cast<float *>(inputs.at(kWeightIndex)->data_c());
memcpy(filter_tensor->data_c(), origin_weight + i * copy_length, copy_length * sizeof(float));
}
new_inputs.emplace_back(filter_tensor);

// if has bias, set new bias
if (has_bias) {
auto *origin_bias = inputs.at(kBiasIndex)->data_c();
auto bias_data_type = inputs.at(kBiasIndex)->data_type();
auto bias_tensor = new (std::nothrow)
lite::Tensor(inputs.at(kBiasIndex)->data_type(), bias_shape, Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
if (bias_tensor == nullptr) {
delete new_conv_parameter;
FreeMemoryFp16(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "new bias_tensor failed.";
return nullptr;
}
ret = bias_tensor->MallocData();
if (ret != RET_OK) {
delete new_conv_parameter;
delete bias_tensor;
FreeMemoryFp16(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "bias_tensor malloc failed.";
return nullptr;
}
if (bias_data_type == kNumberTypeFloat16) {
auto bias_data = reinterpret_cast<float16_t *>(origin_bias);
memcpy(bias_tensor->data_c(), bias_data + i * new_out_channel, new_out_channel * sizeof(float16_t));
} else {
MS_ASSERT(bias_data_type == kNumberTypeFloat32);
auto bias_data = reinterpret_cast<float *>(origin_bias);
memcpy(bias_tensor->data_c(), bias_data + i * new_out_channel, new_out_channel * sizeof(float));
}
new_inputs.emplace_back(bias_tensor);
}

// set new output tensor
for (int j = 0; j < output_num; ++j) {
auto tmp_out_tensor = new (std::nothrow) lite::Tensor();
if (tmp_out_tensor == nullptr) {
delete new_conv_parameter;
FreeMemoryFp16(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "new tmp_out_tensor failed.";
return nullptr;
}
tmp_out_tensor->set_data_type(outputs.at(j)->data_type());
tmp_out_tensor->SetFormat(outputs.at(j)->GetFormat());
if (infered_flag) {
tmp_out_tensor->set_shape(out_shape);
ret = tmp_out_tensor->MallocData();
if (ret != RET_OK) {
delete new_conv_parameter;
delete tmp_out_tensor;
FreeMemoryFp16(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "tmp_out_tensor malloc data failed.";
return nullptr;
}
}
new_outputs.emplace_back(tmp_out_tensor);
}

group_convs.emplace_back(CpuConvFp16KernelSelect(new_inputs, new_outputs,
reinterpret_cast<OpParameter *>(new_conv_parameter), ctx,
primitive, use_winograd, out_unit));
}
return new (std::nothrow)
GroupConvolutionFP16CPUKernel(op_parameter, inputs, outputs, ctx, primitive, group_convs, group);
}

kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
@@ -197,8 +410,6 @@ kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector<lite::Tensor *> &
}

auto conv_param = reinterpret_cast<ConvParameter *>(opParameter);
- int kernel_h = conv_param->kernel_h_;
- int kernel_w = conv_param->kernel_w_;
bool use_winograd = false;
int out_unit;
if (primitive != nullptr && primitive->GetInferFlag()) {
@@ -211,16 +422,14 @@ kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector<lite::Tensor *> &
conv_param->op_parameter_.thread_num_ = ctx->thread_num_;
CheckIfUseWinogradFp16(&use_winograd, &out_unit, conv_param);
}
+ int group = conv_param->group_;
kernel::LiteKernel *kernel = nullptr;
- if (kernel_h == 1 && kernel_w == 1) {
- kernel = new (std::nothrow) kernel::Convolution1x1FP16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
- } else if (use_winograd) {
- kernel = new (std::nothrow)
- kernel::ConvolutionWinogradFP16CPUKernel(opParameter, inputs, outputs, ctx, primitive, out_unit);
+ if (group == 1) {
+ kernel = CpuConvFp16KernelSelect(inputs, outputs, opParameter, ctx, primitive, use_winograd, out_unit);
} else {
- kernel = new (std::nothrow) kernel::ConvolutionFP16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
+ kernel = CpuGroupConvFp16KernelCreator(inputs, outputs, opParameter, ctx, primitive, group);
}

if (kernel == nullptr) {
MS_LOG(DEBUG) << "Create conv fp16 kernel failed.";
if (dequant_flag) {


+213 -0  mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.cc

@@ -0,0 +1,213 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "src/runtime/kernel/arm/fp16/group_convolution_fp16.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"

using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Conv2D;

namespace mindspore::kernel {
int GroupConvolutionFP16CPUKernel::Init() {
for (int i = 0; i < group_num_; ++i) {
auto ret = group_convs_[i]->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Sub kernel init failed.";
return ret;
}
}
// if infer shape is done, resize func will be invoked in sub kernels
return RET_OK;
}

int GroupConvolutionFP16CPUKernel::ReSize() {
for (int i = 0; i < group_num_; ++i) {
auto ret = group_convs_[i]->ReSize();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Sub kernel resize failed.";
return RET_ERROR;
}
}
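// conv_param_ still holds the whole conv's channel counts; SeparateInput/PostConcat expect per-group values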
conv_param_->input_channel_ /= group_num_;
conv_param_->output_channel_ /= group_num_;
return RET_OK;
}

void GroupConvolutionFP16CPUKernel::FreeSubKernel() {
for (auto sub_conv : group_convs_) {
// free sub conv input tensors / output tensors manually
auto sub_in_tensors = sub_conv->in_tensors();
auto sub_in_tensor_num = sub_in_tensors.size();
for (size_t i = 0; i < sub_in_tensor_num; ++i) {
delete sub_in_tensors[i];
}
auto sub_out_tensors = sub_conv->out_tensors();
auto sub_out_tensor_num = sub_out_tensors.size();
for (size_t i = 0; i < sub_out_tensor_num; ++i) {
delete sub_out_tensors[i];
}
delete sub_conv;
}
}

int GroupConvolutionFP16CPUKernel::PreProcess() {
if (!InferShapeDone()) {
auto ret = (const_cast<mindspore::lite::PrimitiveC *>(primitive_))->InferShape(in_tensors_, out_tensors_);
if (ret != RET_OK) {
(const_cast<mindspore::lite::PrimitiveC *>(primitive_))->SetInferFlag(false);
MS_LOG(ERROR) << "InferShape fail!";
return ret;
}
(const_cast<mindspore::lite::PrimitiveC *>(primitive_))->SetInferFlag(true);
ret = ReSize();
if (ret != RET_OK) {
MS_LOG(ERROR) << "ReSize fail!ret: " << ret;
return ret;
}

// if infershape func is called in runtime stage, we should malloc memory and set shape info for outputs of sub
// kernels here.
std::vector<int> in_shape;
std::vector<int> out_shape;
for (int i = 0; i < group_num_; ++i) {
// in
int in_batch = conv_param_->input_batch_;
int in_h = conv_param_->input_h_;
int in_w = conv_param_->input_w_;
int in_c = conv_param_->input_channel_;
in_shape = {in_batch, in_h, in_w, in_c};
auto sub_kernel_in_tensor = group_convs_[i]->in_tensors().front();
sub_kernel_in_tensor->set_shape(in_shape);
ret = sub_kernel_in_tensor->MallocData();
if (ret != RET_OK) {
FreeSubKernel();
MS_LOG(ERROR) << "sub kernel in tensor malloc data failed.";
return ret;
}
// out
int out_batch = conv_param_->output_batch_;
int out_h = conv_param_->output_h_;
int out_w = conv_param_->output_w_;
int out_c = conv_param_->output_channel_;
out_shape = {out_batch, out_h, out_w, out_c};
auto sub_kernel_out_tensors = group_convs_[i]->out_tensors();
for (auto tensor : sub_kernel_out_tensors) {
tensor->set_shape(out_shape);
ret = tensor->MallocData();
if (ret != RET_OK) {
FreeSubKernel();
MS_LOG(ERROR) << "sub kernel out tensor malloc data failed.";
return ret;
}
}
}
}

auto outputs = this->out_tensors();
for (auto *output : outputs) {
MS_ASSERT(output != nullptr);
auto ret = output->MallocData();
if (ret != RET_OK) {
FreeSubKernel();
MS_LOG(ERROR) << "fp16 group conv out tensor malloc data failed.";
return ret;
}
}
return RET_OK;
}

int GroupConvolutionFP16CPUKernel::SeparateInput(int group_id) {
// input may either be float32 or float16
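// this is a strided channel slice over NHWC data: starting at origin + group_id * sub_in_channel,
// each of the in_h * in_w pixels copies sub_in_channel values, advancing the source by
// ori_in_channel (= sub_in_channel * group_num_). PostConcat below is the inverse copy
// with the source/destination strides swapped.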
int in_h = conv_param_->input_h_;
int in_w = conv_param_->input_w_;
int in_plane = in_h * in_w;
int sub_in_channel = conv_param_->input_channel_;
int ori_in_channel = sub_in_channel * group_num_;
auto sub_in_data = group_convs_[group_id]->in_tensors().front()->data_c();
auto in_data_type = in_tensors_.front()->data_type();
auto sub_in_data_type = group_convs_[group_id]->in_tensors().front()->data_type();
if (in_data_type != sub_in_data_type) {
MS_LOG(ERROR) << "data type of sub conv kernel input should be the same as origin input's.";
return RET_ERROR;
}
if (!(in_data_type == kNumberTypeFloat32 || in_data_type == kNumberTypeFloat16)) {
MS_LOG(ERROR) << "Invaild data type.";
return RET_ERROR;
}
if (in_tensors_.front()->data_type() == kNumberTypeFloat16) {
float16_t *src_ptr = reinterpret_cast<float16_t *>(ori_in_data_) + group_id * sub_in_channel;
float16_t *dst_ptr = reinterpret_cast<float16_t *>(sub_in_data);
for (int i = 0; i < in_plane; ++i) {
memcpy(dst_ptr, src_ptr, sub_in_channel * sizeof(float16_t));
src_ptr += ori_in_channel;
dst_ptr += sub_in_channel;
}
} else {
float *src_ptr = reinterpret_cast<float *>(ori_in_data_) + group_id * sub_in_channel;
float *dst_ptr = reinterpret_cast<float *>(sub_in_data);
for (int i = 0; i < in_plane; ++i) {
memcpy(dst_ptr, src_ptr, sub_in_channel * sizeof(float));
src_ptr += ori_in_channel;
dst_ptr += sub_in_channel;
}
}
return RET_OK;
}

void GroupConvolutionFP16CPUKernel::PostConcat(int group_id) {
// output must be of float16 data type
int out_h = conv_param_->output_h_;
int out_w = conv_param_->output_w_;
int out_plane = out_h * out_w;
int sub_out_channel = conv_param_->output_channel_;
int ori_out_channel = sub_out_channel * group_num_;
auto sub_out_data = reinterpret_cast<float16_t *>(group_convs_[group_id]->out_tensors().front()->data_c());
float16_t *src_ptr = sub_out_data;
float16_t *dst_ptr = ori_out_data_ + group_id * sub_out_channel;
for (int i = 0; i < out_plane; ++i) {
memcpy(dst_ptr, src_ptr, sub_out_channel * sizeof(float16_t));
src_ptr += sub_out_channel;
dst_ptr += ori_out_channel;
}
}

int GroupConvolutionFP16CPUKernel::Run() {
ori_in_data_ = in_tensors().front()->data_c();
ori_out_data_ = reinterpret_cast<float16_t *>(out_tensors().front()->data_c());
for (int i = 0; i < group_num_; ++i) {
// first, separate the group conv input into per-group parts. This step must happen at run time.
auto ret = SeparateInput(i);
if (ret != RET_OK) {
MS_LOG(ERROR) << "Separate input failed.";
return ret;
}
// run sub kernels
ret = group_convs_[i]->Run();
if (ret != RET_OK) {
MS_LOG(ERROR) << "sub kernel " << i << " execute failed.";
return ret;
}
// post process, concat all outputs of sub-kernels into one output
PostConcat(i);
}
return RET_OK;
}
} // namespace mindspore::kernel

+56 -0  mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.h

@@ -0,0 +1,56 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_GROUP_CONVOLUTION_FP16_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_GROUP_CONVOLUTION_FP16_H_

#include <utility>
#include <vector>
#include "src/lite_kernel.h"
#include "nnacl/op_base.h"
#include "src/runtime/kernel/arm/base/convolution_base.h"
#include "nnacl/fp16/conv_fp16.h"

namespace mindspore::kernel {
class GroupConvolutionFP16CPUKernel : public ConvolutionBaseCPUKernel {
public:
GroupConvolutionFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive,
std::vector<kernel::LiteKernel *> group_convs, const int group_num)
: ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive),
group_convs_(std::move(group_convs)),
group_num_(group_num) {} // the in/out channel counts in opParameter have been split per group;
// multiply them by group_num to recover the original values
~GroupConvolutionFP16CPUKernel() override { FreeSubKernel(); }

int Init() override;
int ReSize() override;
int Run() override;
int PreProcess() override;
int SeparateInput(int group_id);
void PostConcat(int group_id);
void FreeSubKernel();

private:
std::vector<kernel::LiteKernel *> group_convs_;
void *ori_in_data_ = nullptr; // do not free
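// (void* because the origin input may be fp32 or fp16; SeparateInput casts per data_type)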
float16_t *ori_out_data_ = nullptr; // do not free
const int group_num_;
};
} // namespace mindspore::kernel

#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_GROUP_CONVOLUTION_FP16_H_

+83 -8  mindspore/lite/src/runtime/kernel/arm/fp32/convolution_fp32.cc

@@ -169,6 +169,25 @@ ConvParameter *CreateNewConvParameter(ConvParameter *parameter) {
return conv_parameter;
}

void FreeMemoryFp32(std::vector<kernel::LiteKernel *> group_convs, std::vector<lite::Tensor *> new_inputs,
std::vector<lite::Tensor *> new_outputs) {
for (auto sub_conv : group_convs) {
if (sub_conv != nullptr) {
delete sub_conv;
}
}
for (auto in_tensor : new_inputs) {
if (in_tensor != nullptr) {
delete in_tensor;
}
}
for (auto out_tensor : new_outputs) {
if (out_tensor != nullptr) {
delete out_tensor;
}
}
}

kernel::LiteKernel *CpuConvFp32KernelSelect(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
const InnerContext *ctx, const mindspore::lite::PrimitiveC *primitive,
@@ -201,6 +220,7 @@ kernel::LiteKernel *CpuGroupConvFp32KernelCreator(const std::vector<lite::Tensor
int new_out_channel = 0;
if (group == 0) {
MS_LOG(ERROR) << "Divisor 'group' cannot be 0.";
return nullptr;
} else {
new_out_channel = out_channel / group;
}
@@ -211,8 +231,9 @@ kernel::LiteKernel *CpuGroupConvFp32KernelCreator(const std::vector<lite::Tensor
bool has_bias = input_num == 3;
bool use_winograd = false;
int out_unit;
+ bool infered_flag = primitive != nullptr && primitive->GetInferFlag();

- if (primitive != nullptr && primitive->GetInferFlag()) {
+ if (infered_flag) {
int batch = inputs.front()->Batch();
int in_h = inputs.front()->Height();
int in_w = inputs.front()->Width();
@@ -232,21 +253,48 @@ kernel::LiteKernel *CpuGroupConvFp32KernelCreator(const std::vector<lite::Tensor
std::vector<lite::Tensor *> new_outputs;
auto new_conv_parameter = CreateNewConvParameter(conv_param);
if (new_conv_parameter == nullptr) {
FreeMemoryFp32(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "Get new conv parameter failed.";
return nullptr;
}
// get new input for each group
auto in_tensor =
new (std::nothrow) lite::Tensor(inputs.front()->data_type(), in_shape, Format_NHWC, lite::Tensor::Category::VAR);
- if (primitive != nullptr && primitive->GetInferFlag()) {
- in_tensor->MallocData();
if (in_tensor == nullptr) {
delete new_conv_parameter;
FreeMemoryFp32(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "new in_tensor failed.";
return nullptr;
}
if (infered_flag) {
auto ret = in_tensor->MallocData();
if (ret != RET_OK) {
delete new_conv_parameter;
delete in_tensor;
FreeMemoryFp32(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "in tensor malloc failed.";
return nullptr;
}
}
new_inputs.emplace_back(in_tensor);

- // nwe weight
+ // new weight
auto filter_tensor = new (std::nothrow) lite::Tensor(inputs.at(kWeightIndex)->data_type(), filter_shape,
Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
- filter_tensor->MallocData();
if (filter_tensor == nullptr) {
delete new_conv_parameter;
FreeMemoryFp32(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "new filter_tensor failed.";
return nullptr;
}
auto ret = filter_tensor->MallocData();
if (ret != RET_OK) {
delete new_conv_parameter;
delete filter_tensor;
FreeMemoryFp32(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "filter_tensor malloc failed.";
return nullptr;
}
int copy_length = kernel_h * kernel_w * new_in_channel * new_out_channel;
memcpy(filter_tensor->data_c(), origin_weight + i * copy_length, copy_length * sizeof(float));
new_inputs.emplace_back(filter_tensor);
@@ -256,7 +304,20 @@ kernel::LiteKernel *CpuGroupConvFp32KernelCreator(const std::vector<lite::Tensor
auto *origin_bias = reinterpret_cast<float *>(inputs.at(kBiasIndex)->data_c());
auto bias_tensor = new (std::nothrow)
lite::Tensor(inputs.at(kBiasIndex)->data_type(), bias_shape, Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
- bias_tensor->MallocData();
if (bias_tensor == nullptr) {
delete new_conv_parameter;
FreeMemoryFp32(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "new bias_tensor failed.";
return nullptr;
}
ret = bias_tensor->MallocData();
if (ret != RET_OK) {
delete new_conv_parameter;
delete bias_tensor;
FreeMemoryFp32(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "bias_tensor malloc failed.";
return nullptr;
}
memcpy(bias_tensor->data_c(), origin_bias + i * new_out_channel, new_out_channel * sizeof(float));
new_inputs.emplace_back(bias_tensor);
}
@@ -264,11 +325,24 @@ kernel::LiteKernel *CpuGroupConvFp32KernelCreator(const std::vector<lite::Tensor
// set new output tensor
for (int j = 0; j < output_num; ++j) {
auto tmp_out_tensor = new (std::nothrow) lite::Tensor();
if (tmp_out_tensor == nullptr) {
delete new_conv_parameter;
FreeMemoryFp32(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "new tmp_out_tensor failed.";
return nullptr;
}
tmp_out_tensor->set_data_type(outputs.at(j)->data_type());
tmp_out_tensor->SetFormat(outputs.at(j)->GetFormat());
- if (primitive != nullptr && primitive->GetInferFlag()) {
+ if (infered_flag) {
tmp_out_tensor->set_shape(out_shape);
- tmp_out_tensor->MallocData();
+ ret = tmp_out_tensor->MallocData();
if (ret != RET_OK) {
delete new_conv_parameter;
delete tmp_out_tensor;
FreeMemoryFp32(group_convs, new_inputs, new_outputs);
MS_LOG(ERROR) << "tmp_out_tensor malloc data failed.";
return nullptr;
}
}
new_outputs.emplace_back(tmp_out_tensor);
}
@@ -287,6 +361,7 @@ kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector<lite::Tensor *> &
const mindspore::lite::PrimitiveC *primitive) {
MS_ASSERT(op_parameter != nullptr);
MS_ASSERT(desc.type == schema::PrimitiveType_Conv2D);
MS_ASSERT(desc.data_type == kNumberTypeFloat32);
auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter);
int group = conv_param->group_;
bool use_winograd = false;


+42 -6  mindspore/lite/src/runtime/kernel/arm/fp32/group_convolution_fp32.cc

@@ -51,17 +51,34 @@ int GroupConvolutionCPUKernel::ReSize() {
return RET_OK;
}

void GroupConvolutionCPUKernel::FreeSubKernel() {
for (auto sub_conv : group_convs_) {
// free sub conv input tensors / output tensors manually
auto sub_in_tensors = sub_conv->in_tensors();
auto sub_in_tensor_num = sub_in_tensors.size();
for (size_t i = 0; i < sub_in_tensor_num; ++i) {
delete sub_in_tensors[i];
}
auto sub_out_tensors = sub_conv->out_tensors();
auto sub_out_tensor_num = sub_out_tensors.size();
for (size_t i = 0; i < sub_out_tensor_num; ++i) {
delete sub_out_tensors[i];
}
delete sub_conv;
}
}

int GroupConvolutionCPUKernel::PreProcess() {
if (!InferShapeDone()) {
auto ret = (const_cast<mindspore::lite::PrimitiveC *>(primitive_))->InferShape(in_tensors_, out_tensors_);
- if (ret != 0) {
+ if (ret != RET_OK) {
(const_cast<mindspore::lite::PrimitiveC *>(primitive_))->SetInferFlag(false);
MS_LOG(ERROR) << "InferShape fail!";
return ret;
}
(const_cast<mindspore::lite::PrimitiveC *>(primitive_))->SetInferFlag(true);
ret = ReSize();
- if (ret != 0) {
+ if (ret != RET_OK) {
MS_LOG(ERROR) << "ReSize fail!ret: " << ret;
return ret;
}
@@ -79,7 +96,12 @@ int GroupConvolutionCPUKernel::PreProcess() {
in_shape = {in_batch, in_h, in_w, in_c};
auto sub_kernel_in_tensor = group_convs_[i]->in_tensors().front();
sub_kernel_in_tensor->set_shape(in_shape);
- sub_kernel_in_tensor->MallocData();
+ ret = sub_kernel_in_tensor->MallocData();
if (ret != RET_OK) {
FreeSubKernel();
MS_LOG(ERROR) << "sub kernel in tensor malloc data failed.";
return ret;
}
// out
int out_batch = conv_param_->output_batch_;
int out_h = conv_param_->output_h_;
@@ -89,7 +111,12 @@ int GroupConvolutionCPUKernel::PreProcess() {
auto sub_kernel_out_tensors = group_convs_[i]->out_tensors();
for (auto tensor : sub_kernel_out_tensors) {
tensor->set_shape(out_shape);
- tensor->MallocData();
+ ret = tensor->MallocData();
if (ret != RET_OK) {
FreeSubKernel();
MS_LOG(ERROR) << "sub kernel out tensor malloc data failed.";
return ret;
}
}
}
}
@@ -97,7 +124,12 @@ int GroupConvolutionCPUKernel::PreProcess() {
auto outputs = this->out_tensors();
for (auto *output : outputs) {
MS_ASSERT(output != nullptr);
- output->MallocData();
+ auto ret = output->MallocData();
if (ret != RET_OK) {
FreeSubKernel();
MS_LOG(ERROR) << "fp32 group conv out tensor malloc data failed.";
return ret;
}
}
return RET_OK;
}
@@ -141,7 +173,11 @@ int GroupConvolutionCPUKernel::Run() {
// first, separate the group conv input into per-group parts. This step must happen at run time.
SeparateInput(i);
// run sub kernels
- group_convs_[i]->Run();
+ auto ret = group_convs_[i]->Run();
if (ret != RET_OK) {
MS_LOG(ERROR) << "sub kernel " << i << " execute failed.";
return ret;
}
// post process, concat all outputs of sub-kernels into one output
PostConcat(i);
}


+2 -16  mindspore/lite/src/runtime/kernel/arm/fp32/group_convolution_fp32.h

@@ -35,22 +35,7 @@ class GroupConvolutionCPUKernel : public ConvolutionBaseCPUKernel {
group_convs_(std::move(group_convs)),
group_num_(group_num) {} // the in/out channel counts in opParameter have been split per group;
// multiply them by group_num to recover the original values
- ~GroupConvolutionCPUKernel() override {
- for (auto sub_conv : group_convs_) {
- // free sub conv input tensors / output tensors manually
- auto sub_in_tensors = sub_conv->in_tensors();
- auto sub_in_tensor_num = sub_in_tensors.size();
- for (size_t i = 0; i < sub_in_tensor_num; ++i) {
- delete sub_in_tensors[i];
- }
- auto sub_out_tensors = sub_conv->out_tensors();
- auto sub_out_tensor_num = sub_out_tensors.size();
- for (size_t i = 0; i < sub_out_tensor_num; ++i) {
- delete sub_out_tensors[i];
- }
- delete sub_conv;
- }
- };
+ ~GroupConvolutionCPUKernel() override { FreeSubKernel(); }

int Init() override;
int ReSize() override;
@@ -58,6 +43,7 @@ class GroupConvolutionCPUKernel : public ConvolutionBaseCPUKernel {
int PreProcess() override;
void SeparateInput(int group_id);
void PostConcat(int group_id);
void FreeSubKernel();

private:
std::vector<kernel::LiteKernel *> group_convs_;

