Browse Source

!26181 [MSLITE] deconv range expanddims transpose fuzz bug

Merge pull request !26181 from ling/pr
tags/v1.6.0
i-robot Gitee 4 years ago
parent
commit
1c71ff0392
7 changed files with 111 additions and 57 deletions
  1. +4
    -0
      mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/expand_dims_infer.c
  2. +10
    -7
      mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/range_infer.c
  3. +37
    -23
      mindspore/lite/src/ops/populate/deconv2d_populate.cc
  4. +22
    -12
      mindspore/lite/src/runtime/kernel/arm/fp32/transpose_fp32.cc
  5. +16
    -3
      mindspore/lite/src/runtime/kernel/arm/int8/matmul_base_int8.cc
  6. +1
    -1
      mindspore/lite/src/runtime/kernel/arm/int8/matmul_base_int8.h
  7. +21
    -11
      mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.cc

+ 4
- 0
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/expand_dims_infer.c View File

@@ -31,6 +31,10 @@ int ExpandDimsInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso
return NNACL_INFER_INVALID;
}

if (inputs_size < C2NUM) {
return NNACL_INPUT_TENSOR_ERROR;
}

if (inputs[1]->data_ == NULL) {
return NNACL_INPUT_TENSOR_ERROR;
}


+ 10
- 7
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/range_infer.c View File

@@ -27,11 +27,10 @@ int RangeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **

const TensorC *input = inputs[0];
TensorC *output = outputs[0];
if (input == NULL || output == NULL) {
return NNACL_NULL_PTR;
}
NNACL_CHECK_NULL_RETURN_ERR(input);
NNACL_CHECK_NULL_RETURN_ERR(output);

if (inputs_size == 3) {
if (inputs_size == C3NUM) {
output->data_type_ = input->data_type_;
} else {
output->data_type_ = kNumberTypeInt32;
@@ -40,14 +39,18 @@ int RangeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
if (!InferFlag(inputs, inputs_size)) {
return NNACL_INFER_INVALID;
}
if (GetElementNum(inputs[0]) < 1 || GetElementNum(inputs[1]) < 1 || GetElementNum(inputs[2]) < 1) {
if (GetElementNum(inputs[FIRST_INPUT]) < 1) {
return NNACL_ERR;
}
int shape_size = 0;
if (inputs_size == 3) {
if ((inputs[0]->data_ == NULL) || (inputs[1]->data_ == NULL) || (inputs[2]->data_ == NULL)) {
if (inputs_size == C3NUM) {
if ((inputs[FIRST_INPUT]->data_ == NULL) || (inputs[SECOND_INPUT]->data_ == NULL) ||
(inputs[THIRD_INPUT]->data_ == NULL)) {
return NNACL_INFER_INVALID;
}
if (GetElementNum(inputs[SECOND_INPUT]) < 1 || GetElementNum(inputs[THIRD_INPUT]) < 1) {
return NNACL_ERR;
}
switch (inputs[0]->data_type_) {
case kNumberTypeInt:
case kNumberTypeInt32: {


+ 37
- 23
mindspore/lite/src/ops/populate/deconv2d_populate.cc View File

@@ -20,6 +20,31 @@ using mindspore::schema::PrimitiveType_Conv2dTransposeFusion;

namespace mindspore {
namespace lite {
// Translates the schema-level pad mode and activation type into the
// corresponding ConvParameter fields. Unrecognized pad modes fall back to
// Pad_pad; unrecognized activation types fall back to ActType_No.
void SetPadAndAct(schema::PadMode pad_mode, schema::ActivationType act_type, ConvParameter *param) {
  if (pad_mode == schema::PadMode_SAME) {
    param->pad_mode_ = Pad_same;
  } else if (pad_mode == schema::PadMode_VALID) {
    param->pad_mode_ = Pad_valid;
  } else {
    param->pad_mode_ = Pad_pad;
  }

  if (act_type == schema::ActivationType_RELU) {
    param->act_type_ = ActType_Relu;
  } else if (act_type == schema::ActivationType_RELU6) {
    param->act_type_ = ActType_Relu6;
  } else {
    param->act_type_ = ActType_No;
  }
}

OpParameter *PopulateDeconvParameter(const void *prim) {
MS_CHECK_TRUE_RET(prim != nullptr, nullptr);

@@ -65,6 +90,12 @@ OpParameter *PopulateDeconvParameter(const void *prim) {
param->output_padding_h_ = static_cast<int>(*(output_paddings->begin()));
param->output_padding_w_ = static_cast<int>(*(output_paddings->begin() + 1));
}
if (param->output_padding_h_ < 0 || param->output_padding_w_ < 0) {
MS_LOG(ERROR) << "invalid output padding";
free(param);
return nullptr;
}

if (stride == nullptr || dilation == nullptr) {
MS_LOG(ERROR) << "nullptr";
free(param);
@@ -75,21 +106,11 @@ OpParameter *PopulateDeconvParameter(const void *prim) {
free(param);
return nullptr;
}
param->kernel_h_ = static_cast<int>(*(kernel_size->begin()));
param->kernel_w_ = static_cast<int>(*(kernel_size->begin() + 1));

param->group_ = static_cast<int>(value->group());
param->stride_h_ = static_cast<int>(*(stride->begin()));
param->stride_w_ = static_cast<int>(*(stride->begin() + 1));
switch (value->pad_mode()) {
case schema::PadMode_SAME:
param->pad_mode_ = Pad_same;
break;
case schema::PadMode_VALID:
param->pad_mode_ = Pad_valid;
break;
default:
param->pad_mode_ = Pad_pad;
}

if (pad_list == nullptr || pad_list->size() < kMinShapeSizeFour) {
param->pad_u_ = 0;
param->pad_d_ = 0;
@@ -105,18 +126,11 @@ OpParameter *PopulateDeconvParameter(const void *prim) {
param->dilation_w_ = static_cast<int>(*(dilation->begin() + 1));
param->input_channel_ = static_cast<int>(value->in_channel());
param->output_channel_ = static_cast<int>(value->out_channel());

auto act_type = value->activation_type();
switch (act_type) {
case schema::ActivationType_RELU:
param->act_type_ = ActType_Relu;
break;
case schema::ActivationType_RELU6:
param->act_type_ = ActType_Relu6;
break;
default:
param->act_type_ = ActType_No;
break;
}
auto pad_mode = value->pad_mode();
SetPadAndAct(pad_mode, act_type, param);

return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_Conv2dTransposeFusion, PopulateDeconvParameter, SCHEMA_CUR)


+ 22
- 12
mindspore/lite/src/runtime/kernel/arm/fp32/transpose_fp32.cc View File

@@ -62,6 +62,14 @@ int TransposeCPUKernel::ReSize() {
for (int i = 0; i < param_->num_axes_; ++i) {
param_->perm_[i] = perm_data[i];
}

for (int i = 0; i < param_->num_axes_; i++) {
if (param_->perm_[i] < 0 || param_->perm_[i] >= param_->num_axes_) {
MS_LOG(ERROR) << "Check perm failed.";
return RET_ERROR;
}
}

auto &inTensor = in_tensors_.front();
auto &outTensor = out_tensors_.front();
auto in_shape = inTensor->shape();
@@ -123,18 +131,20 @@ int TransposeCPUKernel::GetNHNCTransposeFunc(const lite::Tensor *in_tensor, cons
return RET_OK;
}
auto out_shape = out_tensor->shape();
if (param_->perm_[0] == 0 && param_->perm_[1] == 2 && param_->perm_[2] == 3 && param_->perm_[3] == 1) {
nhnc_param_[0] = out_shape[0];
MS_CHECK_FALSE(INT_MUL_OVERFLOW(out_shape[1], out_shape[2]), RET_ERROR);
nhnc_param_[1] = out_shape[1] * out_shape[2];
nhnc_param_[2] = out_shape[3];
if (param_->perm_[FIRST_INPUT] == FIRST_INPUT && param_->perm_[SECOND_INPUT] == THIRD_INPUT &&
param_->perm_[THIRD_INPUT] == FOURTH_INPUT && param_->perm_[FOURTH_INPUT] == SECOND_INPUT) {
nhnc_param_[FIRST_INPUT] = out_shape[FIRST_INPUT];
MS_CHECK_FALSE(INT_MUL_OVERFLOW(out_shape[SECOND_INPUT], out_shape[THIRD_INPUT]), RET_ERROR);
nhnc_param_[SECOND_INPUT] = out_shape[SECOND_INPUT] * out_shape[THIRD_INPUT];
nhnc_param_[THIRD_INPUT] = out_shape[FOURTH_INPUT];
GetNchwToNhwcFunc(in_tensor->data_type());
}
if (param_->perm_[0] == 0 && param_->perm_[1] == 3 && param_->perm_[2] == 1 && param_->perm_[3] == 2) {
nhnc_param_[0] = out_shape[0];
MS_CHECK_FALSE(INT_MUL_OVERFLOW(out_shape[2], out_shape[3]), RET_ERROR);
nhnc_param_[1] = out_shape[2] * out_shape[3];
nhnc_param_[2] = out_shape[1];
if (param_->perm_[FIRST_INPUT] == FIRST_INPUT && param_->perm_[SECOND_INPUT] == FOURTH_INPUT &&
param_->perm_[THIRD_INPUT] == SECOND_INPUT && param_->perm_[FOURTH_INPUT] == THIRD_INPUT) {
nhnc_param_[FIRST_INPUT] = out_shape[FIRST_INPUT];
MS_CHECK_FALSE(INT_MUL_OVERFLOW(out_shape[THIRD_INPUT], out_shape[FOURTH_INPUT]), RET_ERROR);
nhnc_param_[SECOND_INPUT] = out_shape[THIRD_INPUT] * out_shape[FOURTH_INPUT];
nhnc_param_[THIRD_INPUT] = out_shape[SECOND_INPUT];
GetNhwcToNchwFunc(in_tensor->data_type());
}
return RET_OK;
@@ -142,8 +152,8 @@ int TransposeCPUKernel::GetNHNCTransposeFunc(const lite::Tensor *in_tensor, cons

int TransposeCPUKernel::RunImpl(int task_id) {
if (NHNCTransposeFunc_ != nullptr) {
NHNCTransposeFunc_(in_data_, out_data_, nhnc_param_[0], nhnc_param_[1], nhnc_param_[2], task_id,
op_parameter_->thread_num_);
NHNCTransposeFunc_(in_data_, out_data_, nhnc_param_[FIRST_INPUT], nhnc_param_[SECOND_INPUT],
nhnc_param_[THIRD_INPUT], task_id, op_parameter_->thread_num_);
} else {
return TransposeDimGreaterThan6(task_id);
}


+ 16
- 3
mindspore/lite/src/runtime/kernel/arm/int8/matmul_base_int8.cc View File

@@ -134,19 +134,27 @@ int MatmulBaseInt8CPUKernel::MallocQuantParam() {
return RET_OK;
}

void MatmulBaseInt8CPUKernel::InitQuantParam() {
int MatmulBaseInt8CPUKernel::InitQuantParam() {
auto in_quant_params = in_tensors_.at(0)->quant_params();
if (in_quant_params.size() < 1) {
MS_LOG(ERROR) << "invalid in quant param";
return RET_ERROR;
}
quant_param_->input_.zp_ = in_quant_params.front().zeroPoint;
quant_param_->input_.scale_ = in_quant_params.front().scale;

auto out_quant_params = out_tensors_.at(0)->quant_params();
if (out_quant_params.size() < 1) {
MS_LOG(ERROR) << "invalid out quant param";
return RET_ERROR;
}
quant_param_->output_.zp_ = out_quant_params.front().zeroPoint;
quant_param_->output_.scale_ = out_quant_params.front().scale;

auto weight_tensor = in_tensors_.at(1);
const int &weight_quant_num = channel_num_;
auto weight_quant_params = weight_tensor->quant_params();
MS_CHECK_TRUE_RET_VOID(static_cast<int>(weight_quant_params.size()) == weight_quant_num);
MS_CHECK_TRUE_RET(static_cast<int>(weight_quant_params.size()) == weight_quant_num, RET_ERROR);

for (int i = 0; i < weight_quant_num; i++) {
quant_param_->filter_zp_[i] = weight_quant_params[i].zeroPoint;
@@ -163,6 +171,7 @@ void MatmulBaseInt8CPUKernel::InitQuantParam() {
CalculateActivationRangeQuantized(param_->act_type_ == ActType_Relu, param_->act_type_ == ActType_Relu6,
quant_param_->output_.zp_, quant_param_->output_.scale_,
&quant_param_->out_act_min_, &quant_param_->out_act_max_);
return RET_OK;
}

void MatmulBaseInt8CPUKernel::InitParameter() {
@@ -291,7 +300,11 @@ int MatmulBaseInt8CPUKernel::Prepare() {
return ret;
}

InitQuantParam();
ret = InitQuantParam();
if (ret != RET_OK) {
FreeQuantParam();
return ret;
}

ret = InitBias();
if (ret != RET_OK) {


+ 1
- 1
mindspore/lite/src/runtime/kernel/arm/int8/matmul_base_int8.h View File

@@ -58,8 +58,8 @@ class MatmulBaseInt8CPUKernel : public InnerKernel {

private:
int MallocQuantParam();
int InitQuantParam();
void FreeQuantParam();
void InitQuantParam();

protected:
MatMulParameter *param_ = nullptr;


+ 21
- 11
mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.cc View File

@@ -62,6 +62,13 @@ int TransposeInt8CPUKernel::ReSize() {
transpose_param_->perm_[i] = perm_data[i];
}

for (int i = 0; i < transpose_param_->num_axes_; i++) {
if (transpose_param_->perm_[i] < 0 || transpose_param_->perm_[i] >= transpose_param_->num_axes_) {
MS_LOG(ERROR) << "Check perm failed.";
return RET_ERROR;
}
}

transpose_param_->strides_[transpose_param_->num_axes_ - 1] = 1;
transpose_param_->out_strides_[transpose_param_->num_axes_ - 1] = 1;
for (int i = transpose_param_->num_axes_ - 2; i >= 0; i--) {
@@ -82,18 +89,20 @@ int TransposeInt8CPUKernel::DoTranspose(int task_id) {
void TransposeInt8CPUKernel::GetNHNCTransposeFunc(const lite::Tensor *in_tensor, const lite::Tensor *out_tensor,
const TransposeParameter *param) {
auto out_shape = out_tensor->shape();
if (in_tensor->shape().size() == DIMENSION_4D && param->perm_[0] == 0 && param->perm_[1] == 2 &&
param->perm_[2] == 3 && param->perm_[3] == 1) {
nhnc_param_[0] = out_shape[0];
nhnc_param_[1] = out_shape[1] * out_shape[2];
nhnc_param_[2] = out_shape[3];
if (in_tensor->shape().size() == DIMENSION_4D && param->perm_[FIRST_INPUT] == FIRST_INPUT &&
param->perm_[SECOND_INPUT] == THIRD_INPUT && param->perm_[THIRD_INPUT] == FOURTH_INPUT &&
param->perm_[FOURTH_INPUT] == SECOND_INPUT) {
nhnc_param_[FIRST_INPUT] = out_shape[FIRST_INPUT];
nhnc_param_[SECOND_INPUT] = out_shape[SECOND_INPUT] * out_shape[THIRD_INPUT];
nhnc_param_[THIRD_INPUT] = out_shape[FOURTH_INPUT];
NHNCTransposeFunc_ = PackNCHWToNHWCInt8;
}
if (in_tensor->shape().size() == DIMENSION_4D && param->perm_[0] == 0 && param->perm_[1] == 3 &&
param->perm_[2] == 1 && param->perm_[3] == 2) {
nhnc_param_[0] = out_shape[0];
nhnc_param_[1] = out_shape[2] * out_shape[3];
nhnc_param_[2] = out_shape[1];
if (in_tensor->shape().size() == DIMENSION_4D && param->perm_[FIRST_INPUT] == FIRST_INPUT &&
param->perm_[SECOND_INPUT] == FOURTH_INPUT && param->perm_[THIRD_INPUT] == SECOND_INPUT &&
param->perm_[FOURTH_INPUT] == THIRD_INPUT) {
nhnc_param_[FIRST_INPUT] = out_shape[FIRST_INPUT];
nhnc_param_[SECOND_INPUT] = out_shape[THIRD_INPUT] * out_shape[FOURTH_INPUT];
nhnc_param_[THIRD_INPUT] = out_shape[SECOND_INPUT];
NHNCTransposeFunc_ = PackNHWCToNCHWInt8;
}
}
@@ -111,7 +120,8 @@ int TransposeInt8CPUKernel::Run() {
CHECK_NULL_RETURN(out_ptr_);
GetNHNCTransposeFunc(in_tensor, out_tensor, transpose_param_);
if (NHNCTransposeFunc_ != nullptr) {
NHNCTransposeFunc_(in_ptr_, out_ptr_, nhnc_param_[0], nhnc_param_[1], nhnc_param_[2]);
NHNCTransposeFunc_(in_ptr_, out_ptr_, nhnc_param_[FIRST_INPUT], nhnc_param_[SECOND_INPUT],
nhnc_param_[THIRD_INPUT]);
return RET_OK;
}
if (in_dims.size() > kMaxShapeSize) {


Loading…
Cancel
Save