Browse Source

codex clean && fuzz bugfix

feature/build-system-rewrite
jianghui58 4 years ago
parent
commit
cf5a83f39b
10 changed files with 173 additions and 22 deletions
  1. +3
    -9
      mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/bias_grad_infer.c
  2. +18
    -0
      mindspore/lite/src/common/tensor_util.cc
  3. +3
    -1
      mindspore/lite/src/common/tensor_util.h
  4. +27
    -0
      mindspore/lite/src/lite_session.cc
  5. +3
    -0
      mindspore/lite/src/lite_session.h
  6. +23
    -7
      mindspore/lite/src/ops/populate/conv2d_populate.cc
  7. +5
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/addn_fp32.cc
  8. +24
    -1
      mindspore/lite/src/runtime/kernel/arm/fp32/scale_fp32.cc
  9. +1
    -0
      mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.cc
  10. +66
    -4
      mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.cc

+ 3
- 9
mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/bias_grad_infer.c View File

@@ -27,17 +27,11 @@ int BiasGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
const TensorC *in0 = inputs[0];
TensorC *out = outputs[0];

if (in0->shape_size_ > MAX_SHAPE_SIZE) {
if (in0->shape_size_ > MAX_SHAPE_SIZE || in0->shape_size_ < 1) {
return NNACL_INPUT_TENSOR_ERROR;
}
int inshape[MAX_SHAPE_SIZE];
size_t inshape_size = 0;
ShapeSet(inshape, &inshape_size, in0->shape_, in0->shape_size_);
size_t ndim = inshape_size;
MS_CHECK_TRUE_RET(ndim - 1 <= MAX_SHAPE_SIZE, NNACL_ERR);
for (size_t i = 0; i < ndim - 1; i++) {
inshape[i] = 1;
}
int inshape[] = {in0->shape_[in0->shape_size_ - 1]};
size_t inshape_size = 1;
SetDataTypeFormat(out, in0);
SetShapeArray(out, inshape, inshape_size);



+ 18
- 0
mindspore/lite/src/common/tensor_util.cc View File

@@ -16,6 +16,7 @@

#include "src/common/tensor_util.h"
#include <algorithm>
#include <unordered_map>
#include "schema/model_generated.h"
#include "include/errorcode.h"
#include "src/common/log_adapter.h"
@@ -265,6 +266,23 @@ int CheckTensorsInvalid(const std::vector<Tensor *> &tensors) {
return RET_OK;
}

// Verify that every graph input tensor still matches the shape that was
// recorded for it at session-init time.
// Returns RET_ERROR when an input is null, missing from the map, or has a
// shape that differs from the recorded (non-empty) one; RET_OK otherwise.
int CheckGraphInputShapes(const std::vector<Tensor *> &inputs,
                          const std::unordered_map<Tensor *, std::vector<int>> &input_shape_map) {
  for (auto *graph_input : inputs) {
    MS_CHECK_TRUE_MSG(graph_input != nullptr, RET_ERROR, "graph input tensor is nullptr.");
    auto iter = input_shape_map.find(graph_input);
    if (iter == input_shape_map.end()) {
      MS_LOG(ERROR) << "can't find " << graph_input->tensor_name() << " in input_shape_map";
      return RET_ERROR;
    }
    // An empty recorded shape means "unknown"; only a non-empty mismatch is illegal.
    const auto &recorded_shape = iter->second;
    if (!recorded_shape.empty() && recorded_shape != graph_input->shape()) {
      MS_LOG(ERROR) << "graph input:" << graph_input->tensor_name()
                    << " shape has been illegally modified, please modify the input shape with method Resize().";
      return RET_ERROR;
    }
  }
  return RET_OK;
}

std::vector<mindspore::MSTensor> LiteTensorsToMSTensors(const std::vector<lite::Tensor *> &lite_tensors) {
std::vector<mindspore::MSTensor> tensors;
std::transform(lite_tensors.begin(), lite_tensors.end(), std::back_inserter(tensors), [](lite::Tensor *tensor) {


+ 3
- 1
mindspore/lite/src/common/tensor_util.h View File

@@ -17,7 +17,7 @@
#ifndef MINDSPORE_LITE_SRC_COMMON_TENSOR_UTIL_H_
#define MINDSPORE_LITE_SRC_COMMON_TENSOR_UTIL_H_
#include <vector>
#include <unordered_map>
#include <memory>
#include "src/tensor.h"
#include "nnacl/tensor_c.h"
@@ -43,6 +43,8 @@ int GenerateInTensorC(const OpParameter *const parameter, const std::vector<lite
int GenerateOutTensorC(const OpParameter *const parameter, const std::vector<lite::Tensor *> &outputs,
std::vector<TensorC *> *out_tensor_c);
int CheckTensorsInvalid(const std::vector<Tensor *> &tensors);
int CheckGraphInputShapes(const std::vector<Tensor *> &inputs,
const std::unordered_map<Tensor *, std::vector<int>> &input_shape_map);
std::vector<mindspore::MSTensor> LiteTensorsToMSTensors(const std::vector<lite::Tensor *> &lite_tensors);
void MoveCommonTensorData(Tensor *dst_tensor, Tensor *src_tensor);
void MoveTensorData(Tensor *dst_tensor, Tensor *src_tensor);


+ 27
- 0
mindspore/lite/src/lite_session.cc View File

@@ -416,6 +416,7 @@ void LiteSession::InitGraphOutputTensors(const lite::Model *model) {
void LiteSession::InitGraphInputMap(const lite::Model *model) {
MS_ASSERT(model != nullptr);
MS_ASSERT(this->input_map_.empty());
MS_ASSERT(this->input_shape_map_.empty());
auto graph_input_node_indexes = GetGraphInputNodes(model);
auto graph_in_size = model->input_indices_.size();
for (auto in_node_index : graph_input_node_indexes) {
@@ -443,6 +444,7 @@ void LiteSession::InitGraphInputMap(const lite::Model *model) {
}
auto tensor_name = in_node->name_ + std::to_string(i);
this->input_map_[tensor_name] = in_tensor;
this->input_shape_map_[in_tensor] = in_tensor->shape();
if (!in_tensor->tensor_name().empty()) {
this->input_map_[in_tensor->tensor_name()] = in_tensor;
}
@@ -861,6 +863,12 @@ int LiteSession::RunGraph(const KernelCallBack &before, const KernelCallBack &af
MS_LOG(ERROR) << "CheckInputs failed.";
return ret;
}
ret = CheckGraphInputShapes(inputs_, input_shape_map_);
if (ret != RET_OK) {
is_running_.store(false);
MS_LOG(ERROR) << "Check graph input shapes failed.";
return ret;
}
MS_ASSERT(this->context_ != nullptr);
if (before == nullptr && after == nullptr) {
ret = executor_->Run(this->inputs_, this->outputs_, this->kernels_);
@@ -1089,6 +1097,7 @@ LiteSession::~LiteSession() {

// Tensor * in input_map output_map are freed in tensors
input_map_.clear();
input_shape_map_.clear();
output_node_map_.clear();
output_tensor_map_.clear();
input_vec_.clear();
@@ -1143,6 +1152,19 @@ std::unordered_map<std::string, mindspore::tensor::MSTensor *> LiteSession::GetO
return this->output_tensor_map_;
}

int LiteSession::UpdateInputShapeMap() {
for (auto input : inputs_) {
MS_CHECK_TRUE_MSG(input != nullptr, RET_ERROR, "graph input tensor is nullptr.");
if (input_shape_map_.find(input) != input_shape_map_.end()) {
input_shape_map_.at(input) = input->shape();
} else {
MS_LOG(ERROR) << "can't find " << input->tensor_name() << " in input_shape_map";
return RET_ERROR;
}
}
return RET_OK;
}

int LiteSession::ResizeInputs(const std::vector<mindspore::tensor::MSTensor *> &inputs,
const std::vector<std::vector<int>> &dims) {
if (inputs.size() != inputs_.size()) {
@@ -1317,6 +1339,11 @@ int LiteSession::Resize(const std::vector<mindspore::tensor::MSTensor *> &inputs
#if defined(LINUX_RUNTIME)
(void)malloc_trim(0);
#endif
ret = UpdateInputShapeMap();
if (ret != RET_OK) {
MS_LOG(ERROR) << "update input shape map failed.";
return RET_ERROR;
}
return RET_OK;
}



+ 3
- 0
mindspore/lite/src/lite_session.h View File

@@ -104,6 +104,7 @@ class LiteSession : public session::LiteSession {
void InitGraphOutputNodeMap(const lite::Model *model);
void InitGraphOutputTensorMap(const lite::Model *model);
void AdjustModelOutputTensorInitRefCount(const lite::Model *model);
int UpdateInputShapeMap();
int ResizeInputs(const std::vector<mindspore::tensor::MSTensor *> &inputs, const std::vector<std::vector<int>> &dims);
int SetAllocatorForDelegateKernels(const kernel::LiteKernel *kernel);
int PrepareKernels(const Model *model);
@@ -160,6 +161,8 @@ class LiteSession : public session::LiteSession {
std::vector<mindspore::tensor::MSTensor *> input_vec_;
// graph input tensor name -- input tensors
std::unordered_map<std::string, mindspore::tensor::MSTensor *> input_map_;
// graph input tensor -- input tensor shape
std::unordered_map<Tensor *, std::vector<int>> input_shape_map_;
// graph output node name -- output tensors
std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> output_node_map_;



+ 23
- 7
mindspore/lite/src/ops/populate/conv2d_populate.cc View File

@@ -29,8 +29,12 @@ int SetPadAndAct(schema::PadMode pad_mode, schema::ActivationType act_type, Conv
case schema::PadMode_VALID:
param->pad_mode_ = Pad_valid;
break;
default:
case schema::PadMode_PAD:
param->pad_mode_ = Pad_pad;
break;
default:
MS_LOG(ERROR) << "Pad mode does not support, " << pad_mode;
return RET_NOT_SUPPORT;
}

switch (act_type) {
@@ -54,12 +58,9 @@ int SetPadAndAct(schema::PadMode pad_mode, schema::ActivationType act_type, Conv

OpParameter *PopulateConvParameter(const void *prim) {
auto primitive = static_cast<const schema::Primitive *>(prim);
MS_ASSERT(primitive != nullptr);
MS_CHECK_TRUE_MSG(primitive != nullptr, nullptr, "primitive is nullptr.");
auto value = primitive->value_as_Conv2DFusion();
if (value == nullptr) {
MS_LOG(ERROR) << "value is nullptr";
return nullptr;
}
MS_CHECK_TRUE_MSG(value != nullptr, nullptr, "value is nullptr.");

auto *param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter)));
if (param == nullptr) {
@@ -95,7 +96,14 @@ OpParameter *PopulateConvParameter(const void *prim) {
free(param);
return nullptr;
}

for (size_t i = 0; i <= 1; i++) {
auto stride_item = *(stride->begin() + i);
if (stride_item < 0 || stride_item > static_cast<int64_t>(INT32_MAX)) {
MS_LOG(ERROR) << "strides has invalid num.";
free(param);
return nullptr;
}
}
param->group_ = static_cast<int>(value->group());
param->stride_h_ = static_cast<int>(*(stride->begin()));
param->stride_w_ = static_cast<int>(*(stride->begin() + 1));
@@ -105,6 +113,14 @@ OpParameter *PopulateConvParameter(const void *prim) {
param->pad_l_ = 0;
param->pad_r_ = 0;
} else {
for (size_t i = 0; i <= kOffsetThree; i++) {
auto pad_item = *(pad_list->begin() + i);
if (pad_item < 0 || pad_item > static_cast<int64_t>(INT32_MAX)) {
MS_LOG(ERROR) << "pad list has invalid num.";
free(param);
return nullptr;
}
}
param->pad_u_ = static_cast<int>(*(pad_list->begin()));
param->pad_d_ = static_cast<int>(*(pad_list->begin() + 1));
param->pad_l_ = static_cast<int>(*(pad_list->begin() + kOffsetTwo));


+ 5
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/addn_fp32.cc View File

@@ -93,6 +93,11 @@ int AddNCPUKernel::Run() {
}
return RET_OK;
}
if (std::any_of(in_tensors_.begin(), in_tensors_.end(),
[this](const lite::Tensor *input) { return input->shape() != out_tensors_.at(0)->shape(); })) {
MS_LOG(ERROR) << "all inputs should have the same shape of output.";
return RET_ERROR;
}
in1_addr_ = input0_data;
in2_addr_ = input1_data;
out_addr_ = output_data;


+ 24
- 1
mindspore/lite/src/runtime/kernel/arm/fp32/scale_fp32.cc View File

@@ -27,6 +27,24 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_ScaleFusion;

namespace mindspore::kernel {
namespace {
// Reject any scale-op input or output tensor whose data type is not float32
// (kNumberTypeFloat or kNumberTypeFloat32). Returns RET_ERROR on the first
// offending tensor, RET_OK when all tensors are float32.
int CheckInputsOutputsDataType(const std::vector<lite::Tensor *> &in_tensors,
                               const std::vector<lite::Tensor *> &out_tensors) {
  auto is_fp32 = [](const lite::Tensor *tensor) {
    auto type = tensor->data_type();
    return type == kNumberTypeFloat || type == kNumberTypeFloat32;
  };
  if (!std::all_of(in_tensors.begin(), in_tensors.end(), is_fp32)) {
    MS_LOG(ERROR) << "scale op input data type should float32";
    return RET_ERROR;
  }
  if (!std::all_of(out_tensors.begin(), out_tensors.end(), is_fp32)) {
    MS_LOG(ERROR) << "scale op output data type should float32";
    return RET_ERROR;
  }
  return RET_OK;
}
} // namespace
ScaleCPUKernel::~ScaleCPUKernel() {
if (scale_param_->const_scale_) {
if (scale_ != nullptr) {
@@ -121,7 +139,12 @@ int ScaleCPUKernel::CalculateParameter() {
int ScaleCPUKernel::Prepare() {
CHECK_LESS_RETURN(in_tensors_.size(), C2NUM);
CHECK_LESS_RETURN(out_tensors_.size(), 1);
auto ret = InitScaleOffset();
auto ret = CheckInputsOutputsDataType(in_tensors_, out_tensors_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "Scale inputs or outputs data type is invalid.";
return RET_ERROR;
}
ret = InitScaleOffset();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Scale fp32 InitScaleOffset failed.";
return RET_ERROR;


+ 1
- 0
mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.cc View File

@@ -32,6 +32,7 @@ ops::PrimitiveC *TfliteArgmaxParser::Parse(const std::unique_ptr<tflite::Operato
prim->set_out_max_value(false);
prim->set_top_k(1);

MS_CHECK_TRUE_MSG(tflite_op->inputs.size() >= kInputSize1, nullptr, "argmax input size should be greater than 1.");
const auto &axis_tensor = tflite_subgraph->tensors.at(tflite_op->inputs[1]);
MS_CHECK_TRUE_MSG(axis_tensor != nullptr, nullptr, "axis_tensor is nullptr");
const auto &buf_data = tflite_model->buffers.at(axis_tensor->buffer);


+ 66
- 4
mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.cc View File

@@ -27,6 +27,70 @@ constexpr int kWeightChannelOut = 0;
constexpr int kWeightKernelH = 1;
constexpr int kWeightKernelW = 2;
constexpr int kWeightChannelIn = 3;
// Compute explicit padding values [pad_up, pad_down, pad_left, pad_right] for
// a tflite conv/depthwise-conv input tensor and append them to *params.
// For PadMode::SAME the pads are derived from the input tensor shape and the
// primitive's stride/dilation/kernel_size attributes; for any other pad mode
// all four pads stay 0.
// Returns RET_NO_CHANGE when the input shape is dynamic (empty), RET_ERROR on
// invalid attributes or integer overflow, RET_OK otherwise.
STATUS GetConvPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor, mindspore::PadMode pad_mode,
const ops::Conv2DFusion *conv_prim, std::vector<int64_t> *params) {
MSLITE_CHECK_PTR(tensor);
MSLITE_CHECK_PTR(params);
MSLITE_CHECK_PTR(conv_prim);
// An empty shape means the real shape is only known at runtime; the caller
// treats RET_NO_CHANGE as "pads cannot be pre-computed, skip".
if (tensor->shape.empty()) {
MS_LOG(DEBUG) << "the tensor's shape is dynamic, which obtain only when running.";
return RET_NO_CHANGE;
}
int pad_u = 0;
int pad_d = 0;
int pad_l = 0;
int pad_r = 0;
if (pad_mode == mindspore::PadMode::SAME) {
// Input is indexed with kNHWC_H / kNHWC_W below, i.e. treated as 4-D NHWC.
auto shape = tensor->shape;
MS_CHECK_TRUE_RET(shape.size() == DIMENSION_4D, RET_ERROR);
int input_h = shape.at(kNHWC_H);
int input_w = shape.at(kNHWC_W);
// Stride/dilation/kernel_size are each expected to hold at least {h, w}.
auto strides = conv_prim->get_stride();
MS_CHECK_TRUE_MSG(strides.size() > 1, RET_ERROR, "conv stride param is invalid.");
auto dilates = conv_prim->get_dilation();
MS_CHECK_TRUE_MSG(dilates.size() > 1, RET_ERROR, "conv dilation param is invalid.");
auto kernel_size = conv_prim->get_kernel_size();
MS_CHECK_TRUE_MSG(kernel_size.size() > 1, RET_ERROR, "conv kernel_size param is invalid.");
int stride_h = strides[0];
int stride_w = strides[1];
int dilate_h = dilates[0];
int dilate_w = dilates[1];
int kernel_h = kernel_size[0];
int kernel_w = kernel_size[1];
MS_CHECK_TRUE_MSG(stride_h != 0, RET_ERROR, "stride_h shouldn't be 0");
MS_CHECK_TRUE_MSG(stride_w != 0, RET_ERROR, "stride_w shouldn't be 0");
// SAME padding: output extent is ceil(input / stride).
int output_w = ceil(static_cast<float>(input_w) / static_cast<float>(stride_w));
int output_h = ceil(static_cast<float>(input_h) / static_cast<float>(stride_h));
// Guard each multiplication before computing the total padding
// pad_all = (output - 1) * stride + (kernel - 1) * dilate + 1 - input.
if (INT_MUL_OVERFLOW(output_h - 1, stride_h) || INT_MUL_OVERFLOW(kernel_h - 1, dilate_h)) {
MS_LOG(ERROR) << "int mul overflow";
return RET_ERROR;
}
int pad_h_all = ((output_h - 1) * stride_h + (kernel_h - 1) * dilate_h + 1 - input_h);
if (INT_MUL_OVERFLOW(output_w - 1, stride_w) || INT_MUL_OVERFLOW(kernel_w - 1, dilate_w)) {
MS_LOG(ERROR) << "int mul overflow";
return RET_ERROR;
}
int pad_w_all = ((output_w - 1) * stride_w + (kernel_w - 1) * dilate_w + 1 - input_w);
// Negative totals mean no padding is needed; otherwise split the total,
// giving the extra pixel (for odd totals) to the bottom/right side.
if (pad_h_all < 0) {
pad_u = pad_d = 0;
} else {
pad_u = pad_h_all / 2;
pad_d = pad_h_all - pad_u;
}
if (pad_w_all < 0) {
pad_l = pad_r = 0;
} else {
pad_l = pad_w_all / 2;
pad_r = pad_w_all - pad_l;
}
}

// Output order matches the Conv2DFusion pad_list convention: up, down, left, right.
params->emplace_back(pad_u);
params->emplace_back(pad_d);
params->emplace_back(pad_l);
params->emplace_back(pad_r);
return RET_OK;
}
} // namespace
ops::PrimitiveC *TfliteConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph,
@@ -72,8 +136,7 @@ ops::PrimitiveC *TfliteConvParser::Parse(const std::unique_ptr<tflite::OperatorT
// calculate pad params
const auto &dataTensor = tflite_subgraph->tensors.at(tflite_op->inputs[0]);
std::vector<int64_t> params;
int status = getPaddingParam(dataTensor, padMode, tflite_attr->stride_h, tflite_attr->stride_w,
weight_shape[kWeightKernelH], weight_shape[kWeightKernelW], &params);
int status = GetConvPaddingParam(dataTensor, padMode, prim.get(), &params);
if (status != RET_OK && status != RET_NO_CHANGE) {
MS_LOG(ERROR) << "get padding params failed";
return nullptr;
@@ -146,8 +209,7 @@ ops::PrimitiveC *TfliteDepthwiseConv2DParser::Parse(const std::unique_ptr<tflite

// calculate pad params
std::vector<int64_t> params;
int status = getPaddingParam(data_tensor, padMode, tflite_attr->stride_h, tflite_attr->stride_w,
weight_shape[kWeightKernelH], weight_shape[kWeightKernelW], &params);
int status = GetConvPaddingParam(data_tensor, padMode, prim.get(), &params);
if (status != RET_OK && status != RET_NO_CHANGE) {
MS_LOG(ERROR) << "get padding params failed";
return nullptr;


Loading…
Cancel
Save