diff --git a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc
index 3f485f15a6..f26c9a7d6a 100644
--- a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc
+++ b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc
@@ -58,7 +58,11 @@ int AnfImporterFromMetaGraphT::ConverterConstTensor() {
       MS_LOG(ERROR) << "new char[] failed";
       return RET_MEMORY_FAILED;
     }
-    std::memcpy(tensor_data, tensor->data.data(), size);
+    auto ret = memcpy_s(tensor_data, size, tensor->data.data(), size);
+    if (EOK != ret) {
+      MS_LOG(ERROR) << "memcpy_s error";
+      return RET_MEMORY_FAILED;
+    }
     param_value->set_tensor_addr(tensor_data);
     param_value->set_tensor_size(size);
     parameter->set_default_param(param_value);
@@ -154,8 +158,16 @@ int AnfImporterFromMetaGraphT::ConvertAbstract(const std::unique_ptr(i));
+      if (tuple_get_item_prim == nullptr || get_item_value == nullptr) {
+        MS_LOG(ERROR) << "NewValueNode is nullptr";
+        return RET_NULL_PTR;
+      }
       std::vector inputs{tuple_get_item_prim, dst_cnode, get_item_value};
       CNodePtr get_item_cnode = func_graph_->NewCNode(inputs);
+      if (get_item_cnode == nullptr) {
+        MS_LOG(ERROR) << "NewCNode is nullptr";
+        return RET_NULL_PTR;
+      }
       get_item_cnode->set_fullname_with_scope(src_cnode->name + "_getitem_" + std::to_string(i));
       AddNode(out_tensor_id, get_item_cnode);
     }
@@ -216,6 +228,10 @@ int AnfImporterFromMetaGraphT::AddReturnCNode() {
       make_tuple_inputs.emplace_back(cNode);
     }
     auto make_tuple_cnode = func_graph_->NewCNode(make_tuple_inputs);
+    if (make_tuple_cnode == nullptr) {
+      MS_LOG(ERROR) << "NewCNode is nullptr";
+      return RET_NULL_PTR;
+    }
     make_tuple_cnode->set_fullname_with_scope("return tuple");
     std::vector op_inputs;
@@ -246,6 +262,10 @@ int AnfImporterFromMetaGraphT::AddReturnCNode() {
     }
     op_inputs.emplace_back(cnode);
     auto return_cnode = func_graph_->NewCNode(op_inputs);
+    if (return_cnode == nullptr) {
+      MS_LOG(ERROR) << "NewCNode is nullptr";
+      return RET_NULL_PTR;
+    }
     return_cnode->set_fullname_with_scope("return");
     func_graph_->set_return(return_cnode);
   }
diff --git a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.h b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.h
index 0e16c3742e..372f8d3042 100644
--- a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.h
+++ b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.h
@@ -27,7 +27,7 @@ namespace mindspore::lite {
 class AnfImporterFromMetaGraphT : public AnfImporter {
  public:
-  explicit AnfImporterFromMetaGraphT(schema::MetaGraphT *meta_graph, FuncGraphPtr func_graph)
+  AnfImporterFromMetaGraphT(schema::MetaGraphT *meta_graph, FuncGraphPtr func_graph)
       : meta_graph_(meta_graph), func_graph_(std::move(func_graph)) {}
   ~AnfImporterFromMetaGraphT() override = default;
diff --git a/mindspore/lite/tools/anf_importer/import_from_protobuf.cc b/mindspore/lite/tools/anf_importer/import_from_protobuf.cc
index d1b696c4bf..a647a35a59 100644
--- a/mindspore/lite/tools/anf_importer/import_from_protobuf.cc
+++ b/mindspore/lite/tools/anf_importer/import_from_protobuf.cc
@@ -43,7 +43,6 @@
 using int64 = int64_t;
 using uint64 = uint64_t;
 namespace mindspore::lite {
-
 static constexpr char kConstantValueNode[] = "Constant";
 
 enum ParseForm : int {
@@ -212,7 +211,7 @@ int AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &node
   node->set_name(value_proto.name());
   const auto &type_proto = value_proto.type();
   if (!type_proto.has_tensor_type()) {
-    MS_LOG(ERROR) << "onnx TypeProto has no tesor_type! ";
+    MS_LOG(ERROR) << "onnx TypeProto has no tensor_type! ";
     return RET_PARAM_INVALID;
   }
   const onnx::TypeProto_Tensor &tensor_typeproto = type_proto.tensor_type();
@@ -248,6 +247,7 @@ int AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &node
     std::string initial_data = initialize_proto.raw_data();
     auto *tensor_data_buf = reinterpret_cast(tensor_info->MutableData());
     if (tensor_data_buf == nullptr) {
+      delete tensor_info;
       return RET_MEMORY_FAILED;
     }
     tensor_info->set_data(nullptr);
@@ -261,6 +261,7 @@ int AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &node
     ParamValueLitePtr param_value = std::make_shared();
     if (param_value == nullptr) {
+      delete tensor_info;
       return RET_NULL_PTR;
     }
     param_value->set_tensor_addr(tensor_data_buf);
@@ -367,22 +368,38 @@ bool AnfImporterFromProtobuf::ObtainCNodeAttrInTensorForm(const PrimitivePtr &pri
         std::make_shared(kDefaultValueSwitchMap[attr_tensor_type], shape_vector);
     auto *tensor_data_buf = reinterpret_cast(tensor_info->data_c());
     ret = memcpy_s(tensor_data_buf, tensor_info->Size(), tensor_buf.data(), tensor_buf.size());
+    if (EOK != ret) {
+      MS_LOG(ERROR) << "memcpy_s error";
+      return false;
+    }
     prim->set_attr(attr_name, MakeValue(tensor_info));
   } else {
     if (attr_tensor_type == onnx::TensorProto_DataType_DOUBLE) {
       size_t data_size = sizeof(double);
       double attr_value = 0.0;
       ret = memcpy_s(&attr_value, data_size, tensor_buf.data(), tensor_buf.size());
+      if (EOK != ret) {
+        MS_LOG(ERROR) << "memcpy_s error";
+        return false;
+      }
       prim->set_attr(attr_name, MakeValue(attr_value));
     } else if (attr_tensor_type == onnx::TensorProto_DataType_INT64) {
       size_t data_size = sizeof(int64_t);
       int64_t attr_value = 0;
       ret = memcpy_s(&attr_value, data_size, tensor_buf.data(), tensor_buf.size());
+      if (EOK != ret) {
+        MS_LOG(ERROR) << "memcpy_s error";
+        return false;
+      }
       prim->set_attr(attr_name, MakeValue(attr_value));
     } else if (attr_tensor_type == onnx::TensorProto_DataType_BOOL) {
       size_t data_size = sizeof(bool);
       bool attr_value = false;
       ret = memcpy_s(&attr_value, data_size, tensor_buf.data(), tensor_buf.size());
+      if (EOK != ret) {
+        MS_LOG(ERROR) << "memcpy_s error";
+        return false;
+      }
       prim->set_attr(attr_name, MakeValue(attr_value));
     }
   }
@@ -399,7 +416,7 @@ bool AnfImporterFromProtobuf::GetAttrValueForCNode(const PrimitivePtr &prim, con
     return false;
   }
   const std::string &ref_attr_name = attr_proto.ref_attr_name();
-  string type;
+  string type = "";
   std::size_t pos(0);
   if ((pos = ref_attr_name.find("scalar:")) != std::string::npos) {
     type = ref_attr_name.substr(pos, string("scalar:").length() - 1);
@@ -503,7 +520,7 @@ bool AnfImporterFromProtobuf::GetAttrValueForValueNode(const std::string &value_
     return false;
   }
   const std::string &ref_attr_name = attr_proto.ref_attr_name();
-  string type;
+  string type = "";
   std::size_t pos(0);
   if ((pos = ref_attr_name.find("scalar:")) != std::string::npos) {
     type = ref_attr_name.substr(pos, string("scalar:").length() - 1);
@@ -682,9 +699,17 @@ bool AnfImporterFromProtobuf::BuildReturnForFuncGraph(const FuncGraphPtr &output
       const onnx::ValueInfoProto &output_node = importProto.output(out_size);
       const std::string &out_tuple = output_node.name();
       inputs.push_back(anfnode_build_map_[out_tuple]);
+      if (anfnode_build_map_[out_tuple] == nullptr) {
+        MS_LOG(ERROR) << "AnfNode is nullptr";
+        return false;
+      }
       elem.push_back(anfnode_build_map_[out_tuple]->abstract());
     }
     auto maketuple_ptr = outputFuncGraph->NewCNode(inputs);
+    if (maketuple_ptr == nullptr) {
+      MS_LOG(ERROR) << "maketuple_ptr is nullptr";
+      return false;
+    }
     maketuple_ptr->set_abstract(std::make_shared(elem));
     inputs.clear();
     auto primReturn = std::make_unique();
@@ -857,6 +882,10 @@ int AnfImporterFromProtobuf::Import(const schema::QuantType &quantType) {
     MS_LOG(ERROR) << "Parse configuration info for pb file failed!";
     return status;
   }
+  if (onnx_model_ == nullptr) {
+    MS_LOG(ERROR) << "onnx_model_ is nullptr";
+    return RET_NULL_PTR;
+  }
   const onnx::GraphProto &graphBuild = onnx_model_->graph();
   status = BuildFuncGraph(dstGraph, graphBuild, quantType);
   if (status != RET_OK) {
@@ -871,6 +900,11 @@ int AnfImporterFromProtobuf::Import(const schema::QuantType &quantType) {
 onnx::ModelProto *AnfImporterFromProtobuf::ReadOnnxFromBinary(const std::string &model_path) {
   auto onnx_model = new (std::nothrow) onnx::ModelProto;
+  if (onnx_model == nullptr) {
+    MS_LOG(ERROR) << "New onnx ModelProto failed!";
+    ReturnCode::GetSingleReturnCode()->UpdateReturnCode(RET_NULL_PTR);
+    return nullptr;
+  }
   if (RET_OK != ValidateFileStr(model_path, ".mindir")) {
     MS_LOG(ERROR) << "INPUT ILLEGAL: modelFile must be *.mindir";
     ReturnCode::GetSingleReturnCode()->UpdateReturnCode(RET_INPUT_PARAM_INVALID);
diff --git a/mindspore/lite/tools/anf_importer/import_from_protobuf.h b/mindspore/lite/tools/anf_importer/import_from_protobuf.h
index 11f831e66e..b12503e4a1 100644
--- a/mindspore/lite/tools/anf_importer/import_from_protobuf.h
+++ b/mindspore/lite/tools/anf_importer/import_from_protobuf.h
@@ -31,7 +31,7 @@ namespace mindspore::lite {
 class AnfImporterFromProtobuf : public AnfImporter {
  public:
-  explicit AnfImporterFromProtobuf(onnx::ModelProto *onnx_model, FuncGraphPtr func_graph)
+  AnfImporterFromProtobuf(onnx::ModelProto *onnx_model, FuncGraphPtr func_graph)
       : onnx_model_(onnx_model), func_graph_(std::move(func_graph)) {}
   ~AnfImporterFromProtobuf() override = default;
diff --git a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
index 20f6c53ae3..8085f423f3 100644
--- a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
@@ -203,7 +203,7 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An
     auto output_nums = GetOutputTensorNum(input_cnode);
     std::vector output_tensors;
     for (size_t j = 0; j < output_nums; j++) {
-      output_tensors.push_back(new Tensor());
+      output_tensors.push_back(new (std::nothrow) Tensor());
     }
     auto lite_primitive = GetValueNode>(input_cnode->input(0));
     if (lite_primitive == nullptr) {
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.cc
index cc1b4e6267..a2aa75ce44 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.cc
@@ -32,7 +32,6 @@ const BaseRef ConvActivationFusion::DefinePattern() const {
   auto prim = new schema::PrimitiveT();
   prim->value.type = primitive_type;
   auto prim_value = std::make_shared(prim);
-
   return VectorRef({prim_value, conv_var});
 }
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.h b/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.h
index af7d900bec..ed6417d59b 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.h
+++ b/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.h
@@ -25,9 +25,9 @@ namespace mindspore {
 namespace opt {
 class ConvActivationFusion : public PatternProcessPass {
  public:
-  explicit ConvActivationFusion(bool multigraph = true, const std::string &name = "conv_activation_fusion",
-                                schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU,
-                                schema::ActivationType activation = schema::ActivationType_LEAKY_RELU)
+  ConvActivationFusion(bool multigraph = true, const std::string &name = "conv_activation_fusion",
+                       schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU,
+                       schema::ActivationType activation = schema::ActivationType_LEAKY_RELU)
       : PatternProcessPass(name, multigraph), primitive_type(primitive), activation_type(activation) {}
   ~ConvActivationFusion() override = default;
   const BaseRef DefinePattern() const override;
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_bn_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_bn_fusion.cc
index 6b111d0fb1..444f71a2c0 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_bn_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_bn_fusion.cc
@@ -57,6 +57,11 @@ void CalTransale(const AnfNodePtr &bn_scale_node, const AnfNodePtr &bn_var_node,
   for (int32_t i = 0; i < kernel_num; i++) {
     float tmp = trans_scale[i] + eps;
     tmp = pow(tmp, POW_NUM);
+    if (tmp <= 0.0f) {
+      MS_LOG(ERROR) << "divisor cannot be 0";
+      lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_ERROR);
+      return;
+    }
     trans_scale[i] = 1 / tmp;
   }
   if (bn_scale_node != nullptr) {
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_scale_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_scale_fusion.cc
index bb4bd9bd53..3531f5d4e0 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_scale_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_scale_fusion.cc
@@ -42,7 +42,6 @@ const BaseRef ConvScaleFusion::DefinePattern() const {
   auto bn_var = std::make_shared(IsScaleNode);
   auto weight_var = std::make_shared(IsParamNode);
   auto bias_var = std::make_shared();
-
   return VectorRef({bn_var, conv_var, weight_var, bias_var});
 }
 const void ConvScaleFusion::InitTransParam(const CNodePtr &scale_node, int kernel_num, float *trans_scale,
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
index 208edf15b0..e1eacad970 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
@@ -86,14 +86,12 @@ const AnfNodePtr ConvTransformFusion::Process(const FuncGraphPtr &func_graph, co
   auto trans_scale = new (std::nothrow) float[kernel_nums];
   if (trans_scale == nullptr) {
     MS_LOG(ERROR) << "tensor_data is nullptr";
-    delete[] trans_scale;
     return nullptr;
   }
   auto trans_bias = new (std::nothrow) float[kernel_nums];
   if (trans_bias == nullptr) {
     MS_LOG(ERROR) << "tensor_data is nullptr";
     delete[] trans_scale;
-    delete[] trans_bias;
     return nullptr;
   }
   GenTransParam(transform_node, kernel_nums, trans_scale, trans_bias);
@@ -179,11 +177,10 @@ const void ConvTransformFusion::GenNewConvTensor(const FuncGraphPtr &func_graph,
   if (kernel_num <= 0) {
     MS_LOG(ERROR) << "kernel num less than 0";
     lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_INVALID_OP_ATTR);
+    return;
   }
   auto kernel_size = weight_tensor->tensor_shape_size() / kernel_num;
-
   CalNewWeightTensor(weight_data, kernel_num, kernel_size, trans_scale);
-
   float *bias_data = nullptr;
   // conv has bias,bias_flag true
   bool bias_flag = false;
@@ -196,7 +193,6 @@ const void ConvTransformFusion::GenNewConvTensor(const FuncGraphPtr &func_graph,
     bias_data = new (std::nothrow) float[kernel_num];
     if (bias_data == nullptr) {
       MS_LOG(ERROR) << "tensor_data is nullptr";
-      delete[] bias_data;
       return;
     }
   }
@@ -211,6 +207,10 @@ const void ConvTransformFusion::CalNewWeightTensor(float *weight_data, int kerne
                                                    const float *trans_scale) const {
   MS_ASSERT(weight_data != nullptr);
   auto tmp_weight_data = new (std::nothrow) float[kernel_num * kernel_size];
+  if (tmp_weight_data == nullptr) {
+    lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_MEMORY_FAILED);
+    return;
+  }
   MS_ASSERT(new_weight_data != nullptr);
   auto data_size = kernel_num * kernel_size * sizeof(float);
   if (0 != memset_s(tmp_weight_data, data_size, 0, data_size)) {
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc
index d8ed8a6622..bfd9b8b55e 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc
@@ -38,7 +38,6 @@ const BaseRef ConvTupleActivationFusion::DefinePattern() const {
   auto act_prim = new schema::PrimitiveT();
   act_prim->value.type = primitive_type;
   auto act_value = std::make_shared(act_prim);
-
   return VectorRef({act_value, tuple_get_item});
 }
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.h b/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.h
index cc9344615a..e89974976f 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.h
+++ b/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.h
@@ -25,9 +25,9 @@ namespace mindspore {
 namespace opt {
 class ConvTupleActivationFusion : public PatternProcessPass {
  public:
-  explicit ConvTupleActivationFusion(bool multigraph = true, const std::string &name = "conv_tuple_activation_fusion",
-                                     schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU,
-                                     schema::ActivationType activation = schema::ActivationType_LEAKY_RELU)
+  ConvTupleActivationFusion(bool multigraph = true, const std::string &name = "conv_tuple_activation_fusion",
+                            schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU,
+                            schema::ActivationType activation = schema::ActivationType_LEAKY_RELU)
       : PatternProcessPass(name, multigraph), primitive_type(primitive), activation_type(activation) {}
   ~ConvTupleActivationFusion() override = default;
   const BaseRef DefinePattern() const override;
diff --git a/mindspore/lite/tools/optimizer/fusion/layer_norm_fusion.h b/mindspore/lite/tools/optimizer/fusion/layer_norm_fusion.h
index d6bf85ecf5..3522f66e5e 100644
--- a/mindspore/lite/tools/optimizer/fusion/layer_norm_fusion.h
+++ b/mindspore/lite/tools/optimizer/fusion/layer_norm_fusion.h
@@ -48,7 +48,6 @@ class LayerNormFusion : public PatternProcessPass {
   VarPtr beta_;
   VarPtr epsilon_;
 };
-
 }  // namespace opt
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.cc b/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.cc
index acd3579ea2..3f03bfffd9 100644
--- a/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.cc
@@ -28,14 +28,13 @@ constexpr size_t kActivationInputsLength = 2;
 }
 const BaseRef PoolingActivationFusion::DefinePattern() const {
   auto pooling_var = std::make_shared(IsPoolingNode)();
-  auto prim = new schema::PrimitiveT();
+  auto prim = new (std::nothrow) schema::PrimitiveT();
   if (prim == nullptr) {
     MS_LOG(ERROR) << "new primitiveT failed";
     return nullptr;
   }
   prim->value.type = primitive_type;
   auto prim_value = std::make_shared(prim);
-
   return VectorRef({prim_value, pooling_var});
 }
@@ -43,7 +42,6 @@ const AnfNodePtr PoolingActivationFusion::Process(const FuncGraphPtr &func_graph
                                                   const EquivPtr &) const {
   MS_LOG(DEBUG) << "pooling activation pass process:" << schema::EnumNamesPrimitiveType()[primitive_type];
   CheckIfFuncGraphIsNull(func_graph);
-
   CheckIfAnfNodeIsNull(node);
   auto act_node = node->cast();
   CheckIfCNodeIsNull(act_node);
diff --git a/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.h b/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.h
index ae93f0ca3f..7633e0e5aa 100644
--- a/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.h
+++ b/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.h
@@ -25,9 +25,9 @@ namespace mindspore {
 namespace opt {
 class PoolingActivationFusion : public PatternProcessPass {
  public:
-  explicit PoolingAActivationFusion(bool multigraph = true, const std::string &name = "pooling_activation_fusion",
-                                    schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU,
-                                    schema::ActivationType activation = schema::ActivationType_LEAKY_RELU)
+  PoolingAActivationFusion(bool multigraph = true, const std::string &name = "pooling_activation_fusion",
+                           schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU,
+                           schema::ActivationType activation = schema::ActivationType_LEAKY_RELU)
       : PatternProcessPass(name, multigraph), primitive_type(primitive), activation_type(activation) {}
   ~PoolingAActivationFusion() override = default;
   const BaseRef DefinePattern() const override;
diff --git a/mindspore/lite/tools/optimizer/graph/clip_convert_activation_pass.cc b/mindspore/lite/tools/optimizer/graph/clip_convert_activation_pass.cc
index 8e3d4edfad..3f76c053ea 100644
--- a/mindspore/lite/tools/optimizer/graph/clip_convert_activation_pass.cc
+++ b/mindspore/lite/tools/optimizer/graph/clip_convert_activation_pass.cc
@@ -75,7 +75,7 @@ bool ClipConvertActivationPass::Run(const FuncGraphPtr &graph) {
   auto primitive = std::make_unique();
   MS_ASSERT(primitive != nullptr);
   primitive->value.type = schema::PrimitiveType_Activation;
-  auto prim2 = new schema::ActivationT;
+  auto prim2 = new (std::nothrow) schema::ActivationT;
   MS_ASSERT(prim2 != nullptr);
   if (min == 0 && max == 6) {
     prim2->type = schema::ActivationType_RELU6;