diff --git a/mindspore/lite/src/lite_model.cc b/mindspore/lite/src/lite_model.cc
index 37b19b7efd..caf7038dbf 100644
--- a/mindspore/lite/src/lite_model.cc
+++ b/mindspore/lite/src/lite_model.cc
@@ -63,6 +63,7 @@ int LiteModel::ConvertAttrToTensors(const void *meta_graph) {
   for (size_t index = 0; index < this->all_nodes_.size(); ++index) {
     std::vector dst_tensors;
     auto prim = meta_graph_v0->nodes()->GetAs(index)->primitive();
+    MS_ASSERT(prim != nullptr);
     int status = ConvertAttrs(this->all_nodes_[index], prim, &dst_tensors);
     if (status != RET_OK) {
       MS_LOG(ERROR) << "fail to convert attr to tensor.";
@@ -97,6 +98,7 @@ void LiteModel::Free() {
   }
   for (auto &tensor_buf : attr_tensor_bufs_) {
     free(tensor_buf);
+    tensor_buf = nullptr;
   }
   attr_tensor_bufs_.resize(0);
 }
diff --git a/mindspore/lite/src/lite_model.h b/mindspore/lite/src/lite_model.h
index 0e56b83e1a..02af51f19c 100644
--- a/mindspore/lite/src/lite_model.h
+++ b/mindspore/lite/src/lite_model.h
@@ -66,6 +66,7 @@ class LiteModel : public Model {
       return false;
     }
     auto c_node = meta_graph.nodes()->template GetAs(i);
+    MS_ASSERT(c_node != nullptr);
     auto src_prim = reinterpret_cast(c_node->primitive());
 #ifdef PRIMITIVE_WRITEABLE
     node->primitive_ = PrimitiveC::Create(const_cast(src_prim));
@@ -86,8 +87,10 @@ class LiteModel : public Model {
       return false;
     }
     node->primitive_->set_quant_type(static_cast(c_node->quantType()));
+    MS_ASSERT(c_node->name() != nullptr);
     node->name_ = c_node->name()->c_str();
     node->node_type_ = static_cast(c_node->nodeType());
+    MS_ASSERT(c_node->inputIndex() != nullptr);
     auto count = c_node->inputIndex()->size();
     for (uint32_t j = 0; j < count; ++j) {
       node->input_indices_.push_back(size_t(c_node->inputIndex()->template GetAs(j)));
@@ -180,6 +183,7 @@ class LiteModel : public Model {
       }
     } else {
       auto sub_graphs = meta_graph.subGraph();
+      MS_ASSERT(sub_graphs != nullptr);
       auto sub_graph_size = sub_graphs->size();
       for (size_t i = 0; i < sub_graph_size; i++) {
         auto sub_graph = sub_graphs->template GetAs(i);
diff --git a/mindspore/lite/src/lite_session.cc b/mindspore/lite/src/lite_session.cc
index 09fc2401d2..7ddc0296e1 100644
--- a/mindspore/lite/src/lite_session.cc
+++ b/mindspore/lite/src/lite_session.cc
@@ -143,7 +143,7 @@ lite::Tensor *LiteSession::ConvertTensor(const schema::Tensor &src_tensor) {
       }
     }
   }
-  lite::Tensor *dst_tensor;
+  lite::Tensor *dst_tensor = nullptr;
   if (TypeId(src_tensor.dataType()) == kObjectTypeTensorType) {
     dst_tensor = new (std::nothrow) TensorList(shape, std::vector(), src_category);
   } else {
diff --git a/mindspore/lite/tools/anf_exporter/anf_exporter.cc b/mindspore/lite/tools/anf_exporter/anf_exporter.cc
index 7bc8e4fc8a..174cd86f02 100644
--- a/mindspore/lite/tools/anf_exporter/anf_exporter.cc
+++ b/mindspore/lite/tools/anf_exporter/anf_exporter.cc
@@ -268,12 +268,15 @@ int AnfExporter::ExportSubgraph(const FuncGraphPtr &func_graph, const std::uniqu
       auto partial_cnode = CreatePartialCnode(fg, cnode);
       primitive_c = GetValueNode>(partial_cnode->input(0));
       auto primT = primitive_c->primitiveT();
+      MS_ASSERT(primT != nullptr);
       auto pos = fg_subgraph_map.find(fg);
       if (pos != fg_subgraph_map.end()) {
+        MS_ASSERT(primT->value.AsPartial() != nullptr);
        primT->value.AsPartial()->subGraphIndex = fg_subgraph_map.at(fg);
       } else {
         size_t next_subgraph_index = fg_subgraph_map.size() + 1;
         fg_subgraph_map.insert(std::pair{fg, next_subgraph_index});
+        MS_ASSERT(primT->value.AsPartial() != nullptr);
         primT->value.AsPartial()->subGraphIndex = next_subgraph_index;
         ret = ExportSubgraph(fg, meta_graphT, next_subgraph_index, keep_graph, copy_primitive, cnode);
         if (ret != RET_OK) {
@@ -730,6 +733,7 @@ void AnfExporter::SetOpOutputNode(const CNodePtr &cnode, const std::unique_ptr
       if (!utils::isa(elements[i])) {
         MS_LOG(ERROR) << "abstract is not AbstractTensor";
+        delete (msTensor);
         return;
       }
       auto type = kNumberTypeFloat32;
@@ -807,6 +811,7 @@ ValueNodePtr AnfExporter::GetPartialAnfPrim() {
   partial_primitiveT->value.value = new (std::nothrow) schema::PartialT;
   if (partial_primitiveT->value.value == nullptr) {
     MS_LOG(ERROR) << "new PartialT failed";
+    delete (partial_primitiveT);
     return nullptr;
   }
diff --git a/mindspore/lite/tools/anf_importer/import_from_mindir.cc b/mindspore/lite/tools/anf_importer/import_from_mindir.cc
index a0081c769b..3c44c319e6 100644
--- a/mindspore/lite/tools/anf_importer/import_from_mindir.cc
+++ b/mindspore/lite/tools/anf_importer/import_from_mindir.cc
@@ -904,10 +904,12 @@ onnx::ModelProto *AnfImporterFromMindir::ReadOnnxFromBinary(const std::string &m
   }
   if (RET_OK != ValidateFileStr(model_path, ".mindir")) {
     MS_LOG(ERROR) << "INPUT ILLEGAL: modelFile must be *.mindir";
+    delete (onnx_model);
     return nullptr;
   }
   if (ReadProtoFromBinaryFile((const char *)model_path.c_str(), onnx_model) != RET_OK) {
     MS_LOG(ERROR) << "Read onnx model file failed, which is not a matched onnx model";
+    delete (onnx_model);
     return nullptr;
   }
   return onnx_model;
 }
diff --git a/mindspore/lite/tools/anf_importer/import_from_mindir.h b/mindspore/lite/tools/anf_importer/import_from_mindir.h
index f743f473ab..d47bb23932 100644
--- a/mindspore/lite/tools/anf_importer/import_from_mindir.h
+++ b/mindspore/lite/tools/anf_importer/import_from_mindir.h
@@ -75,8 +75,8 @@ class AnfImporterFromMindir : public AnfImporter {
   int ir_version_{};
   std::unordered_map anfnode_build_map_;
   std::map default_para_map_;
-  onnx::ModelProto *onnx_model_;
-  FuncGraphPtr func_graph_;
+  onnx::ModelProto *onnx_model_ = nullptr;
+  FuncGraphPtr func_graph_ = nullptr;
 };
 }  // namespace mindspore::lite
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.cc
index ee4e36c350..bd114c7726 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.cc
@@ -103,6 +103,7 @@ STATUS FormatTransFusionPass::DoFusion(schema::MetaGraphT *graph, const std::str
   auto dstNode = graph->nodes.at(dstPath->nodeIdx).get();
   MS_ASSERT(srcNode != nullptr);
   MS_ASSERT(dstNode != nullptr);
+  MS_ASSERT(srcNode->primitive->value.AsTranspose() != nullptr);
   bool isNc2NhAndNh2Nc = srcNode->primitive->value.AsTranspose()->perm == nchw2nhwc_perm &&
                          dstNode->primitive->value.AsTranspose()->perm == nhwc2nchw_perm;
   bool isNh2NcAndNc2Nh = srcNode->primitive->value.AsTranspose()->perm == nhwc2nchw_perm &&
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc
index 547e6ad02e..f949d8128a 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc
@@ -233,6 +233,7 @@ NodeIter FormatTransPass::InsertFormatTransNode(schema::MetaGraphT *graph, NodeI
     transposeParam->perm.resize(inParam->perm.size());
     std::transform(inParam->perm.begin(), inParam->perm.end(), transposeParam->perm.begin(),
                    [](const int32_t ele) { return ele; });
+    MS_ASSERT(newOpDef->primitive != nullptr);
     newOpDef->primitive->value.value = transposeParam;
     return newOpDef;
   };
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/global_format_transform_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/global_format_transform_pass.cc
index 27ab8168b5..7d0b7129af 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/global_format_transform_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/global_format_transform_pass.cc
@@ -41,6 +41,7 @@ STATUS GlobalFormatTransformPass::Run(MetaGraphT *graph) {
     if (type != PrimitiveType_Transpose) {
       continue;
     }
+    MS_ASSERT(pre_node->primitive->value.AsTranspose() != nullptr);
     if (node->primitive->value.AsTranspose()->perm != nchw2nhwc_perm) {
       continue;
     }
@@ -183,6 +184,7 @@ STATUS GlobalFormatTransformPass::FindPreNh2NcNodes(MetaGraphT *graph, size_t nc
     auto &pre_node = graph->nodes.at(input_node_index);
     MS_ASSERT(pre_node != nullptr);
     auto node_type = pre_node->primitive->value.type;
+    MS_ASSERT(pre_node->primitive->value.AsTranspose() != nullptr);
     if (node_type == schema::PrimitiveType_Transpose &&
         pre_node->primitive->value.AsTranspose()->perm == nhwc2nchw_perm) {
       if (!IsContain(*pre_nh2nc_nodes, input_node_index)) {
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/switch_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/switch_pass.cc
index 06ec76e1e6..ddc1038738 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/switch_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/switch_pass.cc
@@ -96,7 +96,7 @@ STATUS SingleSwitchPass::UpdateSwitchUser() {
 
 bool SingleSwitchPass::IsLoop() {
   for (auto &node : second_graph_nodes_) {
-    if (node->primitive->value.type == schema::PrimitiveType_Partial &&
+    if (node->primitive->value.type == schema::PrimitiveType_Partial && node->primitive->value.AsPartial() != nullptr &&
        node->primitive->value.AsPartial()->subGraphIndex == first_subgraph_index_) {
       body_to_cond_partial_node_ = node;
       return true;
@@ -404,6 +404,7 @@ STATUS SingleSwitchPass::Init() {
   }
 
   // get cond_graph_nodes_
+  MS_ASSERT(first_partial_node_->primitive->value.AsPartial() != nullptr);
   first_subgraph_index_ = first_partial_node_->primitive->value.AsPartial()->subGraphIndex;
   auto cond_node_indices = graph_->subGraph.at(first_subgraph_index_)->nodeIndices;
   for (auto &index : cond_node_indices) {
@@ -411,6 +412,7 @@ STATUS SingleSwitchPass::Init() {
   }
 
   // get second_graph_nodes_
+  MS_ASSERT(second_partial_node_->primitive->value.AsPartial() != nullptr);
   second_subgraph_index_ = second_partial_node_->primitive->value.AsPartial()->subGraphIndex;
   auto body_node_indices = graph_->subGraph.at(second_subgraph_index_)->nodeIndices;
   for (auto &index : body_node_indices) {
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_insert_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_insert_pass.cc
index 3efc133e5b..f11f9a3ccb 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_insert_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_insert_pass.cc
@@ -44,6 +44,7 @@ bool TransOpInsertPass::CanFusion(schema::MetaGraphT *graph, const std::unique_p
   MS_ASSERT(pre_node->primitive->value != nullptr);
   if (pre_type_ == kNONE) {
     if (pre_node->primitive->value.type == schema::PrimitiveType_Transpose) {
+      MS_ASSERT(pre_node->primitive->value.AsTranspose() != nullptr);
       if (pre_node->primitive->value.AsTranspose()->perm == nchw2nhwc_perm) {
         pre_type_ = kNCHW2NHWC;
       } else if (pre_node->primitive->value.AsTranspose()->perm == nhwc2nchw_perm) {
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_remove_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_remove_pass.cc
index d3fc0a7c2b..69d1d0d4ab 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_remove_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_remove_pass.cc
@@ -35,8 +35,9 @@ STATUS TransOpRemovePass::Run(MetaGraphT *graph) {
   for (auto iter = graph->nodes.begin(); iter != graph->nodes.end(); iter++) {
     auto &node = *iter;
     auto type = node->primitive->value.type;
-    if (type == schema::PrimitiveType_Transpose && (node->primitive->value.AsTranspose()->perm == nchw2nhwc_perm ||
-                                                    node->primitive->value.AsTranspose()->perm == nhwc2nchw_perm)) {
+    if (type == schema::PrimitiveType_Transpose && node->primitive->value.AsTranspose() != nullptr &&
+        (node->primitive->value.AsTranspose()->perm == nchw2nhwc_perm ||
+         node->primitive->value.AsTranspose()->perm == nhwc2nchw_perm)) {
       auto &input_tensor = graph->allTensors.at(node->inputIndex.at(0));
       // less than 4 dims can delete
       if (!input_tensor->dims.empty() && input_tensor->dims.size() < 4) {
diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.cc
index 3fbadbaaff..c71c64aa59 100644
--- a/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.cc
+++ b/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.cc
@@ -356,7 +356,15 @@ STATUS CaffeModelParser::ConvertBlobs(const caffe::LayerParameter &layer, std::v
     } else {
       count = layer.blobs(i).data_size();
       auto buf = std::make_unique(count);
+      if (buf == nullptr) {
+        MS_LOG(INFO) << "new buffer failed";
+        return RET_NULL_PTR;
+      }
       const float *data_ptr = layer.blobs(i).data().data();
+      if (data_ptr == nullptr) {
+        MS_LOG(INFO) << "data of origin layer is nullptr";
+        return RET_NULL_PTR;
+      }
       if (EOK != ::memcpy_s(buf.get(), count * sizeof(float), data_ptr, count * sizeof(float))) {
         MS_LOG(ERROR) << "memcpy_s failed.";
         return RET_ERROR;
diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_given_tensor_fill_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_given_tensor_fill_parser.cc
index 1a0a330fdf..0617397876 100644
--- a/mindspore/lite/tools/converter/parser/onnx/onnx_given_tensor_fill_parser.cc
+++ b/mindspore/lite/tools/converter/parser/onnx/onnx_given_tensor_fill_parser.cc
@@ -43,6 +43,10 @@ STATUS OnnxGivenTensorFillParser::ParseInt8GivenIntTensorFill(const onnx::NodePr
     MS_LOG(ERROR) << "new char[] failed";
     return RET_MEMORY_FAILED;
   }
+  if (iter->ints().data() == nullptr) {
+    MS_LOG(ERROR) << "origin ints data in onnx is nullptr";
+    return RET_NULL_PTR;
+  }
   if (memcpy_s(param_data, data_size, iter->ints().data(), data_size) != EOK) {
     MS_LOG(ERROR) << "memcpy data failed.";
     delete[] param_data;
diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc
index b878798713..97dd4bbccf 100644
--- a/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc
+++ b/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc
@@ -1090,7 +1090,7 @@ STATUS OnnxModelParser::BuildCNodeForGemm(const onnx::NodeProto &onnx_node, cons
   return RET_OK;
 }
 
-STATUS OnnxModelParser::BuildParameterNodeForQuantParam(void *data, const std::string &name, TypeId type) {
+STATUS OnnxModelParser::BuildParameterNodeForQuantParam(const void *data, const std::string &name, TypeId type) {
   if (data == nullptr) {
     MS_LOG(ERROR) << "value is nullptr.";
     return RET_NULL_PTR;
@@ -1102,10 +1102,18 @@ STATUS OnnxModelParser::BuildParameterNodeForQuantParam(void *data, const std::s
   std::vector shape_vector;
   auto parameter_node = anf_root_graph_->add_parameter();
   auto abstract_tensor = std::make_shared(TypeIdToType(type), shape_vector);
+  if (abstract_tensor == nullptr) {
+    MS_LOG(ERROR) << "new abstract_tensor failed";
+    return RET_MEMORY_FAILED;
+  }
   parameter_node->set_abstract(abstract_tensor);
   parameter_node->set_name(name);
   std::vector shape;
   ParamValueLitePtr param_value = std::make_shared();
+  if (param_value == nullptr) {
+    MS_LOG(ERROR) << "new param_value failed";
+    return RET_MEMORY_FAILED;
+  }
   param_value->set_tensor_shape(shape);
   param_value->set_format(schema::Format_NUM_OF_FORMAT);
   param_value->set_tensor_type(type);
@@ -1166,8 +1174,10 @@ STATUS OnnxModelParser::CopyOnnxTensorData(const onnx::TensorProto &onnx_const_t
     return RET_NULL_PTR;
   }
   size_t data_count = 1;
-  std::for_each(onnx_const_tensor.dims().begin(), onnx_const_tensor.dims().end(),
-                [&data_count](int dim) { data_count *= dim; });
+  if (!onnx_const_tensor.dims().empty()) {
+    std::for_each(onnx_const_tensor.dims().begin(), onnx_const_tensor.dims().end(),
+                  [&data_count](int dim) { data_count *= dim; });
+  }
   size_t data_size = 0;
   const void *onnx_data = nullptr;
   auto data_type = GetDataTypeFromOnnx(static_cast(onnx_const_tensor.data_type()));
@@ -1209,6 +1219,10 @@ STATUS OnnxModelParser::CopyOnnxTensorData(const onnx::TensorProto &onnx_const_t
   if (data_size == 0) {
     return RET_OK;
   }
+  if (onnx_data == nullptr) {
+    MS_LOG(ERROR) << "origin data in onnx model is nullptr";
+    return RET_MEMORY_FAILED;
+  }
   char *param_data = new (std::nothrow) char[data_size];
   if (param_data == nullptr) {
     MS_LOG(ERROR) << "new char[] failed";
diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.h
index 2706d4cdb4..f11f72279a 100644
--- a/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.h
+++ b/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.h
@@ -68,7 +68,7 @@ class OnnxModelParser : public ModelParser {
                        const std::unordered_map &anf_nodes_map);
   STATUS BuildReturnNode(const FuncGraphPtr &func_graph_ptr, const std::vector &return_inputs);
   STATUS BuildParameterNode(const ParameterPtr &parameter_node, const onnx::TensorProto &tensor);
-  STATUS BuildParameterNodeForQuantParam(void *data, const std::string &name, TypeId type);
+  STATUS BuildParameterNodeForQuantParam(const void *data, const std::string &name, TypeId type);
   STATUS BuildCNode(const onnx::NodeProto &onnx_node, const FuncGraphPtr &func_graph_ptr,
                     std::unordered_map *anf_nodes_map, std::vector *graph_inputs,
                     lite::PrimitiveC *primitive_c, std::string loop_name);
diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc
index 4a1083a43d..5443d9ca9a 100644
--- a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc
+++ b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc
@@ -41,6 +41,8 @@ schema::PadMode OnnxNodeParser::GetOnnxPadMode(const onnx::AttributeProto &onnx_
 
 STATUS OnnxNodeParser::GetTensorDataFromOnnx(const onnx::TensorProto &onnx_tensor,
                                              std::vector *value, int *type) {
+  MS_ASSERT(value != nullptr);
+  MS_ASSERT(type != nullptr);
   size_t data_count = 1;
   std::for_each(onnx_tensor.dims().begin(), onnx_tensor.dims().end(), [&data_count](int dim) { data_count *= dim; });
   switch (onnx_tensor.data_type()) {
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_util.cc b/mindspore/lite/tools/converter/parser/tf/tf_util.cc
index 17524d6212..ebedec45c0 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_util.cc
+++ b/mindspore/lite/tools/converter/parser/tf/tf_util.cc
@@ -85,7 +85,6 @@ schema::Format TensorFlowUtils::ParseNodeFormat(const tensorflow::NodeDef &node_
 
 bool TensorFlowUtils::DecodeInt64(std::string_view *str_view, uint64_t *value) {
   if (str_view == nullptr || value == nullptr) {
-    *value = 0;
     MS_LOG(ERROR) << "str_view or value is nullptr";
     return false;
   }
diff --git a/mindspore/lite/tools/optimizer/graph/while_pass.cc b/mindspore/lite/tools/optimizer/graph/while_pass.cc
index a568845fa1..c80f5e8741 100644
--- a/mindspore/lite/tools/optimizer/graph/while_pass.cc
+++ b/mindspore/lite/tools/optimizer/graph/while_pass.cc
@@ -40,6 +40,7 @@ ValueNodePtr WhilePass::GetSwitchAnfPrim() {
   switch_primitiveT->value.value = new (std::nothrow) schema::SwitchT;
   if (switch_primitiveT->value.value == nullptr) {
     MS_LOG(ERROR) << "new MakeTupleT failed";
+    delete (switch_primitiveT);
     return nullptr;
   }
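
Editor's note: most hunks above follow one recurring pattern, guarding generated accessors (AsTranspose(), AsPartial(), name(), inputIndex(), primitive()) before dereferencing them, since such accessors return nullptr when the field is absent or the primitive is of a different type. The following is a standalone sketch of that pattern, not MindSpore source: Transpose, PrimitiveValue, and IsNchw2Nhwc are hypothetical stand-ins, and plain fprintf replaces MS_LOG/MS_ASSERT (MS_ASSERT is assumed here to be a debug-only check, which is why the patch also adds explicit runtime checks in some places).

// guard_sketch.cc -- standalone illustration, not MindSpore source.
#include <cstdio>

struct Transpose {
  int perm[4];
};

struct PrimitiveValue {
  // Mirrors the generated AsTranspose()/AsPartial() accessors: returns nullptr
  // when the primitive is not actually a Transpose.
  const Transpose *AsTranspose() const { return transpose; }
  const Transpose *transpose = nullptr;
};

// Before the patch: value.AsTranspose()->perm dereferences a possibly-null
// pointer. After: the accessor result is checked (or asserted) first.
bool IsNchw2Nhwc(const PrimitiveValue &value) {
  const Transpose *t = value.AsTranspose();
  if (t == nullptr) {
    std::fprintf(stderr, "primitive is not a Transpose\n");
    return false;
  }
  return t->perm[0] == 0 && t->perm[1] == 2 && t->perm[2] == 3 && t->perm[3] == 1;
}

int main() {
  PrimitiveValue value;                             // transpose deliberately left null
  std::printf("%d\n", IsNchw2Nhwc(value) ? 1 : 0);  // prints 0 instead of crashing
  return 0;
}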
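
The second recurring pattern is releasing partially constructed objects on early-return error paths (the added delete (partial_primitiveT), delete (onnx_model), delete (msTensor), delete (switch_primitiveT) lines). A minimal sketch follows, again not MindSpore source: PrimitiveT and PartialT are hypothetical stand-ins for the generated schema types, and the RAII variant shows how std::unique_ptr would make the manual delete unnecessary; that is an alternative design, not what the patch does.

// cleanup_sketch.cc -- standalone illustration, not MindSpore source.
#include <cstdio>
#include <memory>
#include <new>

struct PartialT {
  int subGraphIndex = 0;
};

struct PrimitiveT {
  PartialT *value = nullptr;
  ~PrimitiveT() { delete value; }
};

// Manual style: once the outer object is allocated, every early return must
// free it, or the object leaks.
PrimitiveT *MakePartialManual() {
  auto *prim = new (std::nothrow) PrimitiveT;
  if (prim == nullptr) {
    return nullptr;
  }
  prim->value = new (std::nothrow) PartialT;
  if (prim->value == nullptr) {
    std::fprintf(stderr, "new PartialT failed\n");
    delete prim;  // the kind of cleanup line the patch adds on error paths
    return nullptr;
  }
  return prim;
}

// RAII alternative: ownership lives in unique_ptr, so early returns need no
// explicit delete.
std::unique_ptr<PrimitiveT> MakePartialRaii() {
  auto prim = std::make_unique<PrimitiveT>();
  prim->value = new (std::nothrow) PartialT;
  if (prim->value == nullptr) {
    std::fprintf(stderr, "new PartialT failed\n");
    return nullptr;  // prim is released automatically here
  }
  return prim;
}

int main() {
  std::unique_ptr<PrimitiveT> manual(MakePartialManual());
  auto raii = MakePartialRaii();
  std::printf("manual=%d raii=%d\n", manual != nullptr, raii != nullptr);
  return 0;
}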