diff --git a/mindspore/lite/src/model_common.h b/mindspore/lite/src/model_common.h index ceebf75a90..041ec0b7a6 100644 --- a/mindspore/lite/src/model_common.h +++ b/mindspore/lite/src/model_common.h @@ -34,6 +34,10 @@ int ConvertSubGraph(const schema::SubGraph &sub_graph, Model *model); template <typename T> bool ConvertNodes(const T &meta_graph, Model *model, int schema_version = SCHEMA_CUR) { MS_ASSERT(model != nullptr); + if (meta_graph.nodes() == nullptr) { + MS_LOG(ERROR) << "meta_graph is invalid, please check your model file."; + return false; + } for (size_t i = 0; i < meta_graph.nodes()->size(); ++i) { auto *node = new (std::nothrow) Model::Node(); if (node == nullptr) { @@ -74,6 +78,10 @@ bool ConvertNodes(const T &meta_graph, Model *model, int schema_version = SCHEMA template <typename T> bool ConvertTensors(const T &meta_graph, Model *model) { MS_ASSERT(model != nullptr); + if (meta_graph.allTensors() == nullptr) { + MS_LOG(ERROR) << "meta_graph is invalid, please check your model file."; + return false; + } auto tensor_count = meta_graph.allTensors()->size(); for (uint32_t i = 0; i < tensor_count; ++i) { auto *tensor = meta_graph.allTensors()->template GetAs<schema::Tensor>(i); @@ -89,6 +97,11 @@ bool ConvertTensors(const T &meta_graph, Model *model) { template <typename T> int MetaGraphMappingSubGraph(const T &meta_graph, Model *model) { MS_ASSERT(model != nullptr); + if (meta_graph.inputIndex() == nullptr || meta_graph.outputIndex() == nullptr || meta_graph.nodes() == nullptr || + meta_graph.allTensors() == nullptr) { + MS_LOG(ERROR) << "meta_graph is invalid, please check your model file."; + return RET_ERROR; + } auto *subgraph = new (std::nothrow) Model::SubGraph(); if (subgraph == nullptr) { MS_LOG(ERROR) << "new subGraph fail!"; @@ -109,7 +122,7 @@ int MetaGraphMappingSubGraph(const T &meta_graph, Model *model) { for (uint32_t i = 0; i < node_count; ++i) { subgraph->node_indices_.push_back(i); } - auto tensor_count = meta_graph.nodes()->size(); + auto tensor_count = meta_graph.allTensors()->size(); 
for (uint32_t i = 0; i < tensor_count; ++i) { subgraph->tensor_indices_.push_back(i); } diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc index f38195472c..08f750009f 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc @@ -29,10 +29,24 @@ namespace { constexpr int DEFAULT_DIM_VALUE = -1; } namespace { +void FreeTensors(std::vector<Tensor *> input_tensors, std::vector<Tensor *> output_tensors) { + for (auto &tensor : input_tensors) { + delete tensor; + } + for (auto &tensor : output_tensors) { + delete tensor; + } + input_tensors.clear(); + input_tensors.shrink_to_fit(); + output_tensors.clear(); + output_tensors.shrink_to_fit(); +} + std::vector<Tensor *> ConvertTensorToLiteTensor(MetaGraphT *graph, const std::vector<uint32_t> &tensor_indexs, const schema::PrimitiveType node_type) { MS_ASSERT(graph != nullptr); std::vector<Tensor *> lite_tensors; + bool convert_succ = true; for (size_t i = 0; i < tensor_indexs.size(); i++) { auto &tensorT = graph->allTensors.at(tensor_indexs[i]); auto tensor_shape = tensorT->dims; @@ -41,7 +55,8 @@ std::vector<Tensor *> ConvertTensorToLiteTensor(MetaGraphT *graph, const std::ve TensorCategory(tensorT->nodeType, tensorT->dims.size(), TypeId(tensorT->dataType), tensorT->data.size())); if (lite_tensor == nullptr) { MS_LOG(ERROR) << "lite tensor is nullptr"; - return std::vector<Tensor *>(); + convert_succ = false; + break; } auto lite_tensor_size = tensorT->data.size() * sizeof(uint8_t); // when tensorT as param input @@ -52,15 +67,21 @@ std::vector<Tensor *> ConvertTensorToLiteTensor(MetaGraphT *graph, const std::ve auto ret = lite_tensor->MallocData(); if (ret != 0) { MS_LOG(ERROR) << "Malloc tensor data failed"; - return std::vector<Tensor *>(); + convert_succ = false; + break; } ret = memcpy_s(lite_tensor->MutableData(), lite_tensor->Size(), tensorT->data.data(), lite_tensor_size); if (ret != EOK) { 
MS_LOG(ERROR) << "memcpy error: " << ret; - return std::vector<Tensor *>(); + convert_succ = false; + break; } lite_tensors.emplace_back(lite_tensor.release()); } + if (!convert_succ) { + FreeTensors(lite_tensors, {}); + return {}; + } return lite_tensors; } void PrintTensorShape(const std::vector<Tensor *> &input_tensors, const std::vec @@ -81,13 +102,8 @@ void PrintTensorShape(const std::vector<Tensor *> &input_tensors, const std::vec MS_LOG(DEBUG) << "output shape" << i++ << ":" << oss.str(); } } -void FreeTensors(std::vector<Tensor *> input_tensors, std::vector<Tensor *> output_tensors) { - input_tensors.clear(); - input_tensors.shrink_to_fit(); - output_tensors.clear(); - output_tensors.shrink_to_fit(); -} } // namespace + STATUS InferShapePass::Run(MetaGraphT *graph) { MS_ASSERT(graph != nullptr); for (auto idx : graph->inputIndex) { @@ -131,12 +147,7 @@ STATUS InferShapePass::Run(MetaGraphT *graph) { if (ret == RET_INFER_INVALID) { MS_LOG(INFO) << "InferShape shouldn't be done before runtime, name: " << node->name << ", type: " << schema::EnumNamePrimitiveType(node->primitive->value.type) << "flag set to false."; - for (auto input_tensor : input_tensors) { - delete input_tensor; - } - for (auto output_tensor : output_tensors) { - delete output_tensor; - } + FreeTensors(input_tensors, output_tensors); return RET_INFER_INVALID; } else if (ret != RET_OK) { MS_LOG(WARNING) << "InferShape failed, name: " << node->name