Browse Source

!8848 [lite] fix memory leak and increase robustness for benchmark

From: @xu_anyue
Reviewed-by: @hangangqiang,@zhang_xue_tong
Signed-off-by: @hangangqiang
tags/v1.1.0
mindspore-ci-bot Gitee 5 years ago
parent
commit
cd0157ae03
2 changed files with 40 additions and 16 deletions
  1. +14
    -1
      mindspore/lite/src/model_common.h
  2. +26
    -15
      mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc

+ 14
- 1
mindspore/lite/src/model_common.h View File

@@ -34,6 +34,10 @@ int ConvertSubGraph(const schema::SubGraph &sub_graph, Model *model);
template <typename T = schema::MetaGraph, typename U = schema::CNode>
bool ConvertNodes(const T &meta_graph, Model *model, int schema_version = SCHEMA_CUR) {
MS_ASSERT(model != nullptr);
if (meta_graph.nodes() == nullptr) {
MS_LOG(ERROR) << "meta_graph is invalid, please check your model file.";
return false;
}
for (size_t i = 0; i < meta_graph.nodes()->size(); ++i) {
auto *node = new (std::nothrow) Model::Node();
if (node == nullptr) {
@@ -74,6 +78,10 @@ bool ConvertNodes(const T &meta_graph, Model *model, int schema_version = SCHEMA
template <typename T = schema::MetaGraph>
bool ConvertTensors(const T &meta_graph, Model *model) {
MS_ASSERT(model != nullptr);
if (meta_graph.allTensors() == nullptr) {
MS_LOG(ERROR) << "meta_graph is invalid, please check your model file.";
return false;
}
auto tensor_count = meta_graph.allTensors()->size();
for (uint32_t i = 0; i < tensor_count; ++i) {
auto *tensor = meta_graph.allTensors()->template GetAs<schema::Tensor>(i);
@@ -89,6 +97,11 @@ bool ConvertTensors(const T &meta_graph, Model *model) {
template <typename T = schema::MetaGraph>
int MetaGraphMappingSubGraph(const T &meta_graph, Model *model) {
MS_ASSERT(model != nullptr);
if (meta_graph.inputIndex() == nullptr || meta_graph.outputIndex() == nullptr || meta_graph.nodes() == nullptr ||
    meta_graph.allTensors() == nullptr) {
MS_LOG(ERROR) << "meta_graph is invalid, please check your model file.";
return RET_ERROR;
}
auto *subgraph = new (std::nothrow) Model::SubGraph();
if (subgraph == nullptr) {
MS_LOG(ERROR) << "new subGraph fail!";
@@ -109,7 +122,7 @@ int MetaGraphMappingSubGraph(const T &meta_graph, Model *model) {
for (uint32_t i = 0; i < node_count; ++i) {
subgraph->node_indices_.push_back(i);
}
auto tensor_count = meta_graph.nodes()->size();
auto tensor_count = meta_graph.allTensors()->size();
for (uint32_t i = 0; i < tensor_count; ++i) {
subgraph->tensor_indices_.push_back(i);
}


+ 26
- 15
mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc View File

@@ -29,10 +29,24 @@ namespace {
constexpr int DEFAULT_DIM_VALUE = -1;
}
namespace {
void FreeTensors(std::vector<Tensor *> input_tensors, std::vector<Tensor *> output_tensors) {
for (auto &tensor : input_tensors) {
delete tensor;
}
for (auto &tensor : output_tensors) {
delete tensor;
}
input_tensors.clear();
input_tensors.shrink_to_fit();
output_tensors.clear();
output_tensors.shrink_to_fit();
}

std::vector<Tensor *> ConvertTensorToLiteTensor(MetaGraphT *graph, const std::vector<uint32_t> &tensor_indexs,
const schema::PrimitiveType node_type) {
MS_ASSERT(graph != nullptr);
std::vector<Tensor *> lite_tensors;
bool convert_succ = true;
for (size_t i = 0; i < tensor_indexs.size(); i++) {
auto &tensorT = graph->allTensors.at(tensor_indexs[i]);
auto tensor_shape = tensorT->dims;
@@ -41,7 +55,8 @@ std::vector<Tensor *> ConvertTensorToLiteTensor(MetaGraphT *graph, const std::ve
TensorCategory(tensorT->nodeType, tensorT->dims.size(), TypeId(tensorT->dataType), tensorT->data.size()));
if (lite_tensor == nullptr) {
MS_LOG(ERROR) << "lite tensor is nullptr";
return std::vector<Tensor *>();
convert_succ = false;
break;
}
auto lite_tensor_size = tensorT->data.size() * sizeof(uint8_t);
// when tensorT as param input
@@ -52,15 +67,21 @@ std::vector<Tensor *> ConvertTensorToLiteTensor(MetaGraphT *graph, const std::ve
auto ret = lite_tensor->MallocData();
if (ret != 0) {
MS_LOG(ERROR) << "Malloc tensor data failed";
return std::vector<Tensor *>();
convert_succ = false;
break;
}
ret = memcpy_s(lite_tensor->MutableData(), lite_tensor->Size(), tensorT->data.data(), lite_tensor_size);
if (ret != EOK) {
MS_LOG(ERROR) << "memcpy error: " << ret;
return std::vector<Tensor *>();
convert_succ = false;
break;
}
lite_tensors.emplace_back(lite_tensor.release());
}
if (!convert_succ) {
FreeTensors(lite_tensors, {});
return {};
}
return lite_tensors;
}
void PrintTensorShape(const std::vector<Tensor *> &input_tensors, const std::vector<Tensor *> &output_tensors) {
@@ -81,13 +102,8 @@ void PrintTensorShape(const std::vector<Tensor *> &input_tensors, const std::vec
MS_LOG(DEBUG) << "output shape" << i++ << ":" << oss.str();
}
}
void FreeTensors(std::vector<Tensor *> input_tensors, std::vector<Tensor *> output_tensors) {
input_tensors.clear();
input_tensors.shrink_to_fit();
output_tensors.clear();
output_tensors.shrink_to_fit();
}
} // namespace

STATUS InferShapePass::Run(MetaGraphT *graph) {
MS_ASSERT(graph != nullptr);
for (auto idx : graph->inputIndex) {
@@ -131,12 +147,7 @@ STATUS InferShapePass::Run(MetaGraphT *graph) {
if (ret == RET_INFER_INVALID) {
MS_LOG(INFO) << "InferShape shouldn't be done before runtime, name: " << node->name
<< ", type: " << schema::EnumNamePrimitiveType(node->primitive->value.type) << "flag set to false.";
for (auto input_tensor : input_tensors) {
delete input_tensor;
}
for (auto output_tensor : output_tensors) {
delete output_tensor;
}
FreeTensors(input_tensors, output_tensors);
return RET_INFER_INVALID;
} else if (ret != RET_OK) {
MS_LOG(WARNING) << "InferShape failed, name: " << node->name


Loading…
Cancel
Save