From 2cedb2caee5d8e3b4fb0e3ecf54b33c785e6e8f7 Mon Sep 17 00:00:00 2001
From: liuzhongkai
Date: Wed, 19 Aug 2020 04:13:45 -0700
Subject: [PATCH] fix memory leaks in OpenCL kernel creators and tests

Release kernels, tensors, and op parameters on error paths in the
depthwise conv2d and softmax kernel creators and in the activation and
CaffePRelu OpenCL unit tests, so that a failed allocation or Init() no
longer leaks the objects created before it.
---
 .../kernel/opencl/kernel/depthwise_conv2d.cc  |  4 +-
 .../runtime/kernel/opencl/kernel/softmax.cc   |  2 +
 .../runtime/kernel/opencl/activation_tests.cc | 27 ++++++--
 .../kernel/opencl/caffe_prelu_tests.cc        | 64 ++++++++++++++++---
 4 files changed, 81 insertions(+), 16 deletions(-)

diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc
index 128256a1ce..6a8ee02b4b 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc
@@ -201,9 +201,9 @@ kernel::LiteKernel *OpenCLDepthwiseConv2dKernelCreator(const std::vector
   auto ret = kernel->Init();
-  if (0 != ret) {
-    MS_LOG(ERROR) << "Init DepthwiseConv2dOpenCLKernel failed!";
+  if (ret != RET_OK) {
     delete kernel;
+    MS_LOG(ERROR) << "Init DepthwiseConv2dOpenCLKernel failed!";
     return nullptr;
   }
   return kernel;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc
index c0fb0b13e9..54e693a52e 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc
@@ -175,6 +175,8 @@ kernel::LiteKernel *OpenCLSoftMaxKernelCreator(const std::vector
   if (inputs[0]->shape()[0] > 1) {
     MS_LOG(ERROR) << "Init `Softmax` kernel failed: Unsupported multi-batch.";
+    delete kernel;
+    return nullptr;
   }
   auto ret = kernel->Init();
   if (0 != ret) {
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc
index 03f4e0ba8f..7a6a28bd03 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc
@@ -88,11 +88,14 @@ kernel::ActivationOpenClKernel *create_kernel(lite::opencl::OpenCLAllocator *all
   auto *kernel =
     new (std::nothrow) kernel::ActivationOpenClKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs);
   if (kernel == nullptr) {
+    delete param;
     MS_LOG(ERROR) << "Kernel:" << test_name << " create fail.";
     return nullptr;
   }
   auto ret = kernel->Init();
   if (ret != RET_OK) {
+    delete param;
+    delete kernel;
     MS_LOG(ERROR) << "Init " << test_name << " fail.";
     return nullptr;
   }
@@ -110,18 +113,22 @@ int RunSubGraphOpenCLKernel(const std::vector &inputs,
   std::vector<kernel::LiteKernel *> kernels{kernel};
   auto *sub_graph = new (std::nothrow) kernel::SubGraphOpenCLKernel(inputs, outputs, kernels, kernels, kernels);
   if (sub_graph == nullptr) {
+    delete kernel;
     MS_LOG(ERROR) << "Kernel SubGraphOpenCLKernel create fail.";
     return RET_ERROR;
   }
   MS_LOG(INFO) << "Initialize sub_graph.";
   auto ret = sub_graph->Init();
   if (ret != RET_OK) {
+    delete kernel;
+    delete sub_graph;
     MS_LOG(ERROR) << "Init sub_graph error.";
     return RET_ERROR;
   }
   MS_LOG(INFO) << "Run SubGraphOpenCLKernel.";
   ret = sub_graph->Run();
   if (ret != RET_OK) {
+    delete sub_graph;
     MS_LOG(ERROR) << "Run SubGraphOpenCLKernel error.";
     return RET_ERROR;
   }
@@ -130,7 +137,7 @@ int RunSubGraphOpenCLKernel(const std::vector &inputs,
 }
 
 TEST_F(TestActivationOpenCL, ActivationFp32_dim4) {
-  MS_LOG(INFO) << "Begin test:";
+  MS_LOG(INFO) << "Begin test!";
   auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance();
   ocl_runtime->Init();
   auto allocator = ocl_runtime->GetAllocator();
@@ -140,11 +147,21 @@ TEST_F(TestActivationOpenCL, ActivationFp32_dim4) {
   auto data_type = kNumberTypeFloat32;
   auto tensor_type = schema::NodeType_ValueNode;
-  auto *input_tensor = new lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC4, tensor_type);
-  auto *output_tensor = new lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC4, tensor_type);
+  auto *input_tensor =
+    new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC4, tensor_type);
+  if (input_tensor == nullptr) {
+    MS_LOG(ERROR) << "new input tensor error!";
+    return;
+  }
+  auto *output_tensor =
+    new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC4, tensor_type);
+  if (output_tensor == nullptr) {
+    MS_LOG(ERROR) << "new output tensor error!";
+    delete input_tensor;
+    return;
+  }
   std::vector<lite::tensor::Tensor *> inputs{input_tensor};
   std::vector<lite::tensor::Tensor *> outputs{output_tensor};
-  // freamework to do!!! allocate memory by hand
   inputs[0]->MallocData(allocator);
 
   std::map<std::string, int> Test_Activation_Type;
@@ -175,13 +192,11 @@ TEST_F(TestActivationOpenCL, ActivationFp32_dim4) {
     MS_LOG(INFO) << "==================output data================";
     printf_tensor(outputs[0]);
     CompareRes(output_tensor, Test_Res_File[it->first]);
-    delete kernel;
     it++;
   }
   delete input_tensor;
   delete output_tensor;
   lite::opencl::OpenCLRuntime::DeleteInstance();
-  return;
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/caffe_prelu_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/caffe_prelu_tests.cc
index 15f496114b..7c3834a748 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/caffe_prelu_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/caffe_prelu_tests.cc
@@ -93,15 +93,29 @@ TEST_F(TestCaffePReluOpenCL, CaffePReluFp32_dim4) {
   std::vector<int> output_shape = {1, 4, 3, 9};
   auto data_type = kNumberTypeFloat32;
   auto tensor_type = schema::NodeType_ValueNode;
-  auto *input_tensor = new lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type);
+  auto *input_tensor =
+    new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type);
+  if (input_tensor == nullptr) {
+    MS_LOG(ERROR) << "new input tensor error";
+    return;
+  }
   auto *output_tensor = new lite::tensor::Tensor(data_type, output_shape, schema::Format_NHWC4, tensor_type);
-  auto *weight_tensor =
-    new lite::tensor::Tensor(data_type, std::vector<int>{input_shape[3]}, schema::Format_NHWC, tensor_type);
+  if (output_tensor == nullptr) {
+    MS_LOG(ERROR) << "new output_tensor error";
+    delete input_tensor;
+    return;
+  }
+  auto *weight_tensor = new (std::nothrow)
+    lite::tensor::Tensor(data_type, std::vector<int>{input_shape[3]}, schema::Format_NHWC, tensor_type);
+  if (weight_tensor == nullptr) {
+    MS_LOG(ERROR) << "new weight_tensor error";
+    delete input_tensor;
+    delete output_tensor;
+    return;
+  }
+
   std::vector<lite::tensor::Tensor *> inputs{input_tensor, weight_tensor};
   std::vector<lite::tensor::Tensor *> outputs{output_tensor};
-  std::cout << input_tensor->ElementsNum() << std::endl;
-  std::cout << input_tensor->ElementsC4Num() << std::endl;
-  // freamework to do!!! allocate memory by hand
   inputs[0]->MallocData(allocator);
   inputs[1]->MallocData(allocator);
   std::cout << input_tensor->Size() << std::endl;
@@ -113,17 +127,33 @@ TEST_F(TestCaffePReluOpenCL, CaffePReluFp32_dim4) {
   MS_LOG(INFO) << "CaffePRelu==================weight data================";
   printf_tensor_caffeprelu(inputs[1], weight_tensor->ElementsNum());
 
-  auto param = new CaffePReluParameter();
+  auto param = new (std::nothrow) CaffePReluParameter();
+  if (param == nullptr) {
+    MS_LOG(ERROR) << "new param error!";
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
+    return;
+  }
   param->channel_num_ = input_shape[3];
   auto *caffeprelu_kernel =
     new (std::nothrow) kernel::CaffePReluOpenCLKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs);
   if (caffeprelu_kernel == nullptr) {
+    delete param;
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
     MS_LOG(ERROR) << "Create caffe prelu kernel error.";
     return;
   }
   auto ret = caffeprelu_kernel->Init();
   if (ret != RET_OK) {
+    delete param;
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
+    delete caffeprelu_kernel;
     MS_LOG(ERROR) << "caffeprelu_kernel init error.";
     return;
   }
@@ -132,24 +162,42 @@ TEST_F(TestCaffePReluOpenCL, CaffePReluFp32_dim4) {
   std::vector<kernel::LiteKernel *> kernels{caffeprelu_kernel};
   auto *sub_graph =
     new (std::nothrow) kernel::SubGraphOpenCLKernel({input_tensor}, outputs, kernels, kernels, kernels);
   if (sub_graph == nullptr) {
+    delete param;
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
+    delete caffeprelu_kernel;
     MS_LOG(ERROR) << "Create sub_graph kernel error.";
     return;
   }
   ret = sub_graph->Init();
   if (ret != RET_OK) {
+    delete param;
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
+    delete caffeprelu_kernel;
+    delete sub_graph;
     MS_LOG(ERROR) << "sub_graph init error.";
     return;
   }
   MS_LOG(INFO) << "Sub graph begin running!";
   ret = sub_graph->Run();
   if (ret != RET_OK) {
+    delete input_tensor;
+    delete output_tensor;
+    delete weight_tensor;
+    delete sub_graph;
     MS_LOG(ERROR) << "sub_graph run error.";
     return;
   }
   MS_LOG(INFO) << "CaffePRelu==================output data================";
   printf_tensor_caffeprelu(outputs[0], output_tensor->ElementsC4Num());
-  std::cout << "output date size:" << output_tensor->Size() << std::endl;
   CompareOutCaffePRelu(output_tensor, standard_answer_file);
+  delete input_tensor;
+  delete output_tensor;
+  delete weight_tensor;
+  delete sub_graph;
 }
 }  // namespace mindspore
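
Note, separate from the patch itself: every hunk above applies the same pattern, namely releasing whatever was allocated before the failing step on each early return. A minimal sketch of that creator pattern using std::unique_ptr is shown below; Kernel, CreateKernel, and Init are placeholder names, not MindSpore APIs, and the sketch is illustrative rather than a drop-in replacement for the code in this patch. Letting the smart pointer own the object until initialization succeeds removes the need for a delete in every error branch.

    #include <memory>
    #include <new>

    // Hypothetical stand-in for the kernel classes touched by this patch.
    struct Kernel {
      int Init() { return 0; }  // 0 plays the role of RET_OK
    };

    // Same shape as the kernel-creator functions: allocate, Init(), then either
    // hand the object to the caller or clean up. The unique_ptr owns the kernel
    // on every early return, so nothing leaks without per-branch deletes.
    Kernel *CreateKernel() {
      std::unique_ptr<Kernel> kernel(new (std::nothrow) Kernel());
      if (kernel == nullptr) {
        return nullptr;  // allocation failed; nothing to release
      }
      if (kernel->Init() != 0) {
        return nullptr;  // unique_ptr deletes the half-initialized kernel here
      }
      return kernel.release();  // success: transfer raw ownership to the caller
    }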