diff --git a/mindspore/lite/java/native/runtime/context.cpp b/mindspore/lite/java/native/runtime/context.cpp index 921a547b9e..9b5576d34f 100644 --- a/mindspore/lite/java/native/runtime/context.cpp +++ b/mindspore/lite/java/native/runtime/context.cpp @@ -23,7 +23,11 @@ extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_context_Context_creat jint device_type, jint thread_num, jint cpu_bind_mode) { - auto *context = new mindspore::lite::Context(); + auto *context = new (std::nothrow) mindspore::lite::Context(); + if (context == nullptr) { + MS_LOG(ERROR) << "new Context fail!"; + return (jlong)context; + } switch (device_type) { case 0: context->device_ctx_.type = mindspore::lite::DT_CPU; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.cc index bb18df3494..1c3f28bee6 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.cc @@ -172,7 +172,11 @@ int ConvolutionSWFP16CPUKernel::ReSize() { memset(nhwc4_input_, 0, nhwc4_input_size); // init sliding window param - slidingWindow_param_ = new SlidingWindowParam; + slidingWindow_param_ = new (std::nothrow) SlidingWindowParam; + if (slidingWindow_param_ == nullptr) { + MS_LOG(ERROR) << "new SlidingWindowParam fail!"; + return RET_ERROR; + } InitSlidingParamConv(slidingWindow_param_, conv_param_, C4NUM); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.cc index f04a77fc0b..dd3ece9e35 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.cc @@ -166,7 +166,12 @@ int ConvolutionWinogradFP16CPUKernel::MallocFilterMatrix(int oc_block, int oc_bl return RET_ERROR; } memset(matrix_buffer, 0, trans_matrix_data_size); 
- trans_weight_ = new Matrix(); + trans_weight_ = new (std::nothrow) Matrix(); + if (trans_weight_ == nullptr) { + MS_LOG(ERROR) << "new Matrix fail!"; + free(matrix_buffer); + return RET_ERROR; + } trans_weight_->SetData(matrix_buffer); trans_weight_->SetNDim(5); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc index a2a394cb49..146e130502 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc @@ -130,7 +130,11 @@ int DeconvolutionDepthwiseFp16CPUKernel::Init() { int DeconvolutionDepthwiseFp16CPUKernel::ReSize() { FreeTmpBuffer(); - sliding_ = new SlidingWindowParam; + sliding_ = new (std::nothrow) SlidingWindowParam; + if (sliding_ == nullptr) { + MS_LOG(ERROR) << "new SlidingWindowParam fail!"; + return RET_ERROR; + } InitSlideParam(); // conv base init auto ret = ConvolutionBaseCPUKernel::Init(); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc index d2fb614130..61ff03393e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc @@ -99,7 +99,10 @@ kernel::LiteKernel *CpuArithmeticSelfFp32KernelCreator(const std::vectorInit(); if (ret != RET_OK) { MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc index c0726fc2a3..7ee96c3793 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc @@ -204,14 +204,6 @@ kernel::LiteKernel *CpuConvDwFp32KernelCreator(const std::vector(opParameter); - // if 
(param->kernel_h_ == 3 && param->kernel_w_ == 3 && param->stride_h_ == 1 && param->stride_w_ == 1 && - // param->dilation_h_ == 1 && param->dilation_w_ == 1) { - // kernel = new (std::nothrow) kernel::ConvolutionDepthwise3x3CPUKernel(opParameter, inputs, outputs, ctx, - // primitive); - // } else { - // kernel = new (std::nothrow) kernel::ConvolutionDepthwiseCPUKernel(opParameter, inputs, outputs, ctx, primitive); - // } if (kernel == nullptr) { MS_LOG(ERROR) << "kernel is nullptr."; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.cc index e54377de43..82282d4c3c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.cc @@ -142,7 +142,11 @@ int ConvolutionSWCPUKernel::ReSize() { memset(nhwc4_input_, 0, nhwc4_input_size); // init sliding window param - slidingWindow_param_ = new SlidingWindowParam; + slidingWindow_param_ = new (std::nothrow) SlidingWindowParam; + if (slidingWindow_param_ == nullptr) { + MS_LOG(ERROR) << "new SlidingWindowParam fail!"; + return RET_ERROR; + } InitSlidingParamConv(slidingWindow_param_, conv_param_, C4NUM); return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc index 916a186e5e..828d00884b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc @@ -145,7 +145,12 @@ int ConvolutionWinogradCPUKernel::MallocFilterMatrix(int oc_block, int oc_block_ return RET_ERROR; } memset(matrix_buffer, 0, trans_matrix_data_size); - trans_weight_ = new Matrix(); + trans_weight_ = new (std::nothrow) Matrix(); + if (trans_weight_ == nullptr) { + MS_LOG(ERROR) << "new Matrix fail!"; + free(matrix_buffer); + return RET_ERROR; + } 
trans_weight_->SetData(matrix_buffer); trans_weight_->SetNDim(5); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc index d877009b31..a49dcc663b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc @@ -97,7 +97,10 @@ kernel::LiteKernel *CpuActivationGradFp32KernelCreator(const std::vectorInit(); if (ret != RET_OK) { MS_LOG(ERROR) << "InferShape kernel failed, name: " << opParameter->name_ << ", type: " diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc index aaeca7c083..37fe1dbcac 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc @@ -120,13 +120,27 @@ int ArithmeticGradCPUKernel::InferShape() { arithmeticParameter_->out_shape_[i] = outShape[i]; } } + tile_data0 = new (std::nothrow) float[inputs_.at(0)->ElementsNum()]; - MS_ASSERT(tile_data0 != nullptr); + if (tile_data0 == nullptr) { + MS_LOG(ERROR) << "new data0 fail!"; + return RET_ERROR; + } tile_data1 = new (std::nothrow) float[inputs_.at(0)->ElementsNum()]; - MS_ASSERT(tile_data1 != nullptr); + if (tile_data1 == nullptr) { + MS_LOG(ERROR) << "new data1 fail!"; + delete[] tile_data0; + return RET_ERROR; + } + if (type() == PrimitiveType_DivGrad) { tile_data2 = new (std::nothrow) float[inputs_.at(0)->ElementsNum()]; - MS_ASSERT(tile_data2 != nullptr); + if (tile_data2 == nullptr) { + MS_LOG(ERROR) << "new data2 fail!"; + delete[] tile_data0; + delete[] tile_data1; + return RET_ERROR; + } } } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc index e21ea12322..f9243aa080 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc @@ -99,7 +99,10 @@ kernel::LiteKernel *CpuBNGradInputFp32KernelCreator(const std::vectorname = opDef.name()->str().data(); // parameter->type = opDef.attr_type(); auto *kernel = new (std::nothrow) BNGradInputCPUKernel(opParameter, inputs, outputs, ctx, primitive); - MS_ASSERT(kernel != nullptr); + if (kernel == nullptr) { + MS_LOG(ERROR) << "new BNGradInputCPUKernel fail!"; + return nullptr; + } auto ret = kernel->Init(); if (RET_OK != ret) { MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc index ee3f59972b..632187f29e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc @@ -141,7 +141,10 @@ kernel::LiteKernel *CpuConvGradFilterFp32KernelCreator(const std::vectorInit(); if (RET_OK != ret) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc index 0c10a11b9f..d6d4896555 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc @@ -63,7 +63,11 @@ int ConvolutionGradInputCPUKernel::Init() { int ws_size = conv_param->output_h_ * conv_param->output_w_ * conv_param->kernel_h_ * conv_param->kernel_w_ * conv_param->input_channel_ / conv_param->group_; - workspace = new float[ws_size]; + workspace = new (std::nothrow) float[ws_size]; + if (workspace == nullptr) { + MS_LOG(ERROR) << "new workspace fail!"; + return RET_ERROR; + } return 0; } @@ -121,7 +125,10 @@ kernel::LiteKernel *CpuConvGradInputFp32KernelCreator(const std::vectorInit(); if (0 != ret) { diff --git 
a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc index 3079e3637d..1e38c06425 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc @@ -122,8 +122,13 @@ kernel::LiteKernel *CpuArithmeticSelfInt8KernelCreator(const std::vectorInit(); if (ret != RET_OK) { MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc index a0099e5b1e..d1f40743c7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc @@ -151,7 +151,12 @@ int DeconvolutionDepthwiseInt8CPUKernel::Init() { int DeconvolutionDepthwiseInt8CPUKernel::ReSize() { FreeTmpBuffer(); - sliding = new SlidingWindowParam; + sliding = new (std::nothrow) SlidingWindowParam; + if (sliding == nullptr) { + MS_LOG(ERROR) << "new SlidingWindowParam fail!"; + return RET_ERROR; + } + InitSlideParam(); // conv base init diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc index 61a10a0144..1852050237 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc @@ -108,7 +108,11 @@ void DeConvInt8CPUKernel::CheckSupportOptimize() { } int DeConvInt8CPUKernel::InitParam() { - matmul_param_ = new MatMulParameter(); + matmul_param_ = new (std::nothrow) MatMulParameter(); + if (matmul_param_ == nullptr) { + MS_LOG(ERROR) << "new MatMulParameter fail!"; + return RET_ERROR; + } matmul_param_->row_ = conv_param_->input_h_ * conv_param_->input_w_; matmul_param_->deep_ = 
conv_param_->input_channel_; matmul_param_->col_ = conv_param_->output_channel_ * conv_param_->kernel_h_ * conv_param_->kernel_w_; @@ -121,6 +125,7 @@ int DeConvInt8CPUKernel::InitParam() { } else { /*todo */ } + return RET_OK; } diff --git a/mindspore/lite/test/models_mindspore.cfg b/mindspore/lite/test/models_mindspore.cfg index 802f5f33ee..f408c7ef61 100644 --- a/mindspore/lite/test/models_mindspore.cfg +++ b/mindspore/lite/test/models_mindspore.cfg @@ -1 +1,2 @@ ssd.pb +mobilenet_v2.pb diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc index da4f4ba41b..5452e80f46 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc @@ -319,6 +319,4 @@ TEST_F(CropTestFp32, CropTest11) { std::cout << "\n"; CompareOutputData(output, expect_out, kOutSize, 0.000001); } - } // namespace mindspore - diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc index 7a6a28bd03..2f9fd94767 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc @@ -90,6 +90,7 @@ kernel::ActivationOpenClKernel *create_kernel(lite::opencl::OpenCLAllocator *all if (kernel == nullptr) { delete param; MS_LOG(ERROR) << "Kernel:" << test_name << " create fail."; + // NOTE(review): param is already deleted above; a second delete here would be a double free. return nullptr; } auto ret = kernel->Init(); @@ -97,6 +98,8 @@ kernel::ActivationOpenClKernel *create_kernel(lite::opencl::OpenCLAllocator *all delete param; delete kernel; MS_LOG(ERROR) << "Init " << test_name << " fail."; + // NOTE(review): param and kernel are already deleted above; + // deleting them again here would be a double free (undefined behavior). return nullptr; } MS_LOG(INFO) << "Initialize input data"; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc 
b/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc index b3cec80bd1..4c068c8b82 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc @@ -195,7 +195,7 @@ void TestCase(const std::vector &shape_a, const std::vector &shape_b) delete kernel; delete arith_kernel; - + delete param; for (auto tensor : inputs) { delete tensor; } diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/avg_pooling_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/avg_pooling_tests.cc index 150d8efa26..1774875d1d 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/avg_pooling_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/avg_pooling_tests.cc @@ -142,6 +142,7 @@ TEST_F(TestAvgPoolingOpenCL, AvgPoolFp32) { delete tensor_out; delete pooling_kernel; delete pGraph; + delete param; lite::opencl::OpenCLRuntime::DeleteInstance(); } diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc index 06f47ed5a3..8ae10d5d95 100644 --- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc +++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc @@ -203,6 +203,7 @@ TEST_F(ConvScaleFusionTest, TestConvScaleNode) { for (auto &cnode : new_meta_graph->nodes) { ASSERT_EQ(cnode->primitive->value.AsConv2D()->hasBias, true); } + delete anf_transform; } TEST_F(ConvScaleFusionTest, TestDeptiwiseConvScaleNode) { @@ -217,5 +218,6 @@ TEST_F(ConvScaleFusionTest, TestDeptiwiseConvScaleNode) { ASSERT_EQ(cnode->primitive->value.AsDepthwiseConv2D()->hasBias, true); ASSERT_EQ(cnode->inputIndex.size(), 3); } + delete anf_transform; } } // namespace mindspore