Browse Source

Memory check and fix

tags/v0.7.0-beta
kai00 5 years ago
parent
commit
d7efb5c270
22 changed files with 100 additions and 29 deletions
  1. +5
    -1
      mindspore/lite/java/native/runtime/context.cpp
  2. +5
    -1
      mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.cc
  3. +6
    -1
      mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.cc
  4. +5
    -1
      mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc
  5. +4
    -1
      mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc
  6. +0
    -8
      mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc
  7. +5
    -1
      mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.cc
  8. +6
    -1
      mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc
  9. +4
    -1
      mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc
  10. +17
    -3
      mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc
  11. +4
    -1
      mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc
  12. +4
    -1
      mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc
  13. +9
    -2
      mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc
  14. +6
    -1
      mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc
  15. +6
    -1
      mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc
  16. +6
    -1
      mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc
  17. +1
    -0
      mindspore/lite/test/models_mindspore.cfg
  18. +0
    -2
      mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc
  19. +3
    -0
      mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc
  20. +1
    -1
      mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc
  21. +1
    -0
      mindspore/lite/test/ut/src/runtime/kernel/opencl/avg_pooling_tests.cc
  22. +2
    -0
      mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc

+ 5
- 1
mindspore/lite/java/native/runtime/context.cpp View File

@@ -23,7 +23,11 @@ extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_context_Context_creat
jint device_type,
jint thread_num,
jint cpu_bind_mode) {
auto *context = new mindspore::lite::Context();
auto *context = new (std::nothrow) mindspore::lite::Context();
if (context == nullptr) {
MS_LOG(ERROR) << "new Context fail!";
return (jlong)context;
}
switch (device_type) {
case 0:
context->device_ctx_.type = mindspore::lite::DT_CPU;


+ 5
- 1
mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.cc View File

@@ -172,7 +172,11 @@ int ConvolutionSWFP16CPUKernel::ReSize() {
memset(nhwc4_input_, 0, nhwc4_input_size);

// init sliding window param
slidingWindow_param_ = new SlidingWindowParam;
slidingWindow_param_ = new (std::nothrow) SlidingWindowParam;
if (slidingWindow_param_ == nullptr) {
MS_LOG(ERROR) << "new SlidingWindowParam fail!";
return RET_ERROR;
}
InitSlidingParamConv(slidingWindow_param_, conv_param_, C4NUM);
return RET_OK;
}


+ 6
- 1
mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.cc View File

@@ -166,7 +166,12 @@ int ConvolutionWinogradFP16CPUKernel::MallocFilterMatrix(int oc_block, int oc_bl
return RET_ERROR;
}
memset(matrix_buffer, 0, trans_matrix_data_size);
trans_weight_ = new Matrix();
trans_weight_ = new (std::nothrow) Matrix();
if (trans_weight_ == nullptr) {
MS_LOG(ERROR) << "new Matrix fail!";
free(matrix_buffer);
return RET_ERROR;
}
trans_weight_->SetData(matrix_buffer);
trans_weight_->SetNDim(5);



+ 5
- 1
mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc View File

@@ -130,7 +130,11 @@ int DeconvolutionDepthwiseFp16CPUKernel::Init() {
int DeconvolutionDepthwiseFp16CPUKernel::ReSize() {
FreeTmpBuffer();

sliding_ = new SlidingWindowParam;
sliding_ = new (std::nothrow) SlidingWindowParam;
if (sliding_ == nullptr) {
MS_LOG(ERROR) << "new SlidingWindowParam fail!";
return RET_ERROR;
}
InitSlideParam();
// conv base init
auto ret = ConvolutionBaseCPUKernel::Init();


+ 4
- 1
mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc View File

@@ -99,7 +99,10 @@ kernel::LiteKernel *CpuArithmeticSelfFp32KernelCreator(const std::vector<lite::t
return nullptr;
}
auto *kernel = new (std::nothrow) ArithmeticSelfCPUKernel(opParameter, inputs, outputs, ctx, primitive);
MS_ASSERT(kernel != nullptr);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ArithmeticSelfCPUKernel fail!";
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "


+ 0
- 8
mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc View File

@@ -204,14 +204,6 @@ kernel::LiteKernel *CpuConvDwFp32KernelCreator(const std::vector<lite::tensor::T
MS_ASSERT(desc.type == schema::PrimitiveType_DepthwiseConv2D);
kernel::LiteKernel *kernel;
kernel = new (std::nothrow) kernel::ConvolutionDepthwiseCPUKernel(opParameter, inputs, outputs, ctx, primitive);
// auto param = reinterpret_cast<ConvParameter *>(opParameter);
// if (param->kernel_h_ == 3 && param->kernel_w_ == 3 && param->stride_h_ == 1 && param->stride_w_ == 1 &&
// param->dilation_h_ == 1 && param->dilation_w_ == 1) {
// kernel = new (std::nothrow) kernel::ConvolutionDepthwise3x3CPUKernel(opParameter, inputs, outputs, ctx,
// primitive);
// } else {
// kernel = new (std::nothrow) kernel::ConvolutionDepthwiseCPUKernel(opParameter, inputs, outputs, ctx, primitive);
// }

if (kernel == nullptr) {
MS_LOG(ERROR) << "kernel is nullptr.";


+ 5
- 1
mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.cc View File

@@ -142,7 +142,11 @@ int ConvolutionSWCPUKernel::ReSize() {
memset(nhwc4_input_, 0, nhwc4_input_size);

// init sliding window param
slidingWindow_param_ = new SlidingWindowParam;
slidingWindow_param_ = new (std::nothrow) SlidingWindowParam;
if (slidingWindow_param_ == nullptr) {
MS_LOG(ERROR) << "new SlidingWindowParam fail!";
return RET_ERROR;
}
InitSlidingParamConv(slidingWindow_param_, conv_param_, C4NUM);

return RET_OK;


+ 6
- 1
mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc View File

@@ -145,7 +145,12 @@ int ConvolutionWinogradCPUKernel::MallocFilterMatrix(int oc_block, int oc_block_
return RET_ERROR;
}
memset(matrix_buffer, 0, trans_matrix_data_size);
trans_weight_ = new Matrix();
trans_weight_ = new (std::nothrow) Matrix();
if (trans_weight_ == nullptr) {
MS_LOG(ERROR) << "new Matrix fail!";
free(matrix_buffer);
return RET_ERROR;
}
trans_weight_->SetData(matrix_buffer);
trans_weight_->SetNDim(5);



+ 4
- 1
mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc View File

@@ -97,7 +97,10 @@ kernel::LiteKernel *CpuActivationGradFp32KernelCreator(const std::vector<lite::t
MS_ASSERT(opParameter != nullptr);
MS_ASSERT(desc.type == schema::PrimitiveType_ActivationGrad);
auto *kernel = new (std::nothrow) ActivationGradCPUKernel(opParameter, inputs, outputs, ctx, primitive);
MS_ASSERT(kernel != nullptr);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ActivationGradCPUKernel fail!";
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "InferShape kernel failed, name: " << opParameter->name_ << ", type: "


+ 17
- 3
mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc View File

@@ -120,13 +120,27 @@ int ArithmeticGradCPUKernel::InferShape() {
arithmeticParameter_->out_shape_[i] = outShape[i];
}
}

tile_data0 = new (std::nothrow) float[inputs_.at(0)->ElementsNum()];
MS_ASSERT(tile_data0 != nullptr);
if (tile_data0 == nullptr) {
MS_LOG(ERROR) << "new data0 fail!";
return RET_ERROR;
}
tile_data1 = new (std::nothrow) float[inputs_.at(0)->ElementsNum()];
MS_ASSERT(tile_data1 != nullptr);
if (tile_data1 == nullptr) {
MS_LOG(ERROR) << "new data1 fail!";
delete tile_data0;
return RET_ERROR;
}

if (type() == PrimitiveType_DivGrad) {
tile_data2 = new (std::nothrow) float[inputs_.at(0)->ElementsNum()];
MS_ASSERT(tile_data2 != nullptr);
if (tile_data2 == nullptr) {
MS_LOG(ERROR) << "new data2 fail!";
delete tile_data0;
delete tile_data1;
return RET_ERROR;
}
}
}



+ 4
- 1
mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc View File

@@ -99,7 +99,10 @@ kernel::LiteKernel *CpuBNGradInputFp32KernelCreator(const std::vector<lite::tens
// parameter->name = opDef.name()->str().data();
// parameter->type = opDef.attr_type();
auto *kernel = new (std::nothrow) BNGradInputCPUKernel(opParameter, inputs, outputs, ctx, primitive);
MS_ASSERT(kernel != nullptr);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new BNGradInputCPUKernel fail!";
return nullptr;
}
auto ret = kernel->Init();
if (RET_OK != ret) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "


+ 4
- 1
mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc View File

@@ -141,7 +141,10 @@ kernel::LiteKernel *CpuConvGradFilterFp32KernelCreator(const std::vector<lite::t
MS_ASSERT(desc.type == schema::PrimitiveType_Conv2DGradFilter);

auto *kernel = new (std::nothrow) ConvolutionGradFilterCPUKernel(opParameter, inputs, outputs, ctx, primitive);
MS_ASSERT(kernel != nullptr);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new kernel fail!";
return nullptr;
}

auto ret = kernel->Init();
if (RET_OK != ret) {


+ 9
- 2
mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc View File

@@ -63,7 +63,11 @@ int ConvolutionGradInputCPUKernel::Init() {
int ws_size = conv_param->output_h_ * conv_param->output_w_ * conv_param->kernel_h_ * conv_param->kernel_w_ *
conv_param->input_channel_ / conv_param->group_;

workspace = new float[ws_size];
workspace = new (std::nothrow) float[ws_size];
if (workspace == nullptr) {
MS_LOG(ERROR) << "new workspace fail!";
return RET_ERROR;
}
return 0;
}

@@ -121,7 +125,10 @@ kernel::LiteKernel *CpuConvGradInputFp32KernelCreator(const std::vector<lite::te
MS_ASSERT(desc.type == schema::PrimitiveType_Conv2DGradInput);

auto *kernel = new (std::nothrow) ConvolutionGradInputCPUKernel(opParameter, inputs, outputs, ctx, primitive);
MS_ASSERT(kernel != nullptr);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new kernel fail!";
return nullptr;
}

auto ret = kernel->Init();
if (0 != ret) {


+ 6
- 1
mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc View File

@@ -122,8 +122,13 @@ kernel::LiteKernel *CpuArithmeticSelfInt8KernelCreator(const std::vector<lite::t
MS_LOG(ERROR) << "Creator failed, opParameter is nullptr!";
return nullptr;
}

auto *kernel = new (std::nothrow) ArithmeticSelfInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
MS_ASSERT(kernel != nullptr);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ArithmeticSelfInt8CPUKernel fail!";
return nullptr;
}

auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "


+ 6
- 1
mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc View File

@@ -151,7 +151,12 @@ int DeconvolutionDepthwiseInt8CPUKernel::Init() {
int DeconvolutionDepthwiseInt8CPUKernel::ReSize() {
FreeTmpBuffer();

sliding = new SlidingWindowParam;
sliding = new (std::nothrow) SlidingWindowParam;
if (sliding == nullptr) {
MS_LOG(ERROR) << "new SlidingWindowParam fail!";
return RET_ERROR;
}

InitSlideParam();

// conv base init


+ 6
- 1
mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc View File

@@ -108,7 +108,11 @@ void DeConvInt8CPUKernel::CheckSupportOptimize() {
}

int DeConvInt8CPUKernel::InitParam() {
matmul_param_ = new MatMulParameter();
matmul_param_ = new (std::nothrow) MatMulParameter();
if (matmul_param_ == nullptr) {
MS_LOG(ERROR) << "new MatMulParameter fail!";
return RET_ERROR;
}
matmul_param_->row_ = conv_param_->input_h_ * conv_param_->input_w_;
matmul_param_->deep_ = conv_param_->input_channel_;
matmul_param_->col_ = conv_param_->output_channel_ * conv_param_->kernel_h_ * conv_param_->kernel_w_;
@@ -121,6 +125,7 @@ int DeConvInt8CPUKernel::InitParam() {
} else {
/*todo */
}

return RET_OK;
}



+ 1
- 0
mindspore/lite/test/models_mindspore.cfg View File

@@ -1 +1,2 @@
ssd.pb
mobilenet_v2.pb

+ 0
- 2
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc View File

@@ -319,6 +319,4 @@ TEST_F(CropTestFp32, CropTest11) {
std::cout << "\n";
CompareOutputData(output, expect_out, kOutSize, 0.000001);
}

} // namespace mindspore


+ 3
- 0
mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc View File

@@ -90,6 +90,7 @@ kernel::ActivationOpenClKernel *create_kernel(lite::opencl::OpenCLAllocator *all
if (kernel == nullptr) {
delete param;
MS_LOG(ERROR) << "Kernel:" << test_name << " create fail.";
delete param;
return nullptr;
}
auto ret = kernel->Init();
@@ -97,6 +98,8 @@ kernel::ActivationOpenClKernel *create_kernel(lite::opencl::OpenCLAllocator *all
delete param;
delete kernel;
MS_LOG(ERROR) << "Init " << test_name << " fail.";
delete kernel;
delete param;
return nullptr;
}
MS_LOG(INFO) << "Initialize input data";


+ 1
- 1
mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc View File

@@ -141,7 +141,7 @@ void TestCase(const std::vector<int> &shape_a, const std::vector<int> &shape_b)

delete kernel;
delete arith_kernel;
delete param;
for (auto tensor : inputs) {
delete tensor;
}


+ 1
- 0
mindspore/lite/test/ut/src/runtime/kernel/opencl/avg_pooling_tests.cc View File

@@ -123,6 +123,7 @@ TEST_F(TestAvgPoolingOpenCL, AvgPoolFp32) {
delete tensor_out;
delete pooling_kernel;
delete pGraph;
delete param;
lite::opencl::OpenCLRuntime::DeleteInstance();
}



+ 2
- 0
mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc View File

@@ -203,6 +203,7 @@ TEST_F(ConvScaleFusionTest, TestConvScaleNode) {
for (auto &cnode : new_meta_graph->nodes) {
ASSERT_EQ(cnode->primitive->value.AsConv2D()->hasBias, true);
}
delete anf_transform;
}

TEST_F(ConvScaleFusionTest, TestDeptiwiseConvScaleNode) {
@@ -217,5 +218,6 @@ TEST_F(ConvScaleFusionTest, TestDeptiwiseConvScaleNode) {
ASSERT_EQ(cnode->primitive->value.AsDepthwiseConv2D()->hasBias, true);
ASSERT_EQ(cnode->inputIndex.size(), 3);
}
delete anf_transform;
}
} // namespace mindspore

Loading…
Cancel
Save