
fix memory leak of opencl conv2d

tags/v1.2.0-rc1
wandongdong 4 years ago
commit f0a0268ece
3 changed files with 7 additions and 7 deletions
  1. mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d.cc (+1, -1)
  2. mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc (+5, -0)
  3. mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc (+1, -6)

mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d.cc (+1, -1)

@@ -264,11 +264,11 @@ int Conv2DOpenCLKernel::InitFilter() {
         ConvertConvWeight4DTo7D<float, float>(weight_tensor->data_c(), packed_weight_, CO_, KH_, KW_, CI_,
                                               block_size_.C);
       }
-      FreeDequantedWeight();
     }
   }
 
   allocator->UnmapBuffer(packed_weight_);
+  FreeDequantedWeight();
   return RET_OK;
 }




mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc (+5, -0)

@@ -125,6 +125,10 @@ void Conv2dTransposeOpenCLKernel::SetConstArgs() {
 }
 
 int Conv2dTransposeOpenCLKernel::InitWeights() {
+  auto ret = DequantWeight();
+  if (ret != RET_OK) {
+    return ret;
+  }
   ConvParameter *param = reinterpret_cast<ConvParameter *>(op_parameter_);
   int ci = in_tensors_[0]->shape()[3];
   int co = out_tensors_[0]->shape()[3];

@@ -180,6 +184,7 @@ int Conv2dTransposeOpenCLKernel::InitWeights() {
     }
   }
   allocator->UnmapBuffer(padWeight_);
+  FreeDequantedWeight();
 
   // init bias_(image2d mem)
   size_t im_dst_x, im_dst_y;


mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc (+1, -6)

@@ -93,10 +93,6 @@ int DepthwiseConv2dOpenCLKernel::Prepare() {
 }
 
 int DepthwiseConv2dOpenCLKernel::InitWeights() {
-  if (!in_tensors_.at(1)->IsConst()) {
-    MS_LOG(ERROR) << "DepthwiseConv2d don't support non-constant filter yet.";
-    return RET_ERROR;
-  }
   auto ret = DequantWeight();
   if (ret != RET_OK) {
     return ret;

@@ -124,7 +120,6 @@ int DepthwiseConv2dOpenCLKernel::InitWeights() {
     } else {  // int8 or int16
       std::function<int16_t(int16_t)> to_dtype = [](int16_t x) -> int16_t { return x; };
      PackNCHWToNC4HW4<int16_t, int16_t>(origin_weight, packed_weight_, 1, plane, out_info.C, to_dtype);
-      FreeDequantedWeight();
     }
   } else {
     packed_weight_ = allocator->Malloc(pack_weight_size * sizeof(float));

@@ -138,10 +133,10 @@ int DepthwiseConv2dOpenCLKernel::InitWeights() {
     } else {  // int8 or int16
       std::function<float(float)> to_dtype = [](float x) -> float { return x; };
       PackNCHWToNC4HW4<float, float>(origin_weight, packed_weight_, 1, plane, out_info.C, to_dtype);
-      FreeDequantedWeight();
     }
   }
   allocator->UnmapBuffer(packed_weight_);
+  FreeDequantedWeight();
 
   size_t dtype_size = sizeof(float);
   if (is_fp16 && in_tensors_.at(kBiasIndex)->data_type() == kNumberTypeFloat16) {
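The three diffs share one pattern: DequantWeight() allocates a temporary dequantized-weight buffer that has to be released exactly once, after the weights have been packed and the destination buffer unmapped, instead of being freed only on some branches (or, as in conv2d_transpose.cc before this change, not at all). Below is a minimal standalone C++ sketch of that pattern, not MindSpore code: FakeKernel, the malloc-based buffer handling, and the InitWeightsLeaky/InitWeightsFixed names are invented for illustration; only DequantWeight and FreeDequantedWeight mirror the kernel methods touched above.

#include <cstdlib>
#include <iostream>

struct FakeKernel {
  float *dequanted_weight_ = nullptr;  // temporary buffer produced by DequantWeight()

  int DequantWeight() {
    dequanted_weight_ = static_cast<float *>(std::malloc(1024 * sizeof(float)));
    return dequanted_weight_ != nullptr ? 0 : -1;  // 0 plays the role of RET_OK
  }

  void FreeDequantedWeight() {
    std::free(dequanted_weight_);
    dequanted_weight_ = nullptr;
  }

  // Pattern before the fix: the temporary buffer is freed only on the
  // int8/int16 branch, so the fp32 branch leaks it on every call.
  int InitWeightsLeaky(bool int8_path) {
    if (DequantWeight() != 0) return -1;
    if (int8_path) {
      // ... pack int8/int16 weights ...
      FreeDequantedWeight();
    } else {
      // ... pack fp32 weights ...  (buffer never released here)
    }
    return 0;
  }

  // Pattern after the fix: free once, unconditionally, after packing,
  // like the FreeDequantedWeight() call added after UnmapBuffer().
  int InitWeightsFixed(bool int8_path) {
    if (DequantWeight() != 0) return -1;
    if (int8_path) {
      // ... pack int8/int16 weights ...
    } else {
      // ... pack fp32 weights ...
    }
    FreeDequantedWeight();
    return 0;
  }
};

int main() {
  FakeKernel k;
  k.InitWeightsFixed(false);
  std::cout << "buffer released: " << std::boolalpha << (k.dequanted_weight_ == nullptr) << std::endl;
  return 0;
}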

