
add opencl kernels to libmindspore-lite

tags/v1.0.0
wandongdong · 5 years ago · commit 8d0082dfba
7 changed files with 16 additions and 26 deletions
  1. +13 -5   mindspore/lite/src/CMakeLists.txt
  2. +2 -16   mindspore/lite/src/runtime/kernel/opencl/CMakeLists.txt
  3. +0 -1    mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc
  4. +0 -1    mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.h
  5. +1 -0    mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc
  6. +0 -1    mindspore/lite/test/ut/src/runtime/kernel/opencl/avg_pooling_tests.cc
  7. +0 -2    mindspore/lite/test/ut/src/runtime/kernel/opencl/depthwise_conv2d_tests.cc

+13 -5   mindspore/lite/src/CMakeLists.txt

@@ -36,11 +36,19 @@ file(GLOB_RECURSE C_OPS_SRC ${CMAKE_CURRENT_SOURCE_DIR}/ops/*.cc)
 add_library(mindspore-lite SHARED ${LITE_SRC} ${C_OPS_SRC})
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-private-field")
 add_library(anf OBJECT ${ANF_SRC})
-target_link_libraries(mindspore-lite
-        anf
-        cpu_kernel_mid_
-        )
-
+if (SUPPORT_GPU)
+    add_subdirectory(runtime/kernel/opencl)
+    target_link_libraries(mindspore-lite
+            anf
+            cpu_kernel_mid_
+            opencl_kernel_lib_
+            )
+else ()
+    target_link_libraries(mindspore-lite
+            anf
+            cpu_kernel_mid_
+            )
+endif ()
 add_subdirectory(runtime/kernel/arm)
 if (PLATFORM_ARM32 OR PLATFORM_ARM64)
     target_link_libraries(mindspore-lite log)


+2 -16   mindspore/lite/src/runtime/kernel/opencl/CMakeLists.txt

@@ -1,16 +1,2 @@
-set(OPENCL_KERNEL_SRC
-    ${CMAKE_CURRENT_SOURCE_DIR}/subgraph_opencl_kernel.cc
-    ${CMAKE_CURRENT_SOURCE_DIR}/utils.cc
-    ${CMAKE_CURRENT_SOURCE_DIR}/kernel/arithmetic.cc
-    ${CMAKE_CURRENT_SOURCE_DIR}/kernel/convolution.cc
-    ${CMAKE_CURRENT_SOURCE_DIR}/kernel/depthwise_conv2d.cc
-    ${CMAKE_CURRENT_SOURCE_DIR}/kernel/pooling2d.cc
-    ${CMAKE_CURRENT_SOURCE_DIR}/kernel/matmul.cc
-    ${CMAKE_CURRENT_SOURCE_DIR}/kernel/softmax.cc
-    ${CMAKE_CURRENT_SOURCE_DIR}/kernel/concat.cc
-    ${CMAKE_CURRENT_SOURCE_DIR}/kernel/conv2d_transpose.cc
-    ${CMAKE_CURRENT_SOURCE_DIR}/kernel/transpose.cc
-    ${CMAKE_CURRENT_SOURCE_DIR}/kernel/reshape.cc
-    ${CMAKE_CURRENT_SOURCE_DIR}/kernel/activation.cc
-    ${CMAKE_CURRENT_SOURCE_DIR}/kernel/caffe_prelu.cc
-    )
+file(GLOB_RECURSE OPENCL_KERNEL_SRC ${CMAKE_CURRENT_SOURCE_DIR}/kernel/*.cc)
+add_library(opencl_kernel_lib_ OBJECT ${KERNEL_SRC} ${OPENCL_KERNEL_SRC})

+0 -1   mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc

@@ -131,7 +131,6 @@ int ArithmeticOpenCLKernel::Run() {
   MS_LOG(DEBUG) << this->name() << " Running!";
   auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance();

-  uint32_t element_num = out_tensors_[0]->ElementsC4Num();
   int arg_idx = 0;

   ocl_runtime->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->Data());
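
The deleted element_num local was no longer referenced in Run(), so removing it has no functional effect. For readers unfamiliar with the incrementing arg_idx pattern in the hunk above, here is a minimal sketch of the same style written against the standard Khronos OpenCL C++ bindings; it is a generic illustration with hypothetical names, not MindSpore's OpenCLRuntime::SetKernelArg wrapper, and the kernel and buffers are assumed to be created elsewhere.

#include <CL/cl2.hpp>

// Generic illustration of the incrementing arg_idx pattern used in Run() above,
// written against the Khronos OpenCL C++ bindings rather than MindSpore's wrapper.
void SetElementwiseArgs(cl::Kernel &kernel, const cl::Buffer &in0,
                        const cl::Buffer &in1, const cl::Buffer &out) {
  cl_uint arg_idx = 0;            // same role as arg_idx in the diff above
  kernel.setArg(arg_idx++, in0);  // first input operand
  kernel.setArg(arg_idx++, in1);  // second input operand
  kernel.setArg(arg_idx++, out);  // output buffer
}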


+0 -1   mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.h

@@ -40,7 +40,6 @@ class Conv2dTransposeOpenCLKernel : public OpenCLKernel {
   int GetImageSize(size_t idx, std::vector<size_t> *img_size) override;

  private:
-  ConvParameter *parameter_;
   cl::Kernel kernel_;
   void *padWeight_;
   void *bias_;


+1 -0   mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc

@@ -126,6 +126,7 @@ int ToFormatOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size
     im_dst_y = h;
   } else {
     MS_LOG(ERROR) << "Unsupported format. " << out_tensors_[0]->GetFormat();
+    return RET_ERROR;
   }
   img_size->clear();
   auto enable_fp16_ = lite::opencl::OpenCLRuntime::GetInstance()->GetFp16Enable();
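
The one added line makes GetImageSize fail fast: before this change the unsupported-format branch only logged an error and fell through to the image-size computation below. A minimal self-contained sketch of that guard-clause pattern; the Format enum, the return-code values, and the width/height mapping here are hypothetical stand-ins, not the MindSpore definitions.

#include <iostream>
#include <vector>

// Hypothetical stand-ins for the framework's format enum and return codes.
enum class Format { kNHWC, kNHWC4, kUnknown };
constexpr int RET_OK = 0;
constexpr int RET_ERROR = -1;

// Reject formats we cannot map up front, so callers never receive a partially
// computed image size (the behaviour the added `return RET_ERROR;` enforces).
int GetImageSize(Format format, size_t h, size_t w, std::vector<size_t> *img_size) {
  size_t im_dst_x = 0;
  size_t im_dst_y = 0;
  if (format == Format::kNHWC || format == Format::kNHWC4) {
    im_dst_x = w;  // illustrative mapping only
    im_dst_y = h;
  } else {
    std::cerr << "Unsupported format." << std::endl;
    return RET_ERROR;  // early return instead of falling through
  }
  img_size->clear();
  *img_size = {im_dst_x, im_dst_y};
  return RET_OK;
}

int main() {
  std::vector<size_t> img_size;
  return GetImageSize(Format::kUnknown, 4, 4, &img_size) == RET_ERROR ? 0 : 1;
}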


+0 -1   mindspore/lite/test/ut/src/runtime/kernel/opencl/avg_pooling_tests.cc

@@ -129,7 +129,6 @@ TEST_F(TestAvgPoolingOpenCL, AvgPoolFp32) {
     printf("%.3f ", output_data[i]);
   }
   printf("\n");
-  size_t output_size = tensor_out->Size();
   float expect[4] = {2.0f, 3.0f, 4.0f, 5.0f};

   for (int i = 0; i < tensor_out->ElementsNum(); ++i)


+0 -2   mindspore/lite/test/ut/src/runtime/kernel/opencl/depthwise_conv2d_tests.cc

@@ -54,7 +54,6 @@ void DepthWiseTestMain(ConvParameter *conv_param, T2 *input_data, T1 *weight_dat
   }
   // pack weight
   int OC4 = UP_DIV(conv_param->output_channel_, C4NUM);
-  int pack_weight_size = conv_param->output_channel_ * conv_param->kernel_h_ * conv_param->kernel_w_;
   T1 *packed_weight = weight_data;
@@ -103,7 +102,6 @@ void DepthWiseTestMain(ConvParameter *conv_param, T2 *input_data, T1 *weight_dat
   std::vector<kernel::LiteKernel *> kernels{pKernel.get()};
   std::vector<lite::tensor::Tensor *> inputs_{&tensor_a};
-  size_t C4 = UP_DIV(inputs[0]->Channel(), C4NUM);
   auto pGraph = std::make_unique<kernel::SubGraphOpenCLKernel>(inputs_, outputs, kernels, kernels, kernels);
   if (pGraph.get() == nullptr) {
     delete[] packed_input;
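
The deleted locals were no longer referenced by the test, while the UP_DIV/C4NUM arithmetic around them remains: it rounds a channel count up to whole blocks of four for the packed NHWC4 layout. A small sketch of that arithmetic, assuming UP_DIV carries the usual ceil-division definition from the MindSpore Lite headers:

#include <cstdio>

// Assumed to match the UP_DIV / C4NUM macros used in the test above:
// ceil division into channel blocks of four.
#define C4NUM 4
#define UP_DIV(x, y) (((x) + (y) - 1) / (y))

int main() {
  int output_channel = 6;
  int OC4 = UP_DIV(output_channel, C4NUM);  // 6 channels -> 2 blocks of 4
  int packed_channel = OC4 * C4NUM;         // padded out to 8 channels
  printf("OC4=%d, packed_channel=%d\n", OC4, packed_channel);
  return 0;
}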

