@@ -29,23 +29,26 @@ class TestConv2dTransposeOpenCL : public mindspore::CommonTest {
   TestConv2dTransposeOpenCL() {}
 };
 
-TEST_F(TestConv2dTransposeOpenCL, Conv2dTransposeFp32) {
+void RunTestCase(const std::vector<int> shape, const std::vector<std::string> file_path, bool fp16) {
   auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance();
+  if (fp16) {
+    ocl_runtime->SetFp16Enable(true);
+  }
   ocl_runtime->Init();
   auto allocator = ocl_runtime->GetAllocator();
-  int pad = 0;
-  int n = 1;
-  int h = 240;
-  int w = 240;
-  int kh = 2;
-  int kw = 2;
-  int ci = 128;
-  int co = 128;
+  int pad = shape[0];
+  int n = shape[1];
+  int h = shape[2];
+  int w = shape[3];
+  int kh = shape[4];
+  int kw = shape[5];
+  int ci = shape[6];
+  int co = shape[7];
   int oh = 2 * h - 1 + 2 * (kh - 1 - pad) - kh + 1;
   int ow = 2 * w - 1 + 2 * (kw - 1 - pad) - kw + 1;
 
   size_t input_size;
-  std::string input_path = "./test_data/conv2d_transpose/conv2d_transpose_fp32_input.bin";
+  std::string input_path = file_path[0];
   auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
   if (input_data == nullptr) {
     MS_LOG(ERROR) << "input_data load error.";
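Note: for the stride-2 deconvolution this kernel computes, the output-size formulas above reduce to oh = (h - 1) * 2 + kh - 2 * pad and ow = (w - 1) * 2 + kw - 2 * pad; with the shape both new test cases pass in (pad = 0, h = w = 240, kh = kw = 2) the expected output tensor is 1 x 480 x 480 x co.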
@@ -53,7 +56,7 @@ TEST_F(TestConv2dTransposeOpenCL, Conv2dTransposeFp32) {
   }
 
   size_t weight_size;
-  std::string weight_path = "./test_data/conv2d_transpose/conv2d_transpose_fp32_weight.bin";
+  std::string weight_path = file_path[1];
   auto weight_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(weight_path.c_str(), &weight_size));
   if (weight_data == nullptr) {
     MS_LOG(ERROR) << "weight_data load error.";
@@ -61,14 +64,15 @@ TEST_F(TestConv2dTransposeOpenCL, Conv2dTransposeFp32) {
   }
 
   size_t bias_size;
-  std::string bias_path = "./test_data/conv2d_transpose/conv2d_transpose_fp32_bias.bin";
+  std::string bias_path = file_path[2];
   auto bias_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(bias_path.c_str(), &bias_size));
   if (bias_data == nullptr) {
     MS_LOG(ERROR) << "bias_data load error.";
     return;
   }
   std::vector<int> input_shape = {n, h, w, ci};
-  auto tensor_x_ptr = std::make_unique<lite::tensor::Tensor>(TypeId(kNumberTypeFloat32), input_shape);
+  auto tensor_x_ptr =
+    std::make_unique<lite::tensor::Tensor>(TypeId(fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), input_shape);
   auto tensor_x = tensor_x_ptr.get();
   if (tensor_x == nullptr) {
     MS_LOG(ERROR) << "tensor_x create error.";
@@ -76,7 +80,8 @@ TEST_F(TestConv2dTransposeOpenCL, Conv2dTransposeFp32) {
   }
 
   std::vector<int> weight_shape = {co, kh, kw, ci};
-  auto tensor_w_ptr = std::make_unique<lite::tensor::Tensor>(TypeId(kNumberTypeFloat32), weight_shape);
+  auto tensor_w_ptr =
+    std::make_unique<lite::tensor::Tensor>(TypeId(fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), weight_shape);
   auto tensor_w = tensor_w_ptr.get();
   if (tensor_w == nullptr) {
     MS_LOG(ERROR) << "tensor_w create error.";
@@ -85,7 +90,8 @@ TEST_F(TestConv2dTransposeOpenCL, Conv2dTransposeFp32) {
   tensor_w->SetData(weight_data);
 
   std::vector<int> bias_shape = {co};
-  auto tensor_bias_ptr = std::make_unique<lite::tensor::Tensor>(TypeId(kNumberTypeFloat32), bias_shape);
+  auto tensor_bias_ptr =
+    std::make_unique<lite::tensor::Tensor>(TypeId(fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), bias_shape);
   auto tensor_bias = tensor_bias_ptr.get();
   if (tensor_bias == nullptr) {
     MS_LOG(ERROR) << "tensor_bias create error.";
@@ -94,7 +100,8 @@ TEST_F(TestConv2dTransposeOpenCL, Conv2dTransposeFp32) {
   tensor_bias->SetData(bias_data);
 
   std::vector<int> out_shape = {1, oh, ow, co};
-  auto tensor_out_ptr = std::make_unique<lite::tensor::Tensor>(TypeId(kNumberTypeFloat32), out_shape);
+  auto tensor_out_ptr =
+    std::make_unique<lite::tensor::Tensor>(TypeId(fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), out_shape);
   auto tensor_out = tensor_out_ptr.get();
   if (tensor_out == nullptr) {
     MS_LOG(ERROR) << "tensor_out create error.";
@@ -116,17 +123,18 @@ TEST_F(TestConv2dTransposeOpenCL, Conv2dTransposeFp32) {
   opParameter->pad_w_ = pad;
   opParameter->input_channel_ = ci;
   opParameter->output_channel_ = co;
-  auto arith_kernel_ptr = std::make_unique<kernel::Conv2dTransposeOpenCLKernel>(
+  auto op_kernel_ptr = std::make_unique<kernel::Conv2dTransposeOpenCLKernel>(
     reinterpret_cast<OpParameter *>(opParameter), inputs, outputs);
-  auto arith_kernel = arith_kernel_ptr.get();
-  if (arith_kernel == nullptr) {
-    MS_LOG(ERROR) << "arith_kernel create error.";
+  auto op_kernel = op_kernel_ptr.get();
+  if (op_kernel == nullptr) {
+    MS_LOG(ERROR) << "op_kernel create error.";
     return;
   }
-  arith_kernel->Init();
+  op_kernel->set_name("DeConv");
+  op_kernel->Init();
 
   inputs[0]->MallocData(allocator);
-  std::vector<kernel::LiteKernel *> kernels{arith_kernel};
+  std::vector<kernel::LiteKernel *> kernels{op_kernel};
   std::vector<lite::tensor::Tensor *> inputs_g{tensor_x};
   auto pGraph_ptr = std::make_unique<kernel::SubGraphOpenCLKernel>(inputs_g, outputs, kernels, kernels, kernels);
   auto pGraph = pGraph_ptr.get();
@@ -138,13 +146,16 @@ TEST_F(TestConv2dTransposeOpenCL, Conv2dTransposeFp32) {
   pGraph->Init();
   memcpy(inputs[0]->Data(), input_data, input_size);
   pGraph->Run();
-
+  using FLT = float;
+  if (fp16) {
+    using FLT = float16_t;
+  }
   std::cout << "==================output data=================" << std::endl;
-  float *output_data = reinterpret_cast<float *>(tensor_out->Data());
+  FLT *output_data = reinterpret_cast<FLT *>(tensor_out->Data());
   std::cout << std::endl;
   size_t output_size;
-  std::string output_path = "./test_data/conv2d_transpose/conv2d_transpose_fp32_output.bin";
-  auto correct_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(output_path.c_str(), &output_size));
+  std::string output_path = file_path[3];
+  auto correct_data = reinterpret_cast<FLT *>(mindspore::lite::ReadFile(output_path.c_str(), &output_size));
   if (correct_data == nullptr) {
     MS_LOG(ERROR) << "correct_data create error.";
     return;
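A note on the FLT alias added above: a using-declaration inside the if-block only rebinds the name within those braces, so after the block FLT is still float and the fp16 buffers are reinterpreted with the wrong element width. Selecting the element type has to happen at compile time; a minimal standalone sketch of that pattern is below (illustrative names only, not MindSpore API):

#include <iostream>

// Sketch: choose the readback element type via a template parameter instead of a
// runtime branch, because a type alias cannot depend on a runtime flag.
template <typename T>
void DumpBuffer(const void *buf, int count) {
  const T *data = static_cast<const T *>(buf);
  for (int i = 0; i < count; ++i) {
    std::cout << static_cast<float>(data[i]) << ", ";
  }
  std::cout << std::endl;
}

The test would then branch once on fp16 and call either DumpBuffer<float16_t>(tensor_out->Data(), size_n) or DumpBuffer<float>(tensor_out->Data(), size_n).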
@@ -152,7 +163,7 @@ TEST_F(TestConv2dTransposeOpenCL, Conv2dTransposeFp32) {
   int size_n = oh * ow * co;
   size_n = size_n > 100 ? 100 : size_n;
   for (int i = 0; i < size_n; i++) {
-    std::cout << output_data[i] << ", ";
+    std::cout << output_data[i] << ", " << correct_data[i] << " ";
     if ((i + 1) % co == 0) {
       std::cout << std::endl;
     }
@@ -160,10 +171,43 @@
   std::cout << std::endl;
 
   // compare
-  CompareOutputData(output_data, correct_data, oh * ow * co, 0.00001);
+  CommonTest::CompareOutputData(output_data, correct_data, oh * ow * co, 0.00001);
   inputs[0]->SetData(nullptr);
   outputs[0]->SetData(nullptr);
   MS_LOG(INFO) << "Test Conv2dTransposeFp32 passed";
   lite::opencl::OpenCLRuntime::DeleteInstance();
 }
+TEST_F(TestConv2dTransposeOpenCL, Conv2dTransposeFp32) {
+  int pad = 0;
+  int n = 1;
+  int h = 240;
+  int w = 240;
+  int kh = 2;
+  int kw = 2;
+  int ci = 128;
+  int co = 128;
+  std::vector<int> shape = {pad, n, h, w, kh, kw, ci, co};
+  std::vector<std::string> file_path = {"./test_data/conv2d_transpose/conv2d_transpose_fp32_input.bin",
+                                        "./test_data/conv2d_transpose/conv2d_transpose_fp32_weight.bin",
+                                        "./test_data/conv2d_transpose/conv2d_transpose_fp32_bias.bin",
+                                        "./test_data/conv2d_transpose/conv2d_transpose_fp32_output.bin"};
+  RunTestCase(shape, file_path, false);
+}
+
+TEST_F(TestConv2dTransposeOpenCL, Conv2dTransposeFp16) {
+  int pad = 0;
+  int n = 1;
+  int h = 240;
+  int w = 240;
+  int kh = 2;
+  int kw = 2;
+  int ci = 128;
+  int co = 128;
+  std::vector<int> shape = {pad, n, h, w, kh, kw, ci, co};
+  std::vector<std::string> file_path = {"./test_data/conv2d_transpose/conv2d_transpose_fp16_input.bin",
+                                        "./test_data/conv2d_transpose/conv2d_transpose_fp16_weight.bin",
+                                        "./test_data/conv2d_transpose/conv2d_transpose_fp16_bias.bin",
+                                        "./test_data/conv2d_transpose/conv2d_transpose_fp16_output.bin"};
+  RunTestCase(shape, file_path, true);
+}
 } // namespace mindspore
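With RunTestCase in place, further configurations only need a shape vector and four data files. A sketch of how an additional case could be added inside namespace mindspore, alongside the two cases above (the pad = 1 name and file paths are hypothetical; no such test data is part of this change):

TEST_F(TestConv2dTransposeOpenCL, Conv2dTransposeFp32Pad1) {
  // Hypothetical extra case: same 240x240x128 input and 2x2 kernel, but pad = 1.
  std::vector<int> shape = {1, 1, 240, 240, 2, 2, 128, 128};  // {pad, n, h, w, kh, kw, ci, co}
  // Placeholder file names; the corresponding test data would have to be generated first.
  std::vector<std::string> file_path = {"./test_data/conv2d_transpose/conv2d_transpose_fp32_pad1_input.bin",
                                        "./test_data/conv2d_transpose/conv2d_transpose_fp32_pad1_weight.bin",
                                        "./test_data/conv2d_transpose/conv2d_transpose_fp32_pad1_bias.bin",
                                        "./test_data/conv2d_transpose/conv2d_transpose_fp32_pad1_output.bin"};
  RunTestCase(shape, file_path, false);
}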