

/**
 * \file dnn/test/aarch64/relayout.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "test/aarch64/fixture.h"

#include "test/common/benchmarker.h"
#include "test/common/checker.h"
#include "test/common/relayout.h"
#include "test/common/rng.h"
#include "test/common/task_record_check.h"

namespace megdnn {
namespace test {
namespace {
template <typename tag>
class AARCH64_RELAYOUT : public AARCH64 {};
TYPED_TEST_CASE(AARCH64_RELAYOUT, relayout::test_types);
TYPED_TEST(AARCH64_RELAYOUT, run) {
    relayout::run_test<TypeParam>(this->handle());
}
} // namespace

TEST_F(AARCH64, Relayout) {
    Checker<Relayout> checker(handle());
    std::vector<::megdnn::DType> dtype_vec;
    dtype_vec.push_back(dtype::Float32());
    dtype_vec.push_back(dtype::Int16());
    dtype_vec.push_back(dtype::Uint16());
    dtype_vec.push_back(dtype::Int8());
    for (auto dtype : dtype_vec) {
        // non-contiguous src view copied into a fully contiguous dst of the same shape
        TensorLayout src({1, 54, 112, 256}, {54, 1, 16384, 64}, dtype);
        TensorLayout dst({1, 54, 112, 256}, {1548288, 28672, 256, 1}, dtype);
        checker.execl({src, dst});
    }
}

TEST_F(AARCH64, RelayoutNonContig) {
    Checker<Relayout> checker(handle());
    std::vector<::megdnn::DType> dtype_vec;
    dtype_vec.push_back(dtype::Float32());
    dtype_vec.push_back(dtype::Uint16());
    dtype_vec.push_back(dtype::Int16());
    dtype_vec.push_back(dtype::Int8());
    for (auto dtype : dtype_vec) {
        // src is a permuted, padded (non-contiguous) view; dst is contiguous
        TensorLayout src({4, 90, 15, 29}, {41760, 1, 2784, 96}, dtype);
        TensorLayout dst({4, 90, 15, 29}, {39150, 435, 29, 1}, dtype);
        checker.execl({src, dst});
    }
}

TEST_F(AARCH64, RelayoutBig) {
    Checker<Relayout> checker(handle());
    ConsecutiveRNG rng;
    checker.set_rng(0, &rng);
    // 512x512 transpose: src is the column-major view, dst is row-major contiguous
    int m = 512;
    int n = 512;
    TensorLayout src({(size_t)m, (size_t)n}, {1, n}, dtype::Float32());
    TensorLayout dst({(size_t)m, (size_t)n}, {n, 1}, dtype::Float32());
    checker.execl({src, dst});
}

TEST_F(AARCH64, RelayoutRecord) {
    // same layouts as the Relayout test above, executed through the task-record path
    TaskRecordChecker<Relayout> checker(0);
    std::vector<::megdnn::DType> dtype_vec;
    dtype_vec.push_back(dtype::Float32());
    dtype_vec.push_back(dtype::Int16());
    dtype_vec.push_back(dtype::Uint16());
    dtype_vec.push_back(dtype::Int8());
    for (auto dtype : dtype_vec) {
        TensorLayout src({1, 54, 112, 256}, {54, 1, 16384, 64}, dtype);
        TensorLayout dst({1, 54, 112, 256}, {1548288, 28672, 256, 1}, dtype);
        checker.execl({src, dst});
    }
}

#if MEGDNN_WITH_BENCHMARK
TEST_F(AARCH64, BENCHMARK_Relayout) {
    constexpr size_t WARM_RUNS = 100;
    constexpr size_t RUNS = 600;
    auto dtype = dtype::Float32();
    Benchmarker<Relayout> benchmarker_relayout(handle());
    Benchmarker<Relayout> benchmarker_fbk_relayout(fallback_handle());
    benchmarker_relayout.set_times(WARM_RUNS);
    benchmarker_fbk_relayout.set_times(WARM_RUNS);
    int m = 512;
    int n = 512;
    TensorLayout src({(size_t)m, (size_t)n}, {1, n}, dtype);
    TensorLayout dst({(size_t)m, (size_t)n}, {n, 1}, dtype);
    TensorLayoutArray tensor_case;
    tensor_case.push_back(src);
    tensor_case.push_back(dst);
    // warm-up passes, then timed passes on both the aarch64 and fallback handles
    benchmarker_relayout.exec(tensor_case);
    benchmarker_fbk_relayout.exec(tensor_case);
    benchmarker_relayout.set_times(RUNS);
    benchmarker_fbk_relayout.set_times(RUNS);
    auto used = benchmarker_relayout.exec(tensor_case) / RUNS;
    auto fbk_used = benchmarker_fbk_relayout.exec(tensor_case) / RUNS;
    // bandwidth in GB/s: m * n elements, each read and written once (factor 2),
    // dtype.size() bytes per element; the 1e-6 factor assumes `used` is in ms
    float bw = 2.f * m * n * 1e-6 / used * dtype.size();
    float fbk_bw = 2.f * m * n * 1e-6 / fbk_used * dtype.size();
    printf("run: %s -> %s , %f GB/s, fbk %f GB/s, speedup %f\n",
           src.to_string().c_str(), dst.to_string().c_str(), bw, fbk_bw, bw / fbk_bw);
}

TEST_F(AARCH64, BENCHMARK_Relayout_2) {
    constexpr size_t WARM_RUNS = 100;
    constexpr size_t RUNS = 600;
    auto dtype = dtype::Float32();
    Benchmarker<Relayout> benchmarker_relayout(handle());
    Benchmarker<Relayout> benchmarker_fbk_relayout(fallback_handle());
    benchmarker_relayout.set_times(WARM_RUNS);
    benchmarker_fbk_relayout.set_times(WARM_RUNS);
    // m * n must match the element count of the layouts below:
    // 54 * 28672 = 1 * 54 * 112 * 256 = 1548288
    int m = 54;
    int n = 28672;
    TensorLayout src({1, 54, 112, 256}, {54, 1, 16384, 64}, dtype);
    TensorLayout dst({1, 54, 112, 256}, {1548288, 28672, 256, 1}, dtype);
    TensorLayoutArray tensor_case;
    tensor_case.push_back(src);
    tensor_case.push_back(dst);
    benchmarker_relayout.exec(tensor_case);
    benchmarker_fbk_relayout.exec(tensor_case);
    benchmarker_relayout.set_times(RUNS);
    benchmarker_fbk_relayout.set_times(RUNS);
    auto used = benchmarker_relayout.exec(tensor_case) / RUNS;
    auto fbk_used = benchmarker_fbk_relayout.exec(tensor_case) / RUNS;
    float bw = 2.f * m * n * 1e-6 / used * dtype.size();
    float fbk_bw = 2.f * m * n * 1e-6 / fbk_used * dtype.size();
    printf("run: %s -> %s , %f GB/s, fbk %f GB/s, speedup %f\n",
           src.to_string().c_str(), dst.to_string().c_str(), bw, fbk_bw, bw / fbk_bw);
}
#endif
} // namespace test
} // namespace megdnn
// vim: syntax=cpp.doxygen
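
For readers skimming the test cases: every TensorLayout above pairs a shape with explicit per-dimension strides, and the Relayout operator copies each logical element of the source view into the destination view. The following is a minimal standalone sketch, not MegEngine code; offset_of is a hypothetical helper, and the hard-coded strides mirror the 512x512 transpose case from RelayoutBig. It shows the offset arithmetic that such a (shape, stride) pair implies.

// Standalone sketch (not part of the test file): how a (shape, stride) pair
// maps a logical coordinate to a linear element offset.
#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical helper for illustration: offset = sum(idx[i] * stride[i]).
static size_t offset_of(const std::vector<size_t>& idx, const std::vector<ptrdiff_t>& stride) {
    size_t off = 0;
    for (size_t i = 0; i < idx.size(); ++i)
        off += idx[i] * stride[i];
    return off;
}

int main() {
    const size_t n = 512;
    std::vector<ptrdiff_t> src_stride{1, (ptrdiff_t)n};  // column-major view (as in RelayoutBig src)
    std::vector<ptrdiff_t> dst_stride{(ptrdiff_t)n, 1};  // row-major contiguous (as in RelayoutBig dst)
    // The same logical element (3, 7) lives at different linear offsets in the
    // two views; relayout reads src at its offset and writes dst at its own.
    std::vector<size_t> idx{3, 7};
    std::printf("src offset %zu, dst offset %zu\n",
                offset_of(idx, src_stride), offset_of(idx, dst_stride));
    return 0;
}

Because the two views differ only in their strides, each coordinate lands at a different linear offset in src and dst; that per-element remapping is what the checker cases and benchmarks above exercise.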

The MegEngine installation package bundles the CUDA environment needed to run code on the GPU, so there is no separate CPU or GPU build. To run GPU programs, make sure the machine has a GPU and that the driver is installed. If you would like to try deep-learning development on a cloud GPU platform, you are welcome to visit the MegStudio platform.