You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

pooling_multi_thread.cpp 15 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375
  1. /**
  2. * \file dnn/test/arm_common/pooling_multi_thread.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. */
  11. #include "test/arm_common/fixture.h"
  12. #include "test/common/pooling.h"
  13. #include "test/common/checker.h"
  14. #include "test/common/benchmarker.h"
  15. #include "test/common/rng.h"
  16. namespace megdnn {
  17. namespace test {
  18. /*********************** mutli threads *********************************/
  19. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING) {
  20. using Param = param::Pooling;
  21. for (size_t ih: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  22. for (size_t iw: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  23. for (size_t p: {1, 2})
  24. {
  25. Param param;
  26. param.mode = Param::Mode::MAX;
  27. param.window_h = param.window_w = 3;
  28. param.stride_h = param.stride_w = 2;
  29. param.pad_h = param.pad_w = p;
  30. Checker<Pooling> checker(handle());
  31. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  32. param.mode = Param::Mode::AVERAGE;
  33. param.window_h = param.window_w = 3;
  34. param.stride_h = param.stride_w = 2;
  35. param.pad_h = param.pad_w = p;
  36. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  37. param.mode = Param::Mode::MAX;
  38. param.window_h = param.window_w = 4;
  39. param.stride_h = param.stride_w = 2;
  40. param.pad_h = param.pad_w = p;
  41. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  42. param.mode = Param::Mode::MAX;
  43. param.window_h = param.window_w = 5;
  44. param.stride_h = param.stride_w = 2;
  45. param.pad_h = param.pad_w = p;
  46. if (ih + p * 2 >= 5 && iw + p * 2 >= 5)
  47. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  48. }
  49. }
  50. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_MAX_W3x3_S2x2_NCHW44)
  51. {
  52. // clang-format off
  53. for (size_t ih: {3, 5, 10})
  54. for (size_t iw: {3, 5, 7, 9, 15, 20})
  55. for (size_t ph: {0})
  56. for (size_t pw: {0})
  57. if (ih+2*ph >= 3 && iw+2*pw >= 3)
  58. {
  59. UniformIntRNG rng{INT8_MIN >> 1, INT8_MAX >> 1};
  60. Checker<Pooling> checker(handle());
  61. checker.set_dtype(0, dtype::QuantizedS8(1.1f));
  62. checker.set_rng(0,&rng);
  63. param::Pooling param;
  64. param.mode = param::Pooling::Mode::MAX;
  65. param.format = param::Pooling::Format::NCHW44;
  66. param.pad_h = ph;
  67. param.pad_w = pw;
  68. param.stride_h = param.stride_w = 2;
  69. param.window_h = param.window_w = 3;
  70. checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
  71. }
  72. // clang-format on
  73. }
  74. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_INT8_W3x3_S2x2)
  75. {
  76. for (size_t ih: {2, 3, 7, 13, 52, 53, 54, 55})
  77. for (size_t iw: {2, 3, 6, 14, 53, 54, 55, 56})
  78. for (size_t ph: {0, 1, 2})
  79. for (size_t pw: {0, 1, 2})
  80. if (ih+2*ph >= 3 && iw+2*pw >= 3)
  81. {
  82. Checker<Pooling> checker(handle());
  83. checker.set_dtype(0, dtype::Int8());
  84. param::Pooling param;
  85. param.mode = param::Pooling::Mode::MAX;
  86. param.pad_h = ph;
  87. param.pad_w = pw;
  88. param.stride_h = param.stride_w = 2;
  89. param.window_h = param.window_w = 3;
  90. checker.set_param(param).exec(TensorShapeArray{
  91. {2, 3, ih, iw}, {}});
  92. }
  93. }
  94. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_INT8_W2x2_S2x2)
  95. {
  96. for (size_t ih: {2, 3, 7, 13, 52, 53, 54, 55})
  97. for (size_t iw: {2, 3, 6, 14, 53, 54, 55, 56})
  98. for (size_t ph: {0, 1})
  99. for (size_t pw: {0, 1})
  100. if (ih+2*ph >= 3 && iw+2*pw >= 3)
  101. {
  102. Checker<Pooling> checker(handle());
  103. checker.set_dtype(0, dtype::Int8());
  104. param::Pooling param;
  105. param.mode = param::Pooling::Mode::MAX;
  106. param.pad_h = ph;
  107. param.pad_w = pw;
  108. param.stride_h = param.stride_w = 2;
  109. param.window_h = param.window_w = 2;
  110. checker.set_param(param).exec(TensorShapeArray{
  111. {2, 3, ih, iw}, {}});
  112. }
  113. }
  114. #if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  115. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_FP16) {
  116. Checker<Pooling> checker(handle());
  117. checker.set_dtype(0, dtype::Float16{})
  118. .set_dtype(1, dtype::Float16{})
  119. .set_epsilon(3e-3);
  120. using Param = param::Pooling;
  121. for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23})
  122. for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23})
  123. for (auto mode : {Param::Mode::AVERAGE, Param::Mode::MAX}) {
  124. for (size_t window : {2, 3}) {
  125. Param param;
  126. param.mode = mode;
  127. param.window_h = param.window_w = window;
  128. param.stride_h = param.stride_w = 1;
  129. param.pad_h = param.pad_w = window / 2;
  130. //! test for SH == 1 && SW == 1 && FH == FW (FH == 2 || FH
  131. //! == 3)
  132. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  133. //! test for SH = SW = 2 && FH = FW = 2
  134. param.stride_h = param.stride_w = 2;
  135. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  136. }
  137. }
  138. //! test for SH == 2 && SW == 2 && FH == FW == 3 max pooling
  139. for (size_t ih : {2, 3, 7, 13, 52, 53, 54, 55})
  140. for (size_t iw : {2, 3, 6, 14, 53, 54, 55, 56})
  141. for (size_t ph : {0, 1, 2})
  142. for (size_t pw : {0, 1, 2})
  143. if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3) {
  144. param::Pooling param;
  145. param.mode = param::Pooling::Mode::MAX;
  146. param.pad_h = ph;
  147. param.pad_w = pw;
  148. param.stride_h = param.stride_w = 2;
  149. param.window_h = param.window_w = 3;
  150. checker.set_param(param).exec(
  151. TensorShapeArray{{2, 3, ih, iw}, {}});
  152. }
  153. //! test for SH == 2 && SW == 2 && FH = FW = 4 max pooling
  154. for (size_t ih :
  155. {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  156. for (size_t iw :
  157. {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  158. for (size_t p : {1, 2}) {
  159. Param param;
  160. param.mode = Param::Mode::MAX;
  161. param.window_h = param.window_w = 4;
  162. param.stride_h = param.stride_w = 2;
  163. param.pad_h = param.pad_w = p;
  164. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  165. }
  166. //! test for SH == 2 && SW == 2 && FH = FW = 5 max pooling
  167. for (size_t ih :
  168. {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  169. for (size_t iw :
  170. {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  171. for (size_t p : {1, 2}) {
  172. Param param;
  173. param.mode = Param::Mode::MAX;
  174. param.window_h = param.window_w = 5;
  175. param.stride_h = param.stride_w = 2;
  176. param.pad_h = param.pad_w = p;
  177. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  178. }
  179. }
  180. #endif
  181. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_QUANTIZED) {
  182. Checker<Pooling> checker(handle());
  183. UniformIntRNG rng1{INT8_MIN >> 1, INT8_MAX >> 1};
  184. UniformIntRNG rng2{0, UINT8_MAX >> 1};
  185. using Param = param::Pooling;
  186. for (auto type : std::vector<DType>{
  187. dtype::QuantizedS8(1.1f),
  188. dtype::Quantized8Asymm(1.1f, static_cast<uint8_t>(3))}) {
  189. if (type.enumv() == DTypeEnum::QuantizedS8) {
  190. checker.set_rng(0, &rng1);
  191. } else {
  192. megdnn_assert(type.enumv() == DTypeEnum::Quantized8Asymm);
  193. checker.set_rng(0, &rng2);
  194. }
  195. for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 33, 49})
  196. for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 33, 49})
  197. for (auto mode : {Param::Mode::AVERAGE, Param::Mode::MAX}) {
  198. for (size_t window : {2, 3}) {
  199. Param param;
  200. param.mode = mode;
  201. param.window_h = param.window_w = window;
  202. param.stride_h = param.stride_w = 1;
  203. param.pad_h = param.pad_w = window / 2;
  204. //! test for SH == 1 && SW == 1 && FH == FW (FH == 2 ||
  205. //! FH
  206. //! == 3)
  207. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  208. //! test for SH = SW = 2 && FH = FW = 2
  209. param.stride_h = param.stride_w = 2;
  210. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  211. }
  212. }
  213. //! test for SH == 2 && SW == 2 && FH == FW == 3 max pooling
  214. for (size_t ih : {2, 3, 7, 13, 52, 53, 54, 55})
  215. for (size_t iw : {2, 3, 6, 14, 53, 54, 55, 56})
  216. for (size_t ph : {0, 1, 2})
  217. for (size_t pw : {0, 1, 2})
  218. if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3) {
  219. param::Pooling param;
  220. param.mode = param::Pooling::Mode::MAX;
  221. param.pad_h = ph;
  222. param.pad_w = pw;
  223. param.window_h = param.window_w = 3;
  224. param.stride_h = param.stride_w = 2;
  225. checker.set_param(param).exec(
  226. TensorShapeArray{{2, 3, ih, iw}, {}});
  227. }
  228. //! test for SH == 2 && SW == 2 && FH == FW == 4 max pooling
  229. for (size_t ih :
  230. {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  231. for (size_t iw :
  232. {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  233. for (size_t p : {1, 2}) {
  234. Param param;
  235. param.mode = Param::Mode::MAX;
  236. param.window_h = param.window_w = 4;
  237. param.stride_h = param.stride_w = 2;
  238. param.pad_h = param.pad_w = p;
  239. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  240. }
  241. //! test for SH == 2 && SW == 2 && FH == FW == 5 max pooling
  242. for (size_t ih :
  243. {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  244. for (size_t iw :
  245. {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  246. for (size_t p : {1, 2}) {
  247. Param param;
  248. param.mode = Param::Mode::MAX;
  249. param.window_h = param.window_w = 5;
  250. param.stride_h = param.stride_w = 2;
  251. param.pad_h = param.pad_w = p;
  252. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  253. }
  254. }
  255. }
  256. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_FALLBACK) {
  257. using Param = param::Pooling;
  258. for (size_t ih: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  259. for (size_t iw: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  260. for (size_t p: {1, 2})
  261. {
  262. Param param;
  263. param.mode = Param::Mode::MAX;
  264. param.window_h = param.window_w = 3;
  265. param.stride_h = param.stride_w = 2;
  266. param.pad_h = param.pad_w = p;
  267. Checker<Pooling> checker(handle());
  268. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  269. }
  270. }
  271. #if MEGDNN_WITH_BENCHMARK
  272. namespace {
  273. template <typename Opr>
  274. void benchmark_impl(const typename Opr::Param& param,
  275. std::vector<SmallVector<TensorShape>> shapes, size_t RUNS,
  276. TaskExecutorConfig&& multi_thread_config,
  277. TaskExecutorConfig&& single_thread_config) {
  278. std::vector<float> multi_thread_times, single_thread_times;
  279. {
  280. auto multi_thread_hanle =
  281. create_cpu_handle(0, true, &multi_thread_config);
  282. auto benchmarker = Benchmarker<Opr>(multi_thread_hanle.get());
  283. benchmarker.set_times(RUNS).set_display(false).set_param(param);
  284. for (auto shape : shapes) {
  285. multi_thread_times.push_back(benchmarker.exec(shape) / RUNS);
  286. }
  287. }
  288. {
  289. auto single_thread_handle =
  290. create_cpu_handle(0, true, &single_thread_config);
  291. auto benchmarker = Benchmarker<Opr>(single_thread_handle.get());
  292. benchmarker.set_times(RUNS).set_display(false).set_param(param);
  293. for (auto shape : shapes) {
  294. single_thread_times.push_back(benchmarker.exec(shape) / RUNS);
  295. }
  296. }
  297. printf("Benchmark : Multi threads %zu, ", multi_thread_config.nr_thread);
  298. printf("core_ids:");
  299. for (size_t i = 0; i < multi_thread_config.affinity_core_set.size(); i++) {
  300. printf("%zu ", multi_thread_config.affinity_core_set[i]);
  301. }
  302. printf(", Single thread core_id %zu\n",
  303. single_thread_config.affinity_core_set[0]);
  304. for (size_t i = 0; i < shapes.size(); i++) {
  305. auto shape = shapes[i];
  306. printf("Case: ");
  307. for (auto sh : shape)
  308. printf("%s ", sh.to_string().c_str());
  309. printf("%zu threads time: %f,\n single thread time: "
  310. "%f. spead up = %f, speedup/cores=%f\n",
  311. multi_thread_config.nr_thread, multi_thread_times[i],
  312. single_thread_times[i],
  313. single_thread_times[i] / multi_thread_times[i],
  314. single_thread_times[i] / multi_thread_times[i] /
  315. multi_thread_config.nr_thread);
  316. }
  317. }
  318. } // namespace
  319. TEST_F(ARM_COMMON_BENCHMARK_MULTI_THREADS, BENCHMARK_POOLING) {
  320. constexpr size_t RUNS = 50;
  321. using Param = param::Pooling;
  322. Param param;
  323. param.window_h = param.window_w = 3;
  324. param.stride_h = param.stride_w = 2;
  325. param.pad_h = param.pad_w = 1;
  326. std::vector<SmallVector<TensorShape>> shapes;
  327. shapes.push_back({{32, 32, 215, 215}, {}});
  328. shapes.push_back({{32, 32, 128, 128}, {}});
  329. shapes.push_back({{8, 256, 100, 100}, {}});
  330. shapes.push_back({{1, 256, 100, 100}, {}});
  331. shapes.push_back({{1, 32, 100, 100}, {}});
  332. shapes.push_back({{1, 256, 80, 80}, {}});
  333. shapes.push_back({{1, 256, 60, 60}, {}});
  334. shapes.push_back({{1, 256, 30, 30}, {}});
  335. param.window_h = param.window_w = 3;
  336. param.stride_h = param.stride_w = 2;
  337. param.pad_h = param.pad_w = 1;
  338. printf("Benchmark POOLING kernel:%d*%d stride:%d,mode %d\n", param.window_h,
  339. param.stride_h, param.pad_h, static_cast<int>(param.mode));
  340. benchmark_impl<Pooling>(param, shapes, RUNS, {4, {0, 1, 2, 3}}, {1, {0}});
  341. benchmark_impl<Pooling>(param, shapes, RUNS, {4, {4, 5, 6, 7}}, {1, {4}});
  342. benchmark_impl<Pooling>(param, shapes, RUNS, {2, {0, 1}}, {1, {0}});
  343. }
  344. #endif
  345. } // namespace test
  346. } // namespace megdnn
  347. // vim: syntax=cpp.doxygen

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台