pooling.cpp
/**
 * \file dnn/test/arm_common/pooling.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "test/arm_common/fixture.h"
#include "test/common/benchmarker.h"
#include "test/common/checker.h"
#include "test/common/pooling.h"
#include "test/common/rng.h"

namespace megdnn {
namespace test {

TEST_F(ARM_COMMON, POOLING) {
    using Param = param::Pooling;
    // clang-format off
    for (size_t ih: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t iw: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t p: {1, 2})
    {
        Param param;
        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 3;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        Checker<Pooling> checker(handle());
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});

        param.mode = Param::Mode::AVERAGE;
        param.window_h = param.window_w = 3;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});

        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 4;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});

        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 5;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        if (ih + p * 2 >= 5 && iw + p * 2 >= 5)
            checker.set_param(param).exec({{2, 3, ih, iw}, {}});
    }

    for (size_t ih: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t iw: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t p: {1, 2})
    {
        Param param;
        param.mode = Param::Mode::AVERAGE_COUNT_EXCLUDE_PADDING;
        param.window_h = param.window_w = 3;
        param.stride_h = param.stride_w = 1;
        param.pad_h = param.pad_w = p;
        Checker<Pooling> checker(handle());
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
    }
    // clang-format on
}
TEST_F(ARM_COMMON, POOLING_INT8_W2x2_S2x2) {
    // clang-format off
    for (size_t ih: {2, 3, 7, 13, 52, 53, 54, 55})
    for (size_t iw: {2, 3, 6, 14, 53, 54, 55, 56})
    for (size_t ph: {0, 1})
    for (size_t pw: {0, 1})
    if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3)
    {
        Checker<Pooling> checker(handle());
        checker.set_dtype(0, dtype::Int8());
        param::Pooling param;
        param.mode = param::Pooling::Mode::MAX;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 2;
        param.window_h = param.window_w = 2;
        checker.set_param(param).exec(TensorShapeArray{{2, 3, ih, iw}, {}});
    }
    // clang-format on
}
TEST_F(ARM_COMMON, POOLING_INT8_W3x3_S2x2) {
    // clang-format off
    for (size_t ih: {2, 3, 7, 13, 52, 53, 54, 55})
    for (size_t iw: {2, 3, 6, 14, 53, 54, 55, 56})
    for (size_t ph: {0, 1, 2})
    for (size_t pw: {0, 1, 2})
    if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3)
    {
        Checker<Pooling> checker(handle());
        checker.set_dtype(0, dtype::Int8());
        param::Pooling param;
        param.mode = param::Pooling::Mode::MAX;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 2;
        param.window_h = param.window_w = 3;
        checker.set_param(param).exec(TensorShapeArray{{2, 3, ih, iw}, {}});
    }
    // clang-format on
}
#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_F(ARM_COMMON, POOLING_FP16) {
    Checker<Pooling> checker(handle());
    checker.set_dtype(0, dtype::Float16{})
            .set_dtype(1, dtype::Float16{})
            .set_epsilon(3e-3);
    using Param = param::Pooling;
    for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23})
        for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23})
            for (auto mode : {Param::Mode::AVERAGE, Param::Mode::MAX}) {
                for (size_t window : {2, 3}) {
                    Param param;
                    param.mode = mode;
                    param.window_h = param.window_w = window;
                    param.stride_h = param.stride_w = 1;
                    param.pad_h = param.pad_w = window / 2;
                    //! test for SH == 1 && SW == 1 && FH == FW (FH == 2 || FH == 3)
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                    //! test for SH = SW = 2 && FH = FW = 2
                    param.stride_h = param.stride_w = 2;
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                }
            }
    //! test for SH == 2 && SW == 2 && FH == FW == 3 max pooling
    for (size_t ih : {2, 3, 7, 13, 52, 53, 54, 55})
        for (size_t iw : {2, 3, 6, 14, 53, 54, 55, 56})
            for (size_t ph : {0, 1, 2})
                for (size_t pw : {0, 1, 2})
                    if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3) {
                        param::Pooling param;
                        param.mode = param::Pooling::Mode::MAX;
                        param.pad_h = ph;
                        param.pad_w = pw;
                        param.stride_h = param.stride_w = 2;
                        param.window_h = param.window_w = 3;
                        checker.set_param(param).exec(
                                TensorShapeArray{{2, 3, ih, iw}, {}});
                    }
    //! test for SH == 2 && SW == 2 && FH = FW = 4 max pooling
    for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
        for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t p : {1, 2}) {
                Param param;
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 4;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
            }
    //! test for SH == 2 && SW == 2 && FH = FW = 5 max pooling
    for (size_t ih : {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
        for (size_t iw : {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t p : {1, 2}) {
                Param param;
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 5;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
            }
}
#endif
TEST_F(ARM_COMMON, POOLING_QUANTIZED) {
    Checker<Pooling> checker(handle());
    UniformIntRNG rng1{INT8_MIN >> 1, INT8_MAX >> 1};
    UniformIntRNG rng2{0, UINT8_MAX >> 1};
    using Param = param::Pooling;
    for (auto type : std::vector<DType>{
                 dtype::QuantizedS8(1.1f),
                 dtype::Quantized8Asymm(1.1f, static_cast<uint8_t>(3))}) {
        if (type.enumv() == DTypeEnum::QuantizedS8) {
            checker.set_rng(0, &rng1);
        } else {
            megdnn_assert(type.enumv() == DTypeEnum::Quantized8Asymm);
            checker.set_rng(0, &rng2);
        }
        for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 33, 49})
            for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 33, 49})
                for (auto mode : {Param::Mode::AVERAGE, Param::Mode::MAX}) {
                    for (size_t window : {2, 3}) {
                        Param param;
                        param.mode = mode;
                        param.window_h = param.window_w = window;
                        param.stride_h = param.stride_w = 1;
                        param.pad_h = param.pad_w = window / 2;
                        //! test for SH == 1 && SW == 1 && FH == FW (FH == 2 || FH == 3)
                        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                        //! test for SH = SW = 2 && FH = FW = 2
                        param.stride_h = param.stride_w = 2;
                        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                    }
                }
        //! test for SH == 2 && SW == 2 && FH == FW == 3 max pooling
        for (size_t ih : {2, 3, 7, 13, 52, 53, 54, 55})
            for (size_t iw : {2, 3, 6, 14, 53, 54, 55, 56})
                for (size_t ph : {0, 1, 2})
                    for (size_t pw : {0, 1, 2})
                        if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3) {
                            param::Pooling param;
                            param.mode = param::Pooling::Mode::MAX;
                            param.pad_h = ph;
                            param.pad_w = pw;
                            param.window_h = param.window_w = 3;
                            param.stride_h = param.stride_w = 2;
                            checker.set_param(param).exec(
                                    TensorShapeArray{{2, 3, ih, iw}, {}});
                        }
        //! test for SH == 2 && SW == 2 && FH == FW == 4 max pooling
        for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
                for (size_t p : {1, 2}) {
                    Param param;
                    param.mode = Param::Mode::MAX;
                    param.window_h = param.window_w = 4;
                    param.stride_h = param.stride_w = 2;
                    param.pad_h = param.pad_w = p;
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                }
        //! test for SH == 2 && SW == 2 && FH == FW == 5 max pooling
        for (size_t ih : {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t iw : {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
                for (size_t p : {1, 2}) {
                    Param param;
                    param.mode = Param::Mode::MAX;
                    param.window_h = param.window_w = 5;
                    param.stride_h = param.stride_w = 2;
                    param.pad_h = param.pad_w = p;
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                }
    }
}
#if MEGDNN_WITH_BENCHMARK
void benchmark_nchw44_fp32(Handle* handle) {
    using Param = param::Pooling;
    auto run = [&](size_t n, size_t c, size_t h, size_t w, size_t filter,
                   size_t stride, size_t pad, Param::Mode mode) {
        Param param;
        param.window_h = param.window_w = filter;
        param.stride_h = param.stride_w = stride;
        param.pad_h = param.pad_w = pad;
        param.format = Param::Format::NCHW;
        param.mode = mode;
        TensorShape nchw_shape = {n, c, h, w};
        TensorShape nchw44_shape = {n, c / 4, h, w, 4};
        TensorLayout dst_layout;
        auto opr = handle->create_operator<Pooling>();
        opr->param() = param;
        opr->deduce_layout({nchw_shape, dtype::Float32()}, dst_layout);
        float calc_amount =
                dst_layout.total_nr_elems() * param.window_h * param.window_w;
        Benchmarker<Pooling> benchmarker_float_nchw(handle);
        Benchmarker<Pooling> benchmarker_float_nchw44(handle);
        Benchmarker<Pooling> benchmarker_int_nchw44(handle);
        size_t RUN = 500;
        auto t1 = benchmarker_float_nchw.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec({nchw_shape, {}});
        param.format = Param::Format::NCHW44;
        auto t2 = benchmarker_int_nchw44.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .execl({{nchw44_shape, dtype::QuantizedS8(1.0)},
                                  {{}, dtype::QuantizedS8(1.0)}});
        auto t3 = benchmarker_float_nchw44.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec({nchw44_shape, {}});
        printf("{%zu %zu %zu %zu} filter = %zu, stride = %zu pad = %zu\n"
               "nchw_fp32={%.3f ms, %.3f Mflops}, "
               "nchw44_int={%.3f ms, %.3f Mflops}, "
               "nchw44_fp32={%.3f ms, %.3f Mflops, speed_up %f}\n\n",
               n, c, h, w, filter, stride, pad, t1 / RUN,
               calc_amount / (t1 / RUN * 1000), t2 / RUN,
               calc_amount / (t2 / RUN * 1000), t3 / RUN,
               calc_amount / (t3 / RUN * 1000), t1 / t3);
    };
    // Resnet50
    run(1, 64, 112, 112, 3, 2, 1, param::Pooling::Mode::MAX);
    run(1, 2048, 7, 7, 7, 1, 0, param::Pooling::Mode::AVERAGE);
    // VGG16
    run(1, 64, 224, 224, 2, 2, 0, param::Pooling::Mode::MAX);
    run(1, 128, 112, 112, 2, 2, 0, param::Pooling::Mode::MAX);
    run(1, 256, 56, 56, 2, 2, 0, param::Pooling::Mode::MAX);
    run(1, 512, 28, 28, 2, 2, 0, param::Pooling::Mode::MAX);
    run(1, 512, 14, 14, 2, 2, 0, param::Pooling::Mode::MAX);
}

TEST_F(ARM_COMMON, BENCHMARK_POOLING_NCHW44_FP32) {
    benchmark_nchw44_fp32(handle());
}

TEST_F(ARM_COMMON_MULTI_THREADS, BENCHMARK_POOLING_NCHW44_FP32) {
    benchmark_nchw44_fp32(handle());
}
TEST_F(ARM_COMMON, BENCHMARK_POOLING_INT8_W3x3_S2x2) {
    using Param = param::Pooling;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        auto handle_naive = create_cpu_handle(2);
        TensorLayoutArray layouts;
        layouts.emplace_back(shapes[0], dtype::Int8());
        layouts.emplace_back(shapes[1], dtype::Int8());
        Benchmarker<Pooling> benchmarker_naive(handle_naive.get());
        Benchmarker<Pooling> benchmarker_float(handle());
        Benchmarker<Pooling> benchmarker_int(handle());
        size_t RUN = 10;
        auto t1 = benchmarker_naive.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec(shapes);
        auto t2 = benchmarker_float.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec(shapes);
        auto t3 = benchmarker_int.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .execl(layouts);
        printf("naive=%.3fms float=%.3fms, int=%.3fms\n", t1 / RUN, t2 / RUN,
               t3 / RUN);
        auto speedup = t2 / t3;
        ASSERT_GE(speedup, 2.0);
    };
    Param param;
    param.window_h = param.window_w = 3;
    param.stride_h = param.stride_w = 2;
    param.pad_h = param.pad_w = 1;
    std::cout << "3x3 with 2x2 stride max pooling:" << std::endl;
    run({{1, 3, 640, 480}, {}}, param);
}
TEST_F(ARM_COMMON, BENCHMARK_POOLING_W4x4_S2x2) {
    using Param = param::Pooling;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        std::cout << "N:" << shapes[0][0] << " "
                  << "IC:" << shapes[0][1] << " "
                  << "IH:" << shapes[0][2] << " "
                  << "IW:" << shapes[0][3] << std::endl;
        auto handle_naive = create_cpu_handle(2);
        Benchmarker<Pooling> benchmarker_naive(handle_naive.get());
        Benchmarker<Pooling> benchmarker_float(handle());
        size_t RUN = 10;
        auto t1 = benchmarker_naive.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec(shapes);
        auto t2 = benchmarker_float.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec(shapes);
        TensorLayout dst_layout;
        auto opr = handle()->create_operator<Pooling>();
        opr->param() = param;
        opr->deduce_layout({shapes[0], dtype::Float32()}, dst_layout);
        float calc_amount =
                dst_layout.total_nr_elems() * param.window_h * param.window_w;
        printf("naive={%.3fms, %.3fMflops}, neon={%.3fms, %.3fMflops}\n",
               t1 / RUN, calc_amount / (t1 / RUN * 1000), t2 / RUN,
               calc_amount / (t2 / RUN * 1000));
    };
    Param param;
    param.window_h = param.window_w = 4;
    param.stride_h = param.stride_w = 2;
    param.pad_h = param.pad_w = 1;
    std::cout << "4x4 with 2x2 stride max pooling:" << std::endl;
    run({{1, 24, 160, 128}, {}}, param);
    run({{1, 4, 240, 135}, {}}, param);
    run({{1, 32, 120, 67}, {}}, param);
    run({{1, 64, 60, 33}, {}}, param);
}
TEST_F(ARM_COMMON, BENCHMARK_POOLING_W5x5_S2x2) {
    using Param = param::Pooling;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        std::cout << "N:" << shapes[0][0] << " "
                  << "IC:" << shapes[0][1] << " "
                  << "IH:" << shapes[0][2] << " "
                  << "IW:" << shapes[0][3] << std::endl;
        auto handle_naive = create_cpu_handle(2);
        Benchmarker<Pooling> benchmarker_naive(handle_naive.get());
        Benchmarker<Pooling> benchmarker_float(handle());
        size_t RUN = 10;
        auto t1 = benchmarker_naive.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec(shapes);
        auto t2 = benchmarker_float.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec(shapes);
        TensorLayout dst_layout;
        auto opr = handle()->create_operator<Pooling>();
        opr->param() = param;
        opr->deduce_layout({shapes[0], dtype::Float32()}, dst_layout);
        float calc_amount =
                dst_layout.total_nr_elems() * param.window_h * param.window_w;
        printf("naive={%.3fms, %.3fMflops}, neon={%.3fms, %.3fMflops}\n",
               t1 / RUN, calc_amount / (t1 / RUN * 1000), t2 / RUN,
               calc_amount / (t2 / RUN * 1000));
    };
    Param param;
    param.window_h = param.window_w = 5;
    param.stride_h = param.stride_w = 2;
    param.pad_h = param.pad_w = 1;
    std::cout << "5x5 with 2x2 stride max pooling:" << std::endl;
    run({{1, 24, 160, 128}, {}}, param);
    run({{1, 4, 240, 135}, {}}, param);
    run({{1, 32, 120, 67}, {}}, param);
    run({{1, 64, 60, 33}, {}}, param);
}
TEST_F(ARM_COMMON, BENCHMARK_POOLING_FP16) {
    using Param = param::Pooling;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        TensorLayoutArray layouts;
        layouts.emplace_back(shapes[0], dtype::Float16());
        layouts.emplace_back(shapes[1], dtype::Float16());
        Benchmarker<Pooling> benchmarker_float(handle());
        Benchmarker<Pooling> benchmarker_half(handle());
        size_t RUN = 10;
        auto tf = benchmarker_float.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec(shapes) /
                  RUN;
        auto th = benchmarker_half.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .execl(layouts) /
                  RUN;
        TensorLayout dst_layout;
        auto opr = handle()->create_operator<Pooling>();
        opr->param() = param;
        opr->deduce_layout({shapes[0], dtype::Float32()}, dst_layout);
        float computations = dst_layout.total_nr_elems() * param.window_h *
                             param.window_w / (1024.f * 1024 * 1024);
        printf("float=%.3fms %f gflops, float16=%.3fms %f gflops speedup: %f\n",
               tf, computations / tf * 1e3, th, computations / th * 1e3,
               tf / th);
    };
    Param param;
    param.window_h = param.window_w = 2;
    param.stride_h = param.stride_w = 1;
    param.pad_h = param.pad_w = 1;
    printf("2x2 with 1x1 stride max pooling:\n");
    run({{1, 3, 640, 480}, {}}, param);
    for (size_t oh : {640, 128})
        for (size_t ow : {480, 112}) {
            param.window_h = param.window_w = 3;
            param.stride_h = param.stride_w = 2;
            param.pad_h = param.pad_w = 1;
            param.mode = Param::Mode::AVERAGE;
            printf("3x3 with 2x2 stride average pooling.\n");
            run({{1, 3, oh, ow}, {}}, param);
            for (size_t pw : {2, 3, 4, 5}) {
                param.window_h = param.window_w = pw;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = 1;
                param.mode = Param::Mode::MAX;
                printf("%zux%zu with 2x2 stride max pooling:\n", pw, pw);
                run({{1, 3, oh, ow}, {}}, param);
            }
        }
}
TEST_F(ARM_COMMON, BENCHMARK_POOLING_QUANTIZED) {
    using Param = param::Pooling;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        auto handle_naive = create_cpu_handle(2);
        TensorLayoutArray layouts;
        layouts.emplace_back(shapes[0], dtype::QuantizedS8(1.1f));
        layouts.emplace_back(shapes[1], dtype::QuantizedS8(1.1f));
        Benchmarker<Pooling> benchmarker_int(handle());
        Benchmarker<Pooling> benchmarker_naive(handle_naive.get());
        size_t RUN = 10;
        auto time_int = benchmarker_int.set_display(false)
                                .set_times(RUN)
                                .set_param(param)
                                .exec(shapes) /
                        RUN;
        auto time_naive = benchmarker_naive.set_display(false)
                                  .set_times(RUN)
                                  .set_param(param)
                                  .execl(layouts) /
                          RUN;
        TensorLayout dst_layout;
        auto opr = handle()->create_operator<Pooling>();
        opr->param() = param;
        opr->deduce_layout({shapes[0], dtype::QuantizedS8(1.1f)}, dst_layout);
        float computations = dst_layout.total_nr_elems() * param.window_h *
                             param.window_w / (1024.f * 1024 * 1024);
        printf("naive=%.3fms %f gflops, int8=%.3fms %f gflops speedup: %f\n",
               time_naive, computations / time_naive * 1e3, time_int,
               computations / time_int * 1e3, time_naive / time_int);
    };
    Param param;
    param.window_h = param.window_w = 2;
    param.stride_h = param.stride_w = 1;
    param.pad_h = param.pad_w = 1;
    printf("2x2 with 1x1 stride max pooling:\n");
    run({{1, 3, 640, 480}, {}}, param);
    // clang-format off
    for (size_t oh : {640, 128})
    for (size_t ow : {480, 112})
    for (size_t pw : {2, 3, 4, 5}) {
        param.window_h = param.window_w = pw;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = 1;
        printf("%zux%zu with 2x2 stride max pooling:\n", pw, pw);
        run({{1, 3, oh, ow}, {}}, param);
    }
    // clang-format on
}
#endif

} // namespace test
} // namespace megdnn

// vim: syntax=cpp.doxygen

The MegEngine installation package ships with the CUDA environment needed to run code on a GPU, so there is no separate CPU or GPU build to choose between. To run GPU programs, make sure the machine actually has a GPU and that the driver is installed. If you would like to try deep-learning development on a cloud GPU platform, you are welcome to visit the MegStudio platform.
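As a minimal sketch of how a program can verify that the GPU path is usable before relying on it (assuming the standard MegEngine Python API; megengine.is_cuda_available() and megengine.set_default_device() are the calls assumed here, and "gpu0" names the first GPU):

    import megengine as mge

    # The same wheel serves both CPU and GPU, so probe at runtime whether a
    # usable CUDA device (hardware plus driver) is actually present.
    if mge.is_cuda_available():
        mge.set_default_device("gpu0")  # route subsequent tensors/ops to the first GPU
    else:
        print("No usable CUDA device found; running on CPU instead.")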