
elemwise_bmark.cpp

/**
 * \file dnn/test/x86/elemwise_bmark.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 */
#include "test/x86/fixture.h"

#include "megdnn/oprs.h"
#include "test/common/benchmarker.h"
#include "test/common/checker.h"
#include "test/common/rng.h"

using namespace megdnn;
using namespace test;

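// Re-run the benchmark with inputs drawn uniformly from [0, max_val], where
// max_val is chosen so that roughly `proportion_of_inf` of the samples exceed
// 88.3762626647949f (~127.5 * ln 2, presumably the overflow bound of the fast
// float32 exp() path), i.e. that fraction of the exp() outputs saturates to inf.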
#define TEST_IN_DIFF_DISTRUBUTION(proportion_of_inf, dataset_number) \
    max_val = 88.3762626647949f / (1 - proportion_of_inf);           \
    UniformFloatRNG rng##dataset_number(0.f, max_val);               \
    B.set_rng(0, &rng##dataset_number);                              \
    B.execs({{355600}, {}});

TEST_F(X86, BENCHMARK_ELEM_EXP_BASED_OPTRS) {
    Benchmarker<ElemwiseForward> B(handle());
    using Mode = ElemwiseForward::Param::Mode;
    // UniformFloatWithZeroRNG rng(80, 100, 0.1);

    printf("Test Optr exp(x)\n");
    B.set_param(Mode::EXP);
    B.execs({{355600}, {}});
    float max_val = 0;
    TEST_IN_DIFF_DISTRUBUTION(0.25, 1)
    TEST_IN_DIFF_DISTRUBUTION(0.5, 2)
    TEST_IN_DIFF_DISTRUBUTION(0.75, 3)
    TEST_IN_DIFF_DISTRUBUTION(0.9999, 4)

    printf("Test Optr tanh(x)\n");
    B.set_param(Mode::TANH);
    B.execs({{355600}, {}});
    max_val = 0;
    TEST_IN_DIFF_DISTRUBUTION(0.25, 5)
    TEST_IN_DIFF_DISTRUBUTION(0.5, 6)
    TEST_IN_DIFF_DISTRUBUTION(0.75, 7)
    TEST_IN_DIFF_DISTRUBUTION(0.9999, 8)

    printf("Test Optr fast_tanh(x)\n");
    B.set_param(Mode::FAST_TANH);
    B.execs({{355600}, {}});

    printf("Test Optr sigmoid(x)\n");
    B.set_param(Mode::SIGMOID);
    B.execs({{355600}, {}});
    max_val = 0;
    TEST_IN_DIFF_DISTRUBUTION(0.25, 13)
    TEST_IN_DIFF_DISTRUBUTION(0.5, 14)
    TEST_IN_DIFF_DISTRUBUTION(0.75, 15)
    TEST_IN_DIFF_DISTRUBUTION(0.9999, 16)

    printf("Test Optr tanh_grad(x)\n");
    B.set_param(Mode::TANH_GRAD);
    B.execs({{355600}, {355600}, {}});

    printf("Test Optr fast_tanh_grad(x)\n");
    B.set_param(Mode::FAST_TANH_GRAD);
    B.execs({{355600}, {355600}, {}});
}

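// Shape conventions for the benchmark helpers below: UNARY runs a single
// contiguous tensor; the BINARY variants cover tensor (op) tensor, tensor (op)
// scalar, and an NCHW tensor against a per-channel {1, chan, 1, 1} broadcast
// ("1C11"); the TERNARY variants do the same for three-operand modes such as
// FUSE_MUL_ADD3.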
// 1. Unary
#define BENCHMARK_UNARY(Optr, size)  \
    printf("Test for %s \n", #Optr); \
    B.set_param(Mode::Optr);         \
    B.execs({{4, 4, 4, 1 + size / 64}, {}});

// 2. Binary
#define BENCHMARK_BINARY(Optr, size) \
    B.set_param(Mode::Optr);         \
    B.execs({{size}, {size}, {}});

#define BENCHMARK_BINARY_SCALAR(Optr, size) \
    B.set_param(Mode::Optr);                \
    B.execs({{size}, {1}, {}});

#define BENCHMARK_BINARY_1C11(Optr, chan) \
    B.set_param(Mode::Optr);              \
    B.execs({{9, chan, 33, 127}, {1, chan, 1, 1}, {}});

#define BENCHMARK_BINARY_ALL_KINDS(Optr, size) \
    printf("Test for %s \n", #Optr);           \
    BENCHMARK_BINARY(Optr, size)               \
    BENCHMARK_BINARY_SCALAR(Optr, size)        \
    BENCHMARK_BINARY_1C11(Optr, (1 + size / 37719))

// 3. Ternary
#define BENCHMARK_TERNARY(Optr, size) \
    B.set_param(Mode::Optr);          \
    B.execs({{size}, {size}, {size}, {}});

#define BENCHMARK_TERNARY_SCALAR(Optr, size) \
    B.set_param(Mode::Optr);                 \
    B.execs({{size}, {size}, {1}, {}});

#define BENCHMARK_TERNARY_1C11(Optr, chan) \
    B.set_param(Mode::Optr);               \
    B.execs({{1, chan, 1, 1}, {9, chan, 33, 127}, {1, chan, 1, 1}, {}});

#define BENCHMARK_TERNARY_ALL_KINDS(Optr, size) \
    printf("Test for %s \n", #Optr);            \
    BENCHMARK_TERNARY(Optr, size)               \
    BENCHMARK_TERNARY_SCALAR(Optr, size)        \
    BENCHMARK_TERNARY_1C11(Optr, (size / 37719))

#define BENCHMARK_CASE_INT(size)                    \
    BENCHMARK_BINARY_ALL_KINDS(ADD, size)           \
    BENCHMARK_BINARY_ALL_KINDS(SUB, size)           \
    BENCHMARK_BINARY_ALL_KINDS(MUL, size)           \
    BENCHMARK_BINARY_ALL_KINDS(TRUE_DIV, size)      \
    BENCHMARK_BINARY_ALL_KINDS(MIN, size)           \
    BENCHMARK_BINARY_ALL_KINDS(MAX, size)           \
    BENCHMARK_UNARY(RELU, size)                     \
    BENCHMARK_UNARY(ABS, size)                      \
    BENCHMARK_BINARY_ALL_KINDS(FUSE_ADD_RELU, size) \
    BENCHMARK_TERNARY_ALL_KINDS(FUSE_MUL_ADD3, size)

#define BENCHMARK_CASE_FLOAT(size)                  \
    BENCHMARK_CASE_INT(size)                        \
    BENCHMARK_BINARY_ALL_KINDS(FUSE_ADD_TANH, size) \
    BENCHMARK_BINARY_ALL_KINDS(FUSE_ADD_SIGMOID, size)

TEST_F(X86, BENCHMARK_ELEM_EVERY_DTYPE) {
    Benchmarker<ElemwiseForward> B(handle());
    using Mode = ElemwiseForward::Param::Mode;

    printf("\nTest case float32:\n");
    B.set_dtype(0, dtype::Float32());
    B.set_dtype(1, dtype::Float32());
    B.set_dtype(2, dtype::Float32());
    BENCHMARK_CASE_FLOAT(1556011)

    // printf("\nTest case int32:\n");
    // B.set_dtype(0, dtype::Int32());
    // B.set_dtype(1, dtype::Int32());
    // B.set_dtype(2, dtype::Int32());
    // BENCHMARK_CASE_INT(1556011)

    // printf("\nTest case int16:\n");
    // B.set_dtype(0, dtype::Int16());
    // B.set_dtype(1, dtype::Int16());
    // B.set_dtype(2, dtype::Int16());
    // BENCHMARK_CASE_INT(1556011)

    // printf("\nTest case int8:\n");
    // B.set_dtype(0, dtype::Int8());
    // B.set_dtype(1, dtype::Int8());
    // B.set_dtype(2, dtype::Int8());
    // BENCHMARK_CASE_INT(1556011)
}

#if MEGDNN_WITH_BENCHMARK
namespace {
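// Benchmark `mode` on `shapes` with the handle under test (`handle_bench`) and
// with a fallback CPU handle obtained from create_cpu_handle(1), then print
// MFLOPS and memory throughput for both, together with the bench/fallback
// ratios.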
void run_elemwise_benchmark(
        const TensorShapeArray& shapes, param::Elemwise::Mode mode,
        const char* mode_str, DType type, Handle* handle_bench) {
    auto handle_fallback = create_cpu_handle(1);
    Benchmarker<Elemwise> benchmarker_bench(handle_bench);
    Benchmarker<Elemwise> benchmarker_fallback(handle_fallback.get());
    float throughput = 0;
    SmallVector<TensorLayout> layouts;
    std::string src_strs;
    for (size_t i = 0; i < shapes.size(); i++) {
        layouts.emplace_back(shapes[i], type);
        throughput += layouts.back().span().dist_byte();
        src_strs += layouts.back().to_string();
        if (i != shapes.size() - 1) {
            src_strs += ",";
        }
    }
    constexpr size_t RUN = 50;
    benchmarker_fallback.set_times(RUN).set_display(false);
    benchmarker_bench.set_times(RUN).set_display(false);
    benchmarker_fallback.set_param(mode);
    benchmarker_bench.set_param(mode);

    TensorLayout dst_layout;
    auto opr = handle_bench->create_operator<Elemwise>();
    opr->param() = mode;
    opr->deduce_layout(layouts, dst_layout);
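    // Work estimate: each output element costs (#inputs - 1) elementwise ops,
    // and throughput counts the bytes of every input plus the output. The
    // 1e3 / (1024 * 1024) factor rescales both so that dividing by the per-run
    // time gives the MFLOPS and MB/s figures used in the printf below.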
    float computations =
            dst_layout.total_nr_elems() * (std::max<size_t>(shapes.size(), 2) - 1);
    throughput += dst_layout.span().dist_byte();
    computations *= (1e3 / (1024.0 * 1024));
    throughput *= (1e3 / (1024.0 * 1024));

    layouts.emplace_back(dst_layout);
    auto fallback_time = benchmarker_fallback.execl(layouts) / RUN;
    auto bench_time = benchmarker_bench.execl(layouts) / RUN;

    float fallback_flops = computations / fallback_time;
    float bench_flops = computations / bench_time;
    float fallback_thr = throughput / fallback_time;
    float bench_thr = throughput / bench_time;

    printf("%s = %s (type: %s, mode: %s) cpu=%fMFLOPS %fMB/s, bench=%fMFLOPS "
           "%fMB/s "
           "computations: %fx, throughput: %fx\n",
           src_strs.c_str(), dst_layout.to_string().c_str(), type.name(), mode_str,
           fallback_flops, fallback_thr, bench_flops, bench_thr,
           bench_flops / fallback_flops, bench_thr / fallback_thr);
}
}  // namespace

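// Each shape is benchmarked under every integer dtype via INT_RUN and every
// float dtype via FLOAT_RUN; each test below defines its own
// INT_BENCHMARK_CASES / FLOAT_BENCHMARK_CASES before expanding BENCHMARK_CASES.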
#define INT_RUN(shape, mode)                                              \
    run_elemwise_benchmark(shape, mode, #mode, dtype::Int8{}, handle());  \
    run_elemwise_benchmark(shape, mode, #mode, dtype::Int16{}, handle()); \
    run_elemwise_benchmark(shape, mode, #mode, dtype::Int32{}, handle());

#define FLOAT_RUN(shape, mode)                                              \
    run_elemwise_benchmark(shape, mode, #mode, dtype::Float32{}, handle()); \
    run_elemwise_benchmark(shape, mode, #mode, dtype::Float16{}, handle());

#define BENCHMARK_CASES(shape) \
    INT_BENCHMARK_CASES(shape) \
    FLOAT_BENCHMARK_CASES(shape)

TEST_F(X86, BENCHMARK_UNARY) {
#define INT_BENCHMARK_CASES(shape) \
    INT_RUN(shape, Mode::RELU);    \
    INT_RUN(shape, Mode::ABS);

#define FLOAT_BENCHMARK_CASES(shape) \
    FLOAT_RUN(shape, Mode::RELU);    \
    FLOAT_RUN(shape, Mode::ABS);     \
    FLOAT_RUN(shape, Mode::SIGMOID); \
    FLOAT_RUN(shape, Mode::EXP);     \
    FLOAT_RUN(shape, Mode::TANH);    \
    FLOAT_RUN(shape, Mode::FAST_TANH);

    using Mode = param::Elemwise::Mode;
    BENCHMARK_CASES({{10000}});
    BENCHMARK_CASES({{50000}});
#undef INT_BENCHMARK_CASES
#undef FLOAT_BENCHMARK_CASES
}

TEST_F(X86, BENCHMARK_BINARY) {
#define INT_BENCHMARK_CASES(shape) \
    INT_RUN(shape, Mode::MIN);     \
    INT_RUN(shape, Mode::MAX);     \
    INT_RUN(shape, Mode::ADD);     \
    INT_RUN(shape, Mode::SUB);     \
    INT_RUN(shape, Mode::MUL);     \
    INT_RUN(shape, Mode::RMULH);   \
    INT_RUN(shape, Mode::FUSE_ADD_RELU);

#define FLOAT_BENCHMARK_CASES(shape)  \
    FLOAT_RUN(shape, Mode::MIN);      \
    FLOAT_RUN(shape, Mode::MAX);      \
    FLOAT_RUN(shape, Mode::ADD);      \
    FLOAT_RUN(shape, Mode::SUB);      \
    FLOAT_RUN(shape, Mode::MUL);      \
    FLOAT_RUN(shape, Mode::POW);      \
    FLOAT_RUN(shape, Mode::TRUE_DIV); \
    FLOAT_RUN(shape, Mode::FUSE_ADD_RELU);

    using Mode = param::Elemwise::Mode;
    TensorShapeArray shapes = {{1, 112, 28, 28}, {1, 112, 28, 28}};
    BENCHMARK_CASES(shapes);
    shapes = {{1, 16, 1, 1}, {1, 16, 112, 112}};
    BENCHMARK_CASES(shapes);
    shapes = {{1, 448, 7, 7}, {1, 448, 7, 7}};
    BENCHMARK_CASES(shapes);
#undef INT_BENCHMARK_CASES
#undef FLOAT_BENCHMARK_CASES
}

TEST_F(X86, BENCHMARK_TERNARY_FMA3) {
#define INT_BENCHMARK_CASES(shape) INT_RUN(shape, Mode::FUSE_MUL_ADD3);
#define FLOAT_BENCHMARK_CASES(shape) FLOAT_RUN(shape, Mode::FUSE_MUL_ADD3);

    using Mode = param::Elemwise::Mode;
    TensorShapeArray shapes = {{30, 40, 70}, {30, 40, 70}, {30, 40, 70}};
    BENCHMARK_CASES(shapes);
    shapes = {{1, 4, 1, 1}, {3, 4, 5, 7}, {1, 4, 1, 1}};
    BENCHMARK_CASES(shapes);
    shapes = {{3, 4, 5, 7}, {3, 4, 5, 7}, {1, 1, 1, 1}};
    BENCHMARK_CASES(shapes);
#undef INT_BENCHMARK_CASES
#undef FLOAT_BENCHMARK_CASES
}

#undef BENCHMARK_CASES
#undef INT_RUN
#undef FLOAT_RUN
#endif