
convolution.cpp 40 kB

/**
 * \file dnn/test/cuda/convolution.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied.
 */
#include "megdnn/dtype.h"
#include "megdnn/oprs.h"
#include "megdnn/opr_param_defs.h"
#include "test/cuda/fixture.h"
#include "test/common/tensor.h"
#include "test/common/workspace_wrapper.h"
#include "test/common/checker.h"
#include "test/common/convolution.h"
#include "test/common/rng.h"
#include "test/cuda/benchmark.h"
#include "src/cuda/utils.h"
#include "test/common/accuracy_shake_checker.h"

#define V1(x) #x
#define V(x) V1(x)
#define CUDNN_VERSION_STRING \
    "v" V(CUDNN_MAJOR) "." V(CUDNN_MINOR) "." V(CUDNN_PATCHLEVEL)
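// The two-level V1/V macros stringify the numeric cuDNN version macros after
// expansion; e.g. with cuDNN 7.6.5, CUDNN_VERSION_STRING expands to "v7.6.5".
// Tests below append this suffix to algorithm names to pin an exact cuDNN build.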
namespace megdnn {
namespace test {

TEST_F(CUDA, CONVOLUTION_8X8X32) {
    if (!cuda::is_compute_capability_required(6, 1)) {
        printf("Skip CUDA.CONVOLUTION_8X8X32 test as current device "
               "doesn't support\n");
        return;
    }

    using namespace convolution;
    std::vector<TestArg> args;
    {
        auto v = get_args();
        for (auto&& a : v) {
            args.push_back(std::move(a));
        }
    }
    {
        auto v = get_dilated_args();
        for (auto&& a : v) {
            args.push_back(std::move(a));
        }
    }
    {
        auto v = get_chanwise_args();
        for (auto&& a : v) {
            args.push_back(std::move(a));
        }
    }
    Checker<ConvolutionForward> checker(handle_cuda());
    UniformIntRNG rng(-4, 4);
    for (auto arg : args) {
        arg.param.format = param::Convolution::Format::NHWC;
        arg.src = cvt_src_or_dst_nchw2nhwc(arg.src);
        arg.filter = cvt_filter_nchw2nhwc(arg.filter);
        checker.set_dtype(0, dtype::Int8())
                .set_dtype(1, dtype::Int8())
                .set_dtype(2, dtype::Int32())
                .set_param(arg.param)
                .set_rng(0, &rng)
                .set_rng(1, &rng)
                .execs({arg.src, arg.filter, {}});
    }
}
TEST_F(CUDA, CONVOLUTION_FORWARD) {
    using namespace convolution;
    std::vector<TestArg> args = get_args();
    Checker<ConvolutionForward> checker(handle_cuda());
    NormalRNG default_rng;
    for (auto&& arg : args) {
        float scale =
                1.0f / sqrt(arg.filter[1] * arg.filter[2] * arg.filter[3]);
        UniformFloatRNG rng(scale, 2 * scale);
        checker.set_dtype(0, dtype::Float32())
                .set_dtype(1, dtype::Float32())
                .set_dtype(2, dtype::Float32())
                .set_rng(0, &default_rng)
                .set_rng(1, &default_rng)
                .set_epsilon(1e-3)
                .set_param(arg.param)
                .execs({arg.src, arg.filter, {}});
        checker.set_dtype(0, dtype::Float16())
                .set_dtype(1, dtype::Float16())
                .set_dtype(2, dtype::Float16())
                .set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .execs({arg.src, arg.filter, {}});
        arg.param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
        checker.set_dtype(0, dtype::Float16())
                .set_dtype(1, dtype::Float16())
                .set_dtype(2, dtype::Float16())
                .set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .execs({arg.src, arg.filter, {}});
        checker.set_dtype(0, dtype::BFloat16())
                .set_dtype(1, dtype::BFloat16())
                .set_dtype(2, dtype::BFloat16())
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .execs({arg.src, arg.filter, {}});
    }
}
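// In the quantized test below, the QuantizedS32 output scale is the product of
// the two input scales (0.132f * 0.0239f), matching how int8 dot products
// accumulate into int32.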
TEST_F(CUDA, CONV_FORWARD_MATMUL_NCHW4) {
    if (!cuda::is_compute_capability_required(6, 1))
        return;
    using namespace convolution;
    Checker<Convolution> checker(handle_cuda());
    UniformIntRNG int_rng{-127, 127};
    Convolution::Param param;
    param.format = Convolution::Param::Format::NCHW4;
    checker.set_dtype(0, dtype::QuantizedS8(0.132f))
            .set_dtype(1, dtype::QuantizedS8(0.0239f))
            .set_dtype(2, dtype::QuantizedS32(0.132f * 0.0239f))
            .set_rng(0, &int_rng)
            .set_rng(1, &int_rng)
            .set_param(param);
    checker.set_before_exec_callback(
            AlgoChecker<ConvolutionForward>(ExecutionPolicyAlgoName{
                    "DEFAULT",
                    {{ConvBiasForward::algo_name<ConvBiasForward::MatmulParam>(
                              "MATMUL8X8X32", {})
                              .c_str(),
                      {}}}}));
    param.sparse = Convolution::Param::Sparse::DENSE;
    param.pad_h = param.pad_w = 1;
    param.stride_h = param.stride_w = 1;
    checker.set_param(param);
    checker.exec({{8, 4, 10, 10, 4}, {16, 4, 3, 3, 4}, {}});
    checker.exec({{1, 4, 2, 2, 4}, {16, 4, 3, 3, 4}, {}});
    checker.exec({{8, 64, 12, 12, 4}, {256, 64, 3, 3, 4}, {}});
}
TEST_F(CUDA, CONVOLUTION_1X1_FORWARD) {
    using namespace convolution;
    std::vector<TestArg> args = get_1x1_args();
    Checker<ConvolutionForward> checker(handle_cuda());
    NormalRNG default_rng;
    for (auto&& arg : args) {
        float scale =
                1.0f / sqrt(arg.filter[1] * arg.filter[2] * arg.filter[3]);
        UniformFloatRNG rng(scale, 2 * scale);
        checker.set_dtype(0, dtype::Float32())
                .set_dtype(1, dtype::Float32())
                .set_rng(0, &default_rng)
                .set_rng(1, &default_rng)
                .set_epsilon(1e-3)
                .set_param(arg.param)
                .execs({arg.src, arg.filter, {}});
    }
}
TEST_F(CUDA, BENCHMARK_CONVOLUTION_1X1_FORWARD) {
    using namespace convolution;
    std::vector<TestArg> args = get_1x1_args();
    Benchmarker<ConvolutionForward> marker(handle_cuda());
    NormalRNG default_rng;
    for (auto&& arg : args) {
        float scale =
                1.0f / sqrt(arg.filter[1] * arg.filter[2] * arg.filter[3]);
        UniformFloatRNG rng(scale, 2 * scale);
        marker.set_dtype(0, dtype::Float32())
                .set_dtype(1, dtype::Float32())
                .set_rng(0, &default_rng)
                .set_rng(1, &default_rng)
                .set_param(arg.param)
                .execs({arg.src, arg.filter, {}});
    }
}
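// For the ConvolutionBackwardData tests below, exec() takes the layouts in the
// order {filter, dst, src}: the forward output (dst) acts as the incoming
// gradient and the forward input shape (src) is the gradient to be computed,
// which is why dst is deduced from (src, filter) via deduce_layout() first.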
TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA) {
    using namespace convolution;
    std::vector<TestArg> args = get_args_cuda_conv_bwd_data();
    Checker<ConvolutionBackwardData> checker(handle_cuda());
    NormalRNG default_rng;
    for (auto&& arg : args) {
        float scale =
                64.f / sqrt(arg.filter[0] * arg.filter[2] * arg.filter[3]);
        UniformFloatRNG rng(scale, 2 * scale);
        auto src = TensorLayout(arg.src, dtype::Float32());
        auto filter = TensorLayout(arg.filter, dtype::Float32());
        TensorLayout dst;
        {
            auto opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = arg.param;
            opr->deduce_layout(src, filter, dst);
        }
        src.dtype = dst.dtype = filter.dtype = dtype::Float32();
        checker.set_rng(0, &default_rng)
                .set_rng(1, &default_rng)
                .set_epsilon(1e-3)
                .set_param(arg.param)
                .exec(TensorLayoutArray{filter, dst, src});
        if (!cuda::is_compute_capability_required(6, 0)) {
            src.dtype = dst.dtype = filter.dtype = dtype::Float16();
            checker.set_rng(0, &rng)
                    .set_rng(1, &rng)
                    .set_epsilon(1e-1)
                    .set_param(arg.param)
                    .exec(TensorLayoutArray{filter, dst, src});
            arg.param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
            checker.set_rng(0, &rng)
                    .set_rng(1, &rng)
                    .set_epsilon(1e-1)
                    .set_param(arg.param)
                    .exec(TensorLayoutArray{filter, dst, src});
        }
        checker.set_before_exec_callback(AlgoChecker<ConvolutionBackwardData>(
                ExecutionPolicyAlgoName{"CONVOLUTION_BACKWARD_DATD_BFLOAT16",
                                        {{"MATMUL", {{"CUBLAS", {}}}}}}));
        src.dtype = dst.dtype = filter.dtype = dtype::BFloat16();
        arg.param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
        checker.set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .exec(TensorLayoutArray{filter, dst, src});
        checker.reset_before_exec_callback();
        checker.opr()->execution_policy() = {};
    }
}
TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA_CUDNN) {
    if (cuda::is_compute_capability_required(7, 0))
        return;
    using namespace convolution;
    Checker<ConvolutionBackwardData> checker(handle_cuda());
    checker.set_before_exec_callback(
            AlgoChecker<ConvolutionBackwardData>("CUDNN_CONVOLUTION"));
    //! noncontiguous case
    {
        param::Convolution param;
        param.pad_h = param.pad_w = 1;
        checker.set_param(param).execl(TensorLayoutArray{
                {{16, 16, 3, 3}, {144, 9, 3, 1}, dtype::Float32()},
                {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::Float32()},
                {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::Float32()},
        });
    }
}
TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA_MATMUL) {
    using namespace convolution;
    std::vector<TestArg> args = get_args_cuda_conv_bwd_data();
    Checker<ConvolutionBackwardData> checker(handle_cuda());
    checker.set_before_exec_callback(AlgoChecker<ConvolutionBackwardData>(
            ExecutionPolicyAlgoName{"MATMUL", {{"CUBLAS", {}}}}));
    NormalRNG default_rng;
    for (auto&& arg : args) {
        float scale =
                64.f / sqrt(arg.filter[0] * arg.filter[2] * arg.filter[3]);
        UniformFloatRNG rng(scale, 2 * scale);
        auto src = TensorLayout(arg.src, dtype::Float32());
        auto filter = TensorLayout(arg.filter, dtype::Float32());
        TensorLayout dst;
        {
            auto opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = arg.param;
            opr->deduce_layout(src, filter, dst);
        }
        src.dtype = dst.dtype = filter.dtype = dtype::Float32();
        checker.set_rng(0, &default_rng)
                .set_rng(1, &default_rng)
                .set_epsilon(1e-3)
                .set_param(arg.param)
                .exec(TensorLayoutArray{filter, dst, src});
    }
    //! noncontiguous case
    {
        param::Convolution param;
        param.pad_h = param.pad_w = 1;
        checker.set_param(param).execl(TensorLayoutArray{
                {{16, 16, 3, 3}, {144, 9, 3, 1}, dtype::Float32()},
                {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::Float32()},
                {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::Float32()},
        });
    }
}
TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA_INT8_NCHW4_DP4A) {
    if (!cuda::is_compute_capability_required(6, 1)) {
        printf("Skip CUDA.CONVOLUTION_BACKWARD_DATA_INT8_NCHW4_DP4A test as "
               "current device doesn't support\n");
        return;
    }

    using namespace convolution;
    std::vector<TestArg> args = get_args_int8_nchw4_conv_bwd_data();
    struct AlgoParam {
        int threadblock_m;
        int threadblock_n;
        int threadblock_k;
        int warp_m;
        int warp_n;
        int warp_k;
        int stage;
        std::string to_string() {
            return ssprintf("_%dX%dX%d_%dX%dX%d_%dstage", threadblock_m,
                            threadblock_n, threadblock_k, warp_m, warp_n,
                            warp_k, stage);
        }
    };
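    // Each AlgoParam is one (threadblock tile, warp tile, pipeline stage count)
    // configuration of the implicit-GEMM kernel; to_string() reproduces the
    // suffix under which that variant is registered.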
    std::vector<AlgoParam> all_params;
    all_params.emplace_back(AlgoParam{16, 64, 8, 16, 64, 8, 2});
    all_params.emplace_back(AlgoParam{16, 128, 16, 16, 64, 16, 2});
    all_params.emplace_back(AlgoParam{16, 128, 16, 16, 128, 16, 1});
    all_params.emplace_back(AlgoParam{32, 128, 32, 32, 64, 32, 2});
    all_params.emplace_back(AlgoParam{64, 128, 32, 64, 32, 32, 2});
    for (auto algo_param : all_params) {
        Checker<ConvolutionBackwardData> checker(handle_cuda());
        std::string algo_name(ssprintf("INT8_NCHW4_DOTPROD_IMPLICIT_GEMM%s",
                                       algo_param.to_string().c_str()));
        checker.set_before_exec_callback(
                AlgoChecker<ConvolutionBackwardData>(algo_name.c_str()));
        checker.set_epsilon(1 + 1e-3).set_max_avg_error(1e-1);
        for (auto&& arg : args) {
            UniformIntRNG rng(-3, 3);
            auto src = TensorLayout(arg.src, dtype::QuantizedS8{1.2f});
            auto filter = TensorLayout(arg.filter, dtype::QuantizedS8{1.3f});
            TensorLayout dst;
            dst.dtype = dtype::QuantizedS8{1.2f};
            {
                auto opr = handle_cuda()->create_operator<Convolution>();
                opr->param() = arg.param;
                opr->deduce_layout(src, filter, dst);
            }
            checker.set_rng(0, &rng).set_rng(1, &rng).set_param(arg.param).exec(
                    TensorLayoutArray{filter, dst, src});
        }
    }
}
TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA_INT8_NCHW_DP4A) {
    if (!cuda::is_compute_capability_required(6, 1)) {
        printf("Skip CUDA.CONVOLUTION_BACKWARD_DATA_INT8_NCHW_DP4A test as "
               "current device doesn't support\n");
        return;
    }

    using namespace convolution;
    std::vector<TestArg> args = get_args_int8_nchw_conv_bwd_data();
    Checker<ConvolutionBackwardData> checker(handle_cuda());
    checker.set_before_exec_callback(AlgoChecker<ConvolutionBackwardData>(
            "INT8_NCHW_DOTPROD_IMPLICIT_GEMM"));
    checker.set_epsilon(1 + 1e-3).set_max_avg_error(1e-1);
    for (auto&& arg : args) {
        UniformIntRNG rng(-3, 3);
        auto src = TensorLayout(arg.src, dtype::QuantizedS8{1.2f});
        auto filter = TensorLayout(arg.filter, dtype::QuantizedS8{1.3f});
        TensorLayout dst;
        dst.dtype = dtype::QuantizedS8{1.2f};
        {
            auto opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = arg.param;
            opr->deduce_layout(src, filter, dst);
        }
        checker.set_rng(0, &rng).set_rng(1, &rng).set_param(arg.param).exec(
                TensorLayoutArray{filter, dst, src});
        //! noncontiguous case
        {
            param::Convolution param;
            param.pad_h = param.pad_w = 1;
            checker.set_param(param).execl(TensorLayoutArray{
                    {{16, 16, 3, 3}, {144, 9, 3, 1}, dtype::QuantizedS8{1.3f}},
                    {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::QuantizedS8{1.2f}},
                    {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::QuantizedS8{1.2f}},
            });
        }
    }
}
TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA_FAILED_CUDNN7_5) {
    // BRAIN-481: this fails on architecture 7.0; remove the following if
    // statement once cuDNN has fixed the problem.
    if (cuda::is_compute_capability_required(7, 0))
        return;
    using namespace convolution;
    std::vector<TestArg> args = get_args_cudnn_7_5_failures();
    Checker<ConvolutionBackwardData> checker(handle_cuda());
    NormalRNG default_rng;
    for (auto&& arg : args) {
        float scale =
                128.f / sqrt(arg.filter[0] * arg.filter[2] * arg.filter[3]);
        scale = std::max(scale, 1.f);
        UniformFloatRNG rng(scale, 2 * scale);
        auto src = TensorLayout(arg.src, dtype::Float32());
        auto filter = TensorLayout(arg.filter, dtype::Float32());
        TensorLayout dst;
        {
            auto opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = arg.param;
            opr->deduce_layout(src, filter, dst);
        }
        src.dtype = dst.dtype = filter.dtype = dtype::Float32();
        checker.set_rng(0, &default_rng)
                .set_rng(1, &default_rng)
                .set_epsilon(1e-3)
                .set_param(arg.param)
                .exec(TensorLayoutArray{filter, dst, src});
        src.dtype = dst.dtype = filter.dtype = dtype::Float16();
        checker.set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .exec(TensorLayoutArray{filter, dst, src});
        arg.param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
        checker.set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .exec(TensorLayoutArray{filter, dst, src});
    }
}
TEST_F(CUDA, CONVOLUTION_BACKWARD_FILTER) {
    using namespace convolution;
    std::vector<TestArg> args = get_args();
    Checker<ConvolutionBackwardFilter> checker(handle_cuda());
    bool f16_checked = false;
    for (auto&& arg : args) {
        auto src = TensorLayout(arg.src, dtype::Float32());
        auto filter = TensorLayout(arg.filter, dtype::Float32());
        TensorLayout dst;
        {
            auto opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = arg.param;
            opr->deduce_layout(src, filter, dst);
        }
        float scale = 1.0f / sqrt(dst[2] * dst[3]);
        UniformFloatRNG rng(scale, 2 * scale);
        src.dtype = dst.dtype = filter.dtype = dtype::Float32();
        checker.set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-3)
                .set_param(arg.param)
                .exec(TensorLayoutArray{src, dst, filter});
        // reduce on large f16 array may introduce significant error
        if (dst.total_nr_elems() >= 1000 && f16_checked)
            continue;
        f16_checked = true;
        src.dtype = dst.dtype = filter.dtype = dtype::Float16();
        checker.set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .exec(TensorLayoutArray{src, dst, filter});
        arg.param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
        checker.set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .exec(TensorLayoutArray{src, dst, filter});
        checker.set_before_exec_callback(AlgoChecker<ConvolutionBackwardFilter>(
                ExecutionPolicyAlgoName{"CONVOLUTION_BACKWARD_FILTER_BFLOAT16",
                                        {{"MATMUL", {{"CUBLAS", {}}}}}}));
        src.dtype = dst.dtype = filter.dtype = dtype::BFloat16();
        checker.set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .exec(TensorLayoutArray{src, dst, filter});
        checker.reset_before_exec_callback();
        checker.opr()->execution_policy() = {};
    }
}
TEST_F(CUDA, CONVOLUTION_BACKWARD_FILTER_MATMUL) {
    using namespace convolution;
    std::vector<TestArg> args = get_args();
    Checker<ConvolutionBackwardFilter> checker(handle_cuda());
    checker.set_before_exec_callback(AlgoChecker<ConvolutionBackwardFilter>(
            ExecutionPolicyAlgoName{"MATMUL", {{"CUBLAS", {}}}}));
    for (auto&& arg : args) {
        auto src = TensorLayout(arg.src, dtype::Float32());
        auto filter = TensorLayout(arg.filter, dtype::Float32());
        TensorLayout dst;
        {
            auto opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = arg.param;
            opr->deduce_layout(src, filter, dst);
        }
        float scale = 1.0f / sqrt(dst[2] * dst[3]);
        UniformFloatRNG rng(scale, 2 * scale);
        src.dtype = dst.dtype = filter.dtype = dtype::Float32();
        checker.set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-3)
                .set_param(arg.param)
                .exec(TensorLayoutArray{src, dst, filter});
    }
    //! noncontiguous case
    {
        NormalRNG default_rng;
        param::Convolution param;
        param.pad_h = param.pad_w = 1;
        checker.set_rng(0, &default_rng)
                .set_rng(1, &default_rng)
                .set_param(param)
                .execl(TensorLayoutArray{
                        {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::Float32()},
                        {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::Float32()},
                        {{16, 16, 3, 3}, {144, 9, 3, 1}, dtype::Float32()}});
    }
}
TEST_F(CUDA, CONVOLUTION_BACKWARD_FILTER_CUDNN) {
    if (cuda::is_compute_capability_required(7, 0))
        return;
    using namespace convolution;
    Checker<ConvolutionBackwardFilter> checker(handle_cuda());
    checker.set_before_exec_callback(
            AlgoChecker<ConvolutionBackwardFilter>("CUDNN_CONVOLUTION"));
    //! noncontiguous case
    {
        param::Convolution param;
        param.pad_h = param.pad_w = 1;
        checker.set_param(param).execl(TensorLayoutArray{
                {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::Float32()},
                {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::Float32()},
                {{16, 16, 3, 3}, {144, 9, 3, 1}, dtype::Float32()}});
    }
}
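// eps_getter below picks a looser error tolerance for f16 runs (looser still
// for two-stage configurations) and for WINOGRAD_NONFUSED algorithms, which
// are numerically less exact than plain fp32 direct convolution.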
TEST_F(CUDA, CONV_CONFIG_COMBINATIONS) {
    auto eps_getter = [](bool f16, int stage, const char* name) -> float {
        if (f16) {
            return stage == 2 ? 0.5 : 0.2;
        }
        if (strstr(name, "WINOGRAD_NONFUSED"))
            return 0.3;
        return 1e-3;
    };
    convolution::test_conv_config_combinations(2, handle_cuda(), false, true,
                                               true, eps_getter, true);
    convolution::test_conv_config_combinations(3, handle_cuda(), false, true,
                                               true, eps_getter, true);
    convolution::test_conv_config_combinations(5, handle_cuda(), false, true,
                                               true, eps_getter, true);
}
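// Runs a single fp16 backward-data case against cuDNN's BWD_DATA_ALGO_1,
// selected by name with the exact CUDNN_VERSION_STRING suffix defined at the
// top of this file.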
TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA_1) {
    if (cuda::is_compute_capability_required(7, 0))
        return;
    using namespace convolution;
    Checker<ConvolutionBackwardData> checker(handle_cuda());
    checker.set_before_exec_callback(AlgoChecker<ConvolutionBackwardData>(
            "CUDNN_CONVOLUTION_BWD_DATA_ALGO_1" CUDNN_VERSION_STRING));
    NormalRNG default_rng;
    TensorShape s_filter = TensorShape{8, 8, 2, 2},
                s_src = TensorShape{2, 8, 18, 18};
    float scale = 1.0f / sqrt(s_filter[0] * s_filter[2] * s_filter[3]);
    UniformFloatRNG rng(scale, 2 * scale);
    auto src = TensorLayout(s_src, dtype::Float16());
    auto filter = TensorLayout(s_filter, dtype::Float16());
    TensorLayout dst;
    param::Convolution param;
    param.pad_h = param.pad_w = 2;
    param.stride_h = param.stride_w = 2;
    {
        auto opr = handle_cuda()->create_operator<Convolution>();
        opr->param() = param;
        opr->deduce_layout(src, filter, dst);
    }
    src.dtype = dst.dtype = filter.dtype = dtype::Float16();
    param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
    checker.set_rng(0, &rng)
            .set_rng(1, &rng)
            .set_epsilon(0.2)
            .set_param(param)
            .exec(TensorLayoutArray{filter, dst, src});
}
#if MEGDNN_WITH_BENCHMARK
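// The benchmarks below estimate work as flo = 2 * N * OC * IC * OH * OW * FH
// * FW: one multiply and one add per multiply-accumulate of a dense
// convolution. Dividing flo by the measured time gives FLOPS.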
TEST_F(CUDA, CONV_FWD_BENCHMARK) {
    auto run = [&](size_t N, size_t OC, size_t IC, size_t IH, size_t IW,
                   size_t SH = 1, size_t SW = 1, size_t FH = 1, size_t FW = 1,
                   size_t PH = 0, size_t PW = 0, bool fp16io_c32 = false) {
        auto benchmarker = Benchmarker<ConvolutionForward>(handle_cuda());
        benchmarker.set_dtype(0, dtype::Float16())
                .set_dtype(1, dtype::Float16())
                .set_dtype(2, dtype::Float16());
        ConvolutionForward::Param param;
        param.stride_h = SH;
        param.stride_w = SW;
        param.pad_h = PH;
        param.pad_w = PW;
        if (fp16io_c32) {
            param.compute_mode = ConvolutionForward::Param::ComputeMode::FLOAT32;
        }
        benchmarker.set_param(param);
        std::unique_ptr<OprProxy<ConvolutionForward>> proxy{
                new OprProxy<ConvolutionForward>{true}};
        benchmarker.set_proxy(proxy);
        size_t OH = (IH - FH + 2 * PH) / SH + 1;
        size_t OW = (IW - FW + 2 * PW) / SW + 1;
        auto time = benchmarker.execs(
                {{N, IC, IH, IW}, {OC, IC, FH, FW}, {N, OC, OH, OW}});
        time /= 1000.0 * 10.0;
        auto flo = (double)N * OC * IC * OH * OW * FH * FW * 2;
        auto flops = flo / time / 1e12;
        printf("comp_type %s: ", fp16io_c32 ? "32" : "16");
        printf("%.3fG FLO, flops %.3fTFLOPS\n", flo / 1e9, flops);
    };
    run(32, 512, 256, 56, 56, 1, 1, 1, 1, 0, 0, false);
    run(32, 512, 256, 56, 56, 1, 1, 1, 1, 0, 0, true);
}
TEST_F(CUDA, CONVOLUTION_FWD_BENCHMARK) {
    CUBenchmarker<ConvolutionForward> bench{handle_cuda()};
    std::unique_ptr<OprProxy<ConvolutionForward>> proxy{
            new OprProxy<ConvolutionForward>{true}};
    size_t RUNS = 10;
    bench.set_proxy(proxy).set_times(RUNS);
    auto run = [&](size_t N, size_t OC, size_t IC, size_t IH, size_t IW,
                   size_t FH, size_t SH, size_t PH) {
        bench.set_dtype(0, dtype::Float32())
                .set_dtype(1, dtype::Float32())
                .set_dtype(2, dtype::Float32());
        param::Convolution param;
        param.stride_h = param.stride_w = SH;
        param.pad_h = param.pad_w = PH;
        param.compute_mode = param::Convolution::ComputeMode::DEFAULT;
        bench.set_param(param);
        bench.proxy()->target_execution_policy.algo.reset();
        TensorLayout src{{N, IC, IH, IW}, dtype::Float32()},
                filter{{OC, IC, FH, FH}, dtype::Float32()};
        TensorLayout dst;
        {
            auto&& opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = param;
            opr->deduce_layout(src, filter, dst);
        }
        auto time_ms_fp32 = bench.execl({src, filter, dst}) / RUNS;
        src.dtype = filter.dtype = dst.dtype = dtype::Float16();
        bench.proxy()->target_execution_policy.algo.reset();
        bench.set_dtype(0, dtype::Float16())
                .set_dtype(1, dtype::Float16())
                .set_dtype(2, dtype::Float16());
        auto time_ms_true_fp16 = bench.execl({src, filter, dst}) / RUNS;
        param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
        bench.proxy()->target_execution_policy.algo.reset();
        bench.set_param(param);
        auto time_ms_pseudo_fp16 = bench.execl({src, filter, dst}) / RUNS;
        float flo = 2.0 * N * OC * IC * dst[2] * dst[3] * FH * FH;
        printf("inp=%s, kern=%s, dst=%s ", src.to_string().c_str(),
               filter.to_string().c_str(), dst.to_string().c_str());
        printf("time_fp32=%.2fms, flops=%.3fTFLOPS\ntime_true_fp16=%.2fms, "
               "flops=%.3fTFLOPS\ntime_pseudo_fp16=%.2fms, flops=%.3fTFLOPS\n",
               time_ms_fp32, (flo / (time_ms_fp32 * 1e9)), time_ms_true_fp16,
               (flo / (time_ms_true_fp16 * 1e9)), time_ms_pseudo_fp16,
               (flo / (time_ms_pseudo_fp16 * 1e9)));
        printf("speedup (true_fp16/fp32)=%.2f, (true_fp16/pseudo_fp16)=%.2f\n",
               time_ms_fp32 / time_ms_true_fp16,
               time_ms_pseudo_fp16 / time_ms_true_fp16);
    };
    run(32, 64, 3, 224, 224, 7, 2, 3);
    run(32, 128, 128, 28, 28, 3, 1, 1);
    run(32, 256, 256, 14, 14, 3, 1, 1);
    run(32, 512, 512, 7, 7, 3, 1, 1);
    run(32, 64, 64, 56, 56, 3, 1, 1);
    run(32, 512, 256, 56, 56, 1, 2, 0);
    run(32, 1024, 512, 28, 28, 1, 2, 0);
    run(32, 2048, 1024, 14, 14, 1, 2, 0);
    run(32, 512, 128, 28, 28, 1, 1, 0);
    run(32, 128, 512, 28, 28, 1, 1, 0);
    run(32, 1024, 256, 14, 14, 1, 1, 0);
    run(32, 256, 1024, 14, 14, 1, 1, 0);
    run(32, 2048, 512, 7, 7, 1, 1, 0);
    run(32, 512, 2048, 7, 7, 1, 1, 0);
    run(32, 256, 64, 56, 56, 1, 1, 0);
    run(32, 64, 256, 56, 56, 1, 1, 0);
    run(32, 128, 256, 56, 56, 1, 2, 0);
    run(32, 256, 512, 28, 28, 1, 2, 0);
    run(32, 512, 1024, 14, 14, 1, 2, 0);
    run(32, 64, 64, 56, 56, 1, 1, 0);
}
TEST_F(CUDA, CONVOLUTION_BWD_DATA_BENCHMARK) {
    CUBenchmarker<ConvolutionBackwardData> bench{handle_cuda()};
    std::unique_ptr<OprProxy<ConvolutionBackwardData>> proxy{
            new OprProxy<ConvolutionBackwardData>{true}};
    size_t RUNS = 10;
    bench.set_proxy(proxy).set_times(RUNS);
    auto run = [&](size_t N, size_t OC, size_t IC, size_t IH, size_t IW,
                   size_t FH, size_t SH, size_t PH) {
        bench.set_dtype(0, dtype::Float32())
                .set_dtype(1, dtype::Float32())
                .set_dtype(2, dtype::Float32());
        param::Convolution param;
        param.stride_h = param.stride_w = SH;
        param.pad_h = param.pad_w = PH;
        param.compute_mode = param::Convolution::ComputeMode::DEFAULT;
        bench.set_param(param);
        bench.proxy()->target_execution_policy.algo.reset();
        TensorLayout src{{N, IC, IH, IW}, dtype::Float32()},
                filter{{OC, IC, FH, FH}, dtype::Float32()};
        TensorLayout dst;
        {
            auto&& opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = param;
            opr->deduce_layout(src, filter, dst);
        }
        auto time_ms_fp32 = bench.execl({filter, dst, src}) / RUNS;
        src.dtype = filter.dtype = dst.dtype = dtype::Float16();
        bench.proxy()->target_execution_policy.algo.reset();
        bench.set_dtype(0, dtype::Float16())
                .set_dtype(1, dtype::Float16())
                .set_dtype(2, dtype::Float16());
        auto time_ms_true_fp16 = bench.execl({filter, dst, src}) / RUNS;
        param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
        bench.proxy()->target_execution_policy.algo.reset();
        bench.set_param(param);
        auto time_ms_pseudo_fp16 = bench.execl({filter, dst, src}) / RUNS;
        float flo = 2.0 * N * OC * IC * dst[2] * dst[3] * FH * FH;
        printf("inp=%s, kern=%s, dst=%s ", src.to_string().c_str(),
               filter.to_string().c_str(), dst.to_string().c_str());
        printf("time_fp32=%.2fms, flops=%.3fTFLOPS\ntime_true_fp16=%.2fms, "
               "flops=%.3fTFLOPS\ntime_pseudo_fp16=%.2fms, flops=%.3fTFLOPS\n",
               time_ms_fp32, (flo / (time_ms_fp32 * 1e9)), time_ms_true_fp16,
               (flo / (time_ms_true_fp16 * 1e9)), time_ms_pseudo_fp16,
               (flo / (time_ms_pseudo_fp16 * 1e9)));
        printf("speedup (true_fp16/fp32)=%.2f, (true_fp16/pseudo_fp16)=%.2f\n",
               time_ms_fp32 / time_ms_true_fp16,
               time_ms_pseudo_fp16 / time_ms_true_fp16);
    };
    run(32, 64, 3, 224, 224, 7, 2, 3);
    run(32, 128, 128, 28, 28, 3, 1, 1);
    run(32, 256, 256, 14, 14, 3, 1, 1);
    run(32, 512, 512, 7, 7, 3, 1, 1);
    run(32, 64, 64, 56, 56, 3, 1, 1);
    run(32, 512, 256, 56, 56, 1, 2, 0);
    run(32, 1024, 512, 28, 28, 1, 2, 0);
    run(32, 2048, 1024, 14, 14, 1, 2, 0);
    run(32, 512, 128, 28, 28, 1, 1, 0);
    run(32, 128, 512, 28, 28, 1, 1, 0);
    run(32, 1024, 256, 14, 14, 1, 1, 0);
    run(32, 256, 1024, 14, 14, 1, 1, 0);
    run(32, 2048, 512, 7, 7, 1, 1, 0);
    run(32, 512, 2048, 7, 7, 1, 1, 0);
    run(32, 256, 64, 56, 56, 1, 1, 0);
    run(32, 64, 256, 56, 56, 1, 1, 0);
    run(32, 128, 256, 56, 56, 1, 2, 0);
    run(32, 256, 512, 28, 28, 1, 2, 0);
    run(32, 512, 1024, 14, 14, 1, 2, 0);
    run(32, 64, 64, 56, 56, 1, 1, 0);
}
TEST_F(CUDA, BENCHMARK_CONVOLUTION_BWD_DATA_BF16) {
    CUBenchmarker<ConvolutionBackwardData> bench{handle_cuda()};
    std::unique_ptr<OprProxy<ConvolutionBackwardData>> proxy{
            new OprProxy<ConvolutionBackwardData>{true}};
    size_t RUNS = 10;
    bench.set_proxy(proxy).set_times(RUNS);
    auto run = [&](size_t N, size_t OC, size_t IC, size_t IH, size_t IW,
                   size_t FH, size_t SH, size_t PH) {
        bench.set_dtype(0, dtype::BFloat16())
                .set_dtype(1, dtype::BFloat16())
                .set_dtype(2, dtype::BFloat16());
        param::Convolution param;
        param.stride_h = param.stride_w = SH;
        param.pad_h = param.pad_w = PH;
        param.compute_mode = param::Convolution::ComputeMode::DEFAULT;
        bench.set_param(param);
        bench.proxy()->target_execution_policy = {};
        TensorLayout src{{N, IC, IH, IW}, dtype::BFloat16()},
                filter{{OC, IC, FH, FH}, dtype::BFloat16()};
        TensorLayout dst;
        {
            auto&& opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = param;
            opr->deduce_layout(src, filter, dst);
        }
        auto used = bench.execl({filter, dst, src}) / RUNS;
        float flo = 2.0 * N * OC * IC * dst[2] * dst[3] * FH * FH;
        printf("inp=%s, kern=%s, dst=%s ", src.to_string().c_str(),
               filter.to_string().c_str(), dst.to_string().c_str());
        printf("time_bf16=%.2fms, flops=%.3fTFLOPS\n", used,
               (flo / (used * 1e9)));
    };
    run(32, 64, 3, 224, 224, 7, 2, 3);
    run(32, 128, 128, 28, 28, 3, 1, 1);
    run(32, 256, 256, 14, 14, 3, 1, 1);
    run(32, 512, 512, 7, 7, 3, 1, 1);
    run(32, 64, 64, 56, 56, 3, 1, 1);
    run(32, 512, 256, 56, 56, 1, 2, 0);
    run(32, 1024, 512, 28, 28, 1, 2, 0);
    run(32, 2048, 1024, 14, 14, 1, 2, 0);
    run(32, 512, 128, 28, 28, 1, 1, 0);
    run(32, 128, 512, 28, 28, 1, 1, 0);
    run(32, 1024, 256, 14, 14, 1, 1, 0);
    run(32, 256, 1024, 14, 14, 1, 1, 0);
    run(32, 2048, 512, 7, 7, 1, 1, 0);
    run(32, 512, 2048, 7, 7, 1, 1, 0);
    run(32, 256, 64, 56, 56, 1, 1, 0);
    run(32, 64, 256, 56, 56, 1, 1, 0);
    run(32, 128, 256, 56, 56, 1, 2, 0);
    run(32, 256, 512, 28, 28, 1, 2, 0);
    run(32, 512, 1024, 14, 14, 1, 2, 0);
    run(32, 64, 64, 56, 56, 1, 1, 0);
}
TEST_F(CUDA, BENCHMARK_CONVOLUTION_BWD_DATA_INT8_DP4A) {
    CUBenchmarker<ConvolutionBackwardData> bench{handle_cuda()};
    std::unique_ptr<OprProxy<ConvolutionBackwardData>> proxy{
            new OprProxy<ConvolutionBackwardData>{true}};
    size_t RUNS = 10;
    bench.set_proxy(proxy).set_times(RUNS);
    auto run = [&](size_t N, size_t OC, size_t IC, size_t IH, size_t IW,
                   size_t FH, size_t SH, size_t PH) {
        bench.set_dtype(0, dtype::QuantizedS8{1.0f})
                .set_dtype(1, dtype::QuantizedS8{1.0f})
                .set_dtype(2, dtype::QuantizedS8{1.0f});
        param::Convolution param;
        param.format = param::Convolution::Format::NCHW4;
        param.stride_h = param.stride_w = SH;
        param.pad_h = param.pad_w = PH;
        param.compute_mode = param::Convolution::ComputeMode::DEFAULT;
        bench.set_param(param);
        bench.proxy()->target_execution_policy = {};
        TensorLayout src{{N, IC / 4, IH, IW, 4}, dtype::QuantizedS8{1.0f}},
                filter{{OC, IC / 4, FH, FH, 4}, dtype::QuantizedS8{1.0f}};
        TensorLayout dst;
        dst.dtype = dtype::QuantizedS8{1.0f};
        {
            auto&& opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = param;
            opr->deduce_layout(src, filter, dst);
        }
        auto used = bench.execl({filter, dst, src}) / RUNS;
        float flo = 2.0 * N * OC * IC * dst[2] * dst[3] * FH * FH;
        printf("inp=%s, kern=%s, dst=%s ", src.to_string().c_str(),
               filter.to_string().c_str(), dst.to_string().c_str());
        printf("time_int8=%.2fms, flops=%.3fTFLOPS\n", used,
               (flo / (used * 1e9)));
    };
    run(64, 32, 32, 92, 180, 4, 2, 2);
    run(64, 32, 32, 46, 80, 4, 2, 2);
    run(16, 16, 16, 92, 180, 4, 2, 2);
    run(16, 16, 16, 46, 80, 4, 2, 2);
}
TEST_F(CUDA, CONVOLUTION_BWD_FILTER_BENCHMARK) {
    CUBenchmarker<ConvolutionBackwardFilter> bench{handle_cuda()};
    std::unique_ptr<OprProxy<ConvolutionBackwardFilter>> proxy{
            new OprProxy<ConvolutionBackwardFilter>{true}};
    size_t RUNS = 10;
    bench.set_proxy(proxy).set_times(RUNS);
    auto run = [&](size_t N, size_t OC, size_t IC, size_t IH, size_t IW,
                   size_t FH, size_t SH, size_t PH) {
        bench.set_dtype(0, dtype::Float32())
                .set_dtype(1, dtype::Float32())
                .set_dtype(2, dtype::Float32());
        param::Convolution param;
        param.stride_h = param.stride_w = SH;
        param.pad_h = param.pad_w = PH;
        param.compute_mode = param::Convolution::ComputeMode::DEFAULT;
        bench.set_param(param);
        bench.proxy()->target_execution_policy.algo.reset();
        TensorLayout src{{N, IC, IH, IW}, dtype::Float32()},
                filter{{OC, IC, FH, FH}, dtype::Float32()};
        TensorLayout dst;
        {
            auto&& opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = param;
            opr->deduce_layout(src, filter, dst);
        }
        auto time_ms_fp32 = bench.execl({src, dst, filter}) / RUNS;
        src.dtype = filter.dtype = dst.dtype = dtype::Float16();
        bench.proxy()->target_execution_policy.algo.reset();
        bench.set_dtype(0, dtype::Float16())
                .set_dtype(1, dtype::Float16())
                .set_dtype(2, dtype::Float16());
        auto time_ms_true_fp16 = bench.execl({src, dst, filter}) / RUNS;
        param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
        bench.proxy()->target_execution_policy.algo.reset();
        bench.set_param(param);
        auto time_ms_pseudo_fp16 = bench.execl({src, dst, filter}) / RUNS;
        float flo = 2.0 * N * OC * IC * dst[2] * dst[3] * FH * FH;
        printf("inp=%s, kern=%s, dst=%s ", src.to_string().c_str(),
               filter.to_string().c_str(), dst.to_string().c_str());
        printf("time_fp32=%.2fms, flops=%.3fTFLOPS\ntime_true_fp16=%.2fms, "
               "flops=%.3fTFLOPS\ntime_pseudo_fp16=%.2fms, flops=%.3fTFLOPS\n",
               time_ms_fp32, (flo / (time_ms_fp32 * 1e9)), time_ms_true_fp16,
               (flo / (time_ms_true_fp16 * 1e9)), time_ms_pseudo_fp16,
               (flo / (time_ms_pseudo_fp16 * 1e9)));
        printf("speedup (true_fp16/fp32)=%.2f, (true_fp16/pseudo_fp16)=%.2f\n",
               time_ms_fp32 / time_ms_true_fp16,
               time_ms_pseudo_fp16 / time_ms_true_fp16);
    };
    run(32, 64, 3, 224, 224, 7, 2, 3);
    run(32, 128, 128, 28, 28, 3, 1, 1);
    run(32, 256, 256, 14, 14, 3, 1, 1);
    run(32, 512, 512, 7, 7, 3, 1, 1);
    run(32, 64, 64, 56, 56, 3, 1, 1);
    run(32, 512, 256, 56, 56, 1, 2, 0);
    run(32, 1024, 512, 28, 28, 1, 2, 0);
    run(32, 2048, 1024, 14, 14, 1, 2, 0);
    run(32, 512, 128, 28, 28, 1, 1, 0);
    run(32, 128, 512, 28, 28, 1, 1, 0);
    run(32, 1024, 256, 14, 14, 1, 1, 0);
    run(32, 256, 1024, 14, 14, 1, 1, 0);
    run(32, 2048, 512, 7, 7, 1, 1, 0);
    run(32, 512, 2048, 7, 7, 1, 1, 0);
    run(32, 256, 64, 56, 56, 1, 1, 0);
    run(32, 64, 256, 56, 56, 1, 1, 0);
    run(32, 128, 256, 56, 56, 1, 2, 0);
    run(32, 256, 512, 28, 28, 1, 2, 0);
    run(32, 512, 1024, 14, 14, 1, 2, 0);
    run(32, 64, 64, 56, 56, 1, 1, 0);
}
#endif

#undef CUDNN_VERSION_STRING
#undef V
#undef V1

}  // namespace test
}  // namespace megdnn

// vim: syntax=cpp.doxygen

The MegEngine package ships with the CUDA environment needed to run code on a GPU, so there is no separate CPU and GPU build to choose between. To run GPU programs, make sure the machine has a GPU installed along with a working driver. If you would like to try deep-learning development on cloud GPUs, visit the MegStudio platform.
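As a quick sanity check before running the GPU tests above, a minimal standalone program can verify that a CUDA-capable device and driver are visible. This is an illustrative sketch, not part of the MegEngine test suite; it assumes only the CUDA runtime is installed and uses the standard cudaGetDeviceCount / cudaGetDeviceProperties APIs:

#include <cstdio>
#include <cuda_runtime.h>

int main() {
    int count = 0;
    // Returns an error if no driver or no device is present.
    cudaError_t err = cudaGetDeviceCount(&count);
    if (err != cudaSuccess || count == 0) {
        std::printf("no usable CUDA device: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    std::printf("found %d CUDA device(s); device 0: %s (sm_%d%d)\n", count,
                prop.name, prop.major, prop.minor);
    return 0;
}

Several tests in this file gate themselves on is_compute_capability_required(6, 1) or similar checks; the sm_XY value printed here tells you which of those your device can actually run.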