/**
 * \file src/plugin/impl/opr_footprint.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "megbrain/plugin/opr_footprint.h"
#include "megbrain/opr/basic_arith.h"
#include "megbrain/opr/blas.h"
#include "megbrain/opr/dnn/convolution.h"
#include "megbrain/opr/dnn/images2neibs.h"
#include "megbrain/opr/dnn/local.h"
#include "megbrain/opr/dnn/lrn.h"
#include "megbrain/opr/dnn/pooling.h"
#include "megbrain/opr/dnn/adaptive_pooling.h"
#include "megbrain/opr/dnn/roi_pooling.h"
#include "megbrain/opr/dnn/roi_align.h"
#include "megbrain/opr/imgproc.h"
#include "megbrain/opr/standalone/nms_opr.h"
#include "megbrain/opr/io.h"
#include "megbrain/opr/tensor_manip.h"
#include "megbrain/opr/rand.h"
#include "megbrain/opr/dnn/batch_norm.h"
#include "megbrain/opr/misc.h"
#include "megbrain/opr/indexing.h"
#include "megbrain/opr/internal/indexing_helper.h"
#include "megbrain/opr/nn_int.h"
#include "megbrain/opr/tensor_gen.h"
#if MGB_ENABLE_JSON
#include "megdnn/opr_param_json.h"
#endif
#include "megbrain/utils/hash_ct.h"
#include "midout.h"

MIDOUT_DECL(megbrain_opr_footprint)
#define MIDOUT_B(...) \
    MIDOUT_BEGIN(megbrain_opr_footprint, __VA_ARGS__) {
#define MIDOUT_E \
    }            \
    MIDOUT_END();

using namespace mgb;
namespace {
template <class T>
uint64_t opr_footprint_func(cg::OperatorNodeBase* opr);
// Elemwise
template <>
uint64_t opr_footprint_func<opr::Elemwise>(cg::OperatorNodeBase* opr) {
    return opr->output()[0]->shape().total_nr_elems() *
           (std::max<size_t>(opr->input().size(), 2) - 1);
}
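// Editor's note (a sketch, not in the original source): an Elemwise opr fuses
// its inputs into one expression, so an N-input mode counts roughly
// max(N, 2) - 1 ops per output element; e.g. the 3-input FUSE_MUL_ADD3 mode
// counts 2 ops per element, while a 1-input mode still counts at least 1.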
// AddUpdate
template <>
uint64_t opr_footprint_func<opr::AddUpdate>(cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 2,
               "AddUpdate opr should have two inputs");
    auto&& out_shape = opr->output()[0]->shape();
    return out_shape.total_nr_elems() * 3;
}
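// Editor's note (an assumption based on AddUpdate's documented semantics):
// AddUpdate computes dest <- dest * alpha + delta * beta + bias, and the
// factor 3 approximates that as three per-element operations.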
template <class Conv>
uint64_t eval_conv_computation(const TensorShape& src_shape,
                               const TensorShape& filter_shape,
                               const TensorShape& dst_shape,
                               cg::OperatorNodeBase* opr) {
    using Param = opr::ConvolutionForward::Param;
    auto&& param = opr->cast_final_safe<Conv>().param();
    if (param.format == Param::Format::NHWCD4) {
        size_t fh, fw;
        size_t group = 1;
        if (param.sparse == Param::Sparse::DENSE) {
            fh = filter_shape[1];
            fw = filter_shape[2];
            group = 1;
        } else {
            // group conv (channel-wise conv included)
            mgb_assert(param.sparse == Param::Sparse::GROUP);
            fh = filter_shape[2];
            fw = filter_shape[3];
            group = filter_shape[0];
            if (filter_shape.ndim == 5) {
                group *= 4;
            }
        }
        return dst_shape.total_nr_elems() * fh * fw *
               src_shape[2] * 4 / group * 2;
    }
    auto eval_conv_computation_nchwx = [&param, &src_shape, &filter_shape,
                                        &dst_shape]() -> uint64_t {
        size_t fh, fw;
        bool hybrid_nchwx = false;
        size_t group = 1;
        if (param.sparse == Param::Sparse::DENSE) {
            //! in hybrid nchwxx mode, src is NCHW while dst is NCHWxx
            if (dst_shape.ndim == 5 && src_shape.ndim == 4) {
                fh = filter_shape[1];
                fw = filter_shape[2];
                hybrid_nchwx = true;
            } else {
                fh = filter_shape[2];
                fw = filter_shape[3];
            }
            group = 1;
        } else {
            mgb_assert(param.sparse == Param::Sparse::GROUP);
            fh = filter_shape[3];
            fw = filter_shape[4];
            group = filter_shape[0];
        }
        if (param.format == Param::Format::NCHW88) {
            //! for channel-wise conv the weight layout is
            //! {group/8, FH, FW, 1, 1, 8}
            if (filter_shape[1] == 1 && filter_shape[2] == 1) {
                group *= 8;
            }
            size_t computation = dst_shape.total_nr_elems() * fh * fw *
                                 src_shape[1] / group * 2;
            return hybrid_nchwx ? computation : computation * 8;
        }
        if (param.format == Param::Format::NCHW44 ||
            param.format == Param::Format::NCHW44_DOT) {
            //! for channel-wise conv the weight layout is
            //! {group/4, FH, FW, 1, 1, 4}
            if (filter_shape[1] == 1 && filter_shape[2] == 1 &&
                filter_shape.ndim == 6) {
                group *= 4;
            }
            size_t computation = dst_shape.total_nr_elems() * fh * fw *
                                 src_shape[1] / group * 2;
            return hybrid_nchwx ? computation : computation * 4;
        }
        size_t packed_size;
        if (param.format == Param::Format::NCHW64) {
            packed_size = 64;
        } else if (param.format == Param::Format::NCHW32 ||
                   param.format == Param::Format::NCHW32_NCHW4) {
            packed_size = 32;
        } else {
            mgb_assert(param.format == Param::Format::NCHW4 ||
                               param.format == Param::Format::NCHW4_NHWC ||
                               param.format == Param::Format::NCHW4_NCHW ||
                               param.format == Param::Format::NCHW4_NCHW32,
                       "format should be "
                       "NCHW4/NCHW4_NCHW/NCHW4_NHWC/NCHW4_NCHW32");
            packed_size = 4;
        }
        return dst_shape.total_nr_elems() * fh * fw * src_shape[1] *
               packed_size / group * 2;
    };
    auto eval_conv_computation_chwn4 = [&param, &src_shape, &filter_shape,
                                        &dst_shape]() -> uint64_t {
        size_t fh, fw;
        size_t group = 1;
        if (param.sparse == Param::Sparse::DENSE) {
            fh = filter_shape[1];
            fw = filter_shape[2];
            group = 1;
        } else {
            mgb_assert(param.sparse == Param::Sparse::GROUP);
            fh = filter_shape[2];
            fw = filter_shape[3];
            group = filter_shape[0];
        }
        return dst_shape.total_nr_elems() * fh * fw * src_shape[0] * 4 /
               group * 2;
    };
    if (param.format == Param::Format::NCHW4 ||
        param.format == Param::Format::NCHW4_NCHW ||
        param.format == Param::Format::NCHW4_NHWC ||
        param.format == Param::Format::NCHW4_NCHW32 ||
        param.format == Param::Format::NCHW88 ||
        param.format == Param::Format::NCHW44 ||
        param.format == Param::Format::NCHW44_DOT ||
        param.format == Param::Format::NCHW32 ||
        param.format == Param::Format::NCHW32_NCHW4 ||
        param.format == Param::Format::NCHW64) {
        return eval_conv_computation_nchwx();
    }
    if (param.format == Param::Format::CHWN4) {
        return eval_conv_computation_chwn4();
    }
    size_t cpos;
    size_t spatial_start;
    size_t group = 1;
    switch (param.format) {
        case Param::Format::NCHW:
            cpos = 1;
            spatial_start = 2;
            break;
        case Param::Format::NHWC:
            cpos = 3;
            spatial_start = 1;
            break;
        default:
            mgb_assert(false, "Unknown CONV Param::Format type");
    }
    switch (param.sparse) {
        case Param::Sparse::DENSE:
            mgb_assert(filter_shape.ndim == 4 || filter_shape.ndim == 6,
                       "DENSE conv filter shape dimension should be "
                       "4/6(winograd mk4)");
            break;
        case Param::Sparse::GROUP:
            mgb_assert(filter_shape.ndim == 5 || filter_shape.ndim == 7,
                       "GROUP conv filter shape dimension should be "
                       "5/7(winograd mk4)");
            spatial_start++;
            group = filter_shape[0];
            break;
        default:
            mgb_assert(false, "Unknown CONV Param::Sparse type");
    }
    uint64_t fh = static_cast<uint64_t>(filter_shape[spatial_start]);
    uint64_t fw = static_cast<uint64_t>(filter_shape[spatial_start + 1]);
    // mul and add are counted as 2 operations
    return dst_shape.total_nr_elems() * fh * fw *
           static_cast<uint64_t>(src_shape[cpos]) / group * 2;
}
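// Editor's note, a worked example with assumed shapes (not from the source):
// a dense NCHW convolution with src {1, 64, 56, 56}, filter {128, 64, 3, 3}
// and dst {1, 128, 56, 56} counts
//     (1 * 128 * 56 * 56) * 3 * 3 * 64 / 1 * 2 = 462422016 ops (~0.46 GFLOPs).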
// ConvolutionForward
template <>
uint64_t opr_footprint_func<opr::ConvolutionForward>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 2,
               "ConvolutionFwd opr should have two inputs");
    auto&& out_shape = opr->output()[0]->shape();
    auto&& src_shape = opr->input()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    return eval_conv_computation<opr::ConvolutionForward>(
            src_shape, filter_shape, out_shape, opr);
}
// ConvBiasForward
template <>
uint64_t opr_footprint_func<opr::ConvBiasForward>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 2 || opr->input().size() == 3 ||
                       opr->input().size() == 4,
               "ConvBiasForward opr should have two/three/four inputs");
    auto&& out_shape = opr->output()[0]->shape();
    auto&& src_shape = opr->input()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    uint64_t res = eval_conv_computation<opr::ConvBiasForward>(
            src_shape, filter_shape, out_shape, opr);
    if (opr->input().size() == 3) {
        // one extra add per output element for the bias
        res += out_shape.total_nr_elems();
    }
    return res;
}
// ConvolutionBackwardData
template <>
uint64_t opr_footprint_func<opr::ConvolutionBackwardData>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 2 || opr->input().size() == 3,
               "ConvolutionBackwardData opr should have two or three inputs");
    auto&& filter_shape = opr->input()[0]->shape();
    auto&& diff_shape = opr->input()[1]->shape();
    auto&& grad_shape = opr->output()[0]->shape();
    return eval_conv_computation<opr::ConvolutionBackwardData>(
            grad_shape, filter_shape, diff_shape, opr);
}
// ConvolutionBackwardFilter
template <>
uint64_t opr_footprint_func<opr::ConvolutionBackwardFilter>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 3,
               "ConvolutionBackwardFilter opr should have three inputs");
    auto&& filter_shape = opr->input()[2]->shape();
    auto&& diff_shape = opr->input()[1]->shape();
    auto&& src_shape = opr->input()[0]->shape();
    return eval_conv_computation<opr::ConvolutionBackwardFilter>(
            src_shape, filter_shape, diff_shape, opr);
}
// MatrixMul
template <>
uint64_t opr_footprint_func<opr::MatrixMul>(cg::OperatorNodeBase* opr) {
    auto&& mopr = opr->cast_final_safe<opr::MatrixMul>();
    auto &&i0 = opr->input(0)->shape(), &&i1 = opr->input(1)->shape();
    mgb_assert(i0.ndim == 2 && i1.ndim == 2);
    auto m = i0[0], k0 = i0[1], k1 = i1[0], n = i1[1];
    if (mopr.param().transposeA) {
        std::swap(m, k0);
    }
    if (mopr.param().transposeB) {
        std::swap(k1, n);
    }
    mgb_assert(k0 == k1);
    // mul and add are counted as 2 operations
    return m * k0 * n * 2;
}
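// Editor's note, a worked example with assumed shapes: multiplying a
// {1024, 1024} matrix by a {1024, 1024} matrix counts
//     1024 * 1024 * 1024 * 2 = 2147483648 ops (~2.15 GFLOPs).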
template <>
uint64_t opr_footprint_func<opr::LocalShareForward>(cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 2,
               "LocalShare opr should have two inputs");
    auto&& out_shape = opr->output()[0]->shape();
    auto&& src_shape = opr->input()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    using Param = opr::LocalShareForward::Param;
    auto&& param = opr->cast_final_safe<opr::LocalShareForward>().param();
    mgb_assert(param.format == Param::Format::NCHW);
    size_t groups = 1;
    size_t kern_spatial_pos = 3;
    if (param.sparse == Param::Sparse::GROUP) {
        groups = filter_shape[0];
        kern_spatial_pos = 4;
    }
    size_t fh = filter_shape[kern_spatial_pos],
           fw = filter_shape[kern_spatial_pos + 1];
    return out_shape.total_nr_elems() * fh * fw * src_shape[1] * 2 / groups;
}
template <>
uint64_t opr_footprint_func<opr::LocalShareBackwardData>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 3,
               "LocalShareBackwardData opr should have three inputs");
    auto&& filter_shape = opr->input()[0]->shape();
    auto&& diff_shape = opr->input()[1]->shape();
    auto&& grad_shape = opr->output()[0]->shape();
    using Param = opr::LocalShareForward::Param;
    auto&& param = opr->cast_final_safe<opr::LocalShareBackwardData>().param();
    mgb_assert(param.format == Param::Format::NCHW);
    size_t groups = 1;
    size_t kern_spatial_pos = 3;
    if (param.sparse == Param::Sparse::GROUP) {
        groups = filter_shape[0];
        kern_spatial_pos = 4;
    }
    size_t fh = filter_shape[kern_spatial_pos],
           fw = filter_shape[kern_spatial_pos + 1];
    return diff_shape.total_nr_elems() * fh * fw * grad_shape[1] * 2 / groups;
}
template <>
uint64_t opr_footprint_func<opr::LocalShareBackwardFilter>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 3,
               "LocalShareBackwardFilter opr should have three inputs");
    auto&& src_shape = opr->input()[0]->shape();
    auto&& diff_shape = opr->input()[1]->shape();
    auto&& grad_shape = opr->output()[0]->shape();
    using Param = opr::LocalShareForward::Param;
    auto&& param = opr->cast_final_safe<opr::LocalShareBackwardFilter>().param();
    mgb_assert(param.format == Param::Format::NCHW);
    size_t groups = 1;
    size_t kern_spatial_pos = 3;
    if (param.sparse == Param::Sparse::GROUP) {
        groups = grad_shape[0];
        kern_spatial_pos = 4;
    }
    size_t fh = grad_shape[kern_spatial_pos],
           fw = grad_shape[kern_spatial_pos + 1];
    return diff_shape.total_nr_elems() * fh * fw * src_shape[1] * 2 / groups;
}
template <>
uint64_t opr_footprint_func<opr::DeformableConvForward>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 4,
               "DeformableConvForward opr should have four inputs");
    auto&& out_shape = opr->output()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    using Param = opr::DeformableConvForward::Param;
    auto&& param = opr->cast_final_safe<opr::DeformableConvForward>().param();
    size_t fh, fw, icpg;
    mgb_assert(param.format == Param::Format::NCHW);
    if (param.sparse == Param::Sparse::GROUP) {
        icpg = filter_shape[2];
        fh = filter_shape[3], fw = filter_shape[4];
    } else {
        icpg = filter_shape[1];
        fh = filter_shape[2], fw = filter_shape[3];
    }
    //! conv(1 mul), mask(1 mul), accumulate(1 add)
    return out_shape.total_nr_elems() * fh * fw * icpg * 3;
}
template <>
uint64_t opr_footprint_func<opr::DeformableConvBackwardFilter>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 5,
               "DeformableConvBackwardFilter opr should have five inputs");
    auto&& out_shape = opr->output()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    using Param = opr::DeformableConvBackwardFilter::Param;
    auto&& param =
            opr->cast_final_safe<opr::DeformableConvBackwardFilter>().param();
    size_t fh, fw, icpg;
    mgb_assert(param.format == Param::Format::NCHW);
    if (param.sparse == Param::Sparse::GROUP) {
        icpg = filter_shape[2];
        fh = filter_shape[3], fw = filter_shape[4];
    } else {
        icpg = filter_shape[1];
        fh = filter_shape[2], fw = filter_shape[3];
    }
    //! deconv(1 mul), mask(1 mul), accumulate(1 add), bilinear(4 add, 4 mul,
    //! skip)
    return out_shape.total_nr_elems() * fh * fw * icpg * 3;
}
template <>
uint64_t opr_footprint_func<opr::DeformableConvBackwardData>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 5,
               "DeformableConvBackwardData opr should have five inputs");
    auto&& out_shape = opr->output()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    using Param = opr::DeformableConvForward::Param;
    auto&& param =
            opr->cast_final_safe<opr::DeformableConvBackwardData>().param();
    size_t fh, fw, icpg;
    mgb_assert(param.format == Param::Format::NCHW);
    if (param.sparse == Param::Sparse::GROUP) {
        icpg = filter_shape[2];
        fh = filter_shape[3], fw = filter_shape[4];
    } else {
        icpg = filter_shape[1];
        fh = filter_shape[2], fw = filter_shape[3];
    }
    //! deconv(1 mul), mask(1 mul), accumulate(1 add), grad_weight(1 mul, skip),
    //! grad_coord(4 mul, 4 add)
    return out_shape.total_nr_elems() * fh * fw * icpg * 12;
}
template <>
uint64_t opr_footprint_func<opr::BatchConvBiasForward>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 2 || opr->input().size() == 3 ||
                       opr->input().size() == 4,
               "BatchConvBias opr should have two/three/four inputs");
    auto&& out_shape = opr->output()[0]->shape();
    auto&& src_shape = opr->input()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    using Param = opr::BatchConvBiasForward::Param;
    auto&& param = opr->cast_final_safe<opr::BatchConvBiasForward>().param();
    size_t packed_channels = 1;
    size_t kern_spatial_pos = 3;
    if (param.format == Param::Format::NCHW4) {
        packed_channels = 4;
    }
    size_t fh = filter_shape[kern_spatial_pos],
           fw = filter_shape[kern_spatial_pos + 1];
    return out_shape.total_nr_elems() * fh * fw * src_shape[1] *
           packed_channels * 2;
}
// Pooling
template <>
uint64_t opr_footprint_func<opr::PoolingForward>(cg::OperatorNodeBase* opr) {
    auto&& param = opr->cast_final_safe<opr::PoolingForward>().param();
    auto area = param.window_h * param.window_w;
    return opr->output(0)->shape().total_nr_elems() * area;
}
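// Editor's note (an assumed example): a 2x2-window pooling therefore counts
// 4 ops per output element, independent of stride and padding.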
// PoolingBackward
template <>
uint64_t opr_footprint_func<opr::PoolingBackward>(cg::OperatorNodeBase* opr) {
    auto&& param = opr->cast_final_safe<opr::PoolingBackward>().param();
    auto area = param.window_h * param.window_w;
    return opr->input()[0]->shape().total_nr_elems() * area;
}
// Concat
template <>
uint64_t opr_footprint_func<opr::Concat>(cg::OperatorNodeBase* opr) {
    auto&& out_shape = opr->output()[0]->shape();
    return out_shape.total_nr_elems();
}
// Dimshuffle
template <>
uint64_t opr_footprint_func<opr::Dimshuffle>(cg::OperatorNodeBase* opr) {
    auto&& out = opr->output()[0];
    return out->shape().total_nr_elems();
}
// Reduce
template <>
uint64_t opr_footprint_func<opr::Reduce>(cg::OperatorNodeBase* opr) {
    return opr->input()[0]->shape().total_nr_elems();
}
// Host2DeviceCopy
template <>
uint64_t opr_footprint_func<opr::Host2DeviceCopy>(cg::OperatorNodeBase* opr) {
    auto&& out_shape = opr->output()[0]->shape();
    return out_shape.total_nr_elems();
}
/******************* Register Param JSON Functions *************************/
#if MGB_ENABLE_JSON
template <class T>
std::shared_ptr<json::Value> opr_param_json_func(cg::OperatorNodeBase* opr);
#define REGISTER_PARAM_JSON_FUNC(cls)                           \
    template <>                                                 \
    std::shared_ptr<json::Value> opr_param_json_func<opr::cls>( \
            cg::OperatorNodeBase * opr) {                       \
        return opr::opr_param_to_json(                          \
                opr->cast_final_safe<opr::cls>().param());      \
    }
REGISTER_PARAM_JSON_FUNC(Elemwise)
REGISTER_PARAM_JSON_FUNC(ConvolutionForward)
REGISTER_PARAM_JSON_FUNC(Convolution3D)
REGISTER_PARAM_JSON_FUNC(ConvBiasForward)
REGISTER_PARAM_JSON_FUNC(ConvolutionBackwardData)
REGISTER_PARAM_JSON_FUNC(Convolution3DBackwardData)
REGISTER_PARAM_JSON_FUNC(ConvolutionBackwardFilter)
REGISTER_PARAM_JSON_FUNC(MatrixMul)
REGISTER_PARAM_JSON_FUNC(BatchedMatrixMul)
REGISTER_PARAM_JSON_FUNC(Dot)
REGISTER_PARAM_JSON_FUNC(MatrixInverse)
REGISTER_PARAM_JSON_FUNC(PoolingForward)
REGISTER_PARAM_JSON_FUNC(PoolingBackward)
REGISTER_PARAM_JSON_FUNC(SVD)
REGISTER_PARAM_JSON_FUNC(MaskConvolution)
REGISTER_PARAM_JSON_FUNC(Images2Neibs)
REGISTER_PARAM_JSON_FUNC(Local)
REGISTER_PARAM_JSON_FUNC(GroupLocal)
REGISTER_PARAM_JSON_FUNC(LRN)
REGISTER_PARAM_JSON_FUNC(Concat)
REGISTER_PARAM_JSON_FUNC(Reduce)
REGISTER_PARAM_JSON_FUNC(LocalShareForward)
REGISTER_PARAM_JSON_FUNC(LocalShareBackwardData)
REGISTER_PARAM_JSON_FUNC(LocalShareBackwardFilter)
REGISTER_PARAM_JSON_FUNC(DeformableConvForward)
REGISTER_PARAM_JSON_FUNC(DeformableConvBackwardFilter)
REGISTER_PARAM_JSON_FUNC(DeformableConvBackwardData)
REGISTER_PARAM_JSON_FUNC(DeformablePSROIPoolingForward)
REGISTER_PARAM_JSON_FUNC(BatchConvBiasForward)
REGISTER_PARAM_JSON_FUNC(BatchNormForward)
REGISTER_PARAM_JSON_FUNC(ElemwiseMultiType)
REGISTER_PARAM_JSON_FUNC(Argsort)
REGISTER_PARAM_JSON_FUNC(Argmax)
REGISTER_PARAM_JSON_FUNC(Argmin)
REGISTER_PARAM_JSON_FUNC(AdaptivePooling)
REGISTER_PARAM_JSON_FUNC(ROIPooling)
REGISTER_PARAM_JSON_FUNC(ROIAlign)
REGISTER_PARAM_JSON_FUNC(WarpPerspective)
REGISTER_PARAM_JSON_FUNC(WarpAffine)
REGISTER_PARAM_JSON_FUNC(Remap)
REGISTER_PARAM_JSON_FUNC(Resize)
REGISTER_PARAM_JSON_FUNC(IndexingOneHot)
REGISTER_PARAM_JSON_FUNC(IndexingSetOneHot)
REGISTER_PARAM_JSON_FUNC(TopK)
REGISTER_PARAM_JSON_FUNC(UniformRNG)
REGISTER_PARAM_JSON_FUNC(GaussianRNG)
REGISTER_PARAM_JSON_FUNC(Linspace)
REGISTER_PARAM_JSON_FUNC(Eye)
REGISTER_PARAM_JSON_FUNC(CvtColor)
template <>
std::shared_ptr<json::Value> opr_param_json_func<opr::Dimshuffle>(
        cg::OperatorNodeBase* opr) {
    auto param = opr->cast_final_safe<opr::Dimshuffle>().param();
    auto pattern = json::Array::make();
    for (size_t i = 0; i < param.pattern_len; i++)
        pattern->add(json::NumberInt::make(param.pattern[i]));
    return json::Object::make({
            {"ndim", json::NumberInt::make(param.ndim)},
            {"pattern", pattern},
    });
}
template <>
std::shared_ptr<json::Value> opr_param_json_func<opr::AxisAddRemove>(
        cg::OperatorNodeBase* opr) {
    auto param = opr->cast_final_safe<opr::AxisAddRemove>().param();
    auto desc = json::Array::make();
    for (size_t i = 0; i < param.nr_desc; i++) {
        auto axisdesc = param.desc[i];
        desc->add(json::Object::make({
                {"method",
                 json::NumberInt::make(static_cast<int32_t>(axisdesc.method))},
                {"axisnum", json::NumberInt::make(axisdesc.axis.get_raw())},
        }));
    }
    return json::Object::make({
            {"nr_desc", json::NumberInt::make(param.nr_desc)},
            {"desc", desc},
    });
}
std::shared_ptr<json::Value> indexing_param_to_json(
        const std::vector<opr::indexing::AxisIndexer>& indices) {
    auto desc = json::Array::make();
    for (auto& index : indices) {
        // begin/end/step/idx record only whether the corresponding index
        // expression is present, not its value
        desc->add(json::Object::make({
                {"axis", json::NumberInt::make(index.axis.get_raw())},
                {"begin", json::NumberInt::make(index.begin.node() != nullptr)},
                {"end", json::NumberInt::make(index.end.node() != nullptr)},
                {"step", json::NumberInt::make(index.step.node() != nullptr)},
                {"idx", json::NumberInt::make(index.idx.node() != nullptr)},
        }));
    }
    return desc;
}
#define REGISTER_INDEXING_PARAM_JSON_FUNC(cls)                        \
    template <>                                                       \
    std::shared_ptr<json::Value> opr_param_json_func<opr::cls>(       \
            cg::OperatorNodeBase * opr) {                             \
        auto indices = opr->cast_final_safe<opr::cls>().index_desc(); \
        return indexing_param_to_json(indices);                      \
    }
REGISTER_INDEXING_PARAM_JSON_FUNC(Subtensor);
REGISTER_INDEXING_PARAM_JSON_FUNC(SetSubtensor);
REGISTER_INDEXING_PARAM_JSON_FUNC(IncrSubtensor);
REGISTER_INDEXING_PARAM_JSON_FUNC(IndexingMultiAxisVec);
REGISTER_INDEXING_PARAM_JSON_FUNC(IndexingSetMultiAxisVec);
REGISTER_INDEXING_PARAM_JSON_FUNC(IndexingIncrMultiAxisVec);
REGISTER_INDEXING_PARAM_JSON_FUNC(MeshIndexing);
REGISTER_INDEXING_PARAM_JSON_FUNC(IncrMeshIndexing);
REGISTER_INDEXING_PARAM_JSON_FUNC(SetMeshIndexing);
REGISTER_INDEXING_PARAM_JSON_FUNC(BatchedMeshIndexing);
REGISTER_INDEXING_PARAM_JSON_FUNC(BatchedIncrMeshIndexing);
REGISTER_INDEXING_PARAM_JSON_FUNC(BatchedSetMeshIndexing);
template <>
std::shared_ptr<json::Value> opr_param_json_func<opr::Reshape>(
        cg::OperatorNodeBase* opr) {
    auto axis_param = opr->cast_final_safe<opr::Reshape>().param();
    if (axis_param.axis != axis_param.MAX_NDIM) {
        return json::Object::make({
                {"axis", json::NumberInt::make(axis_param.axis)},
        });
    } else {
        return json::Object::make();
    }
}
template <>
std::shared_ptr<json::Value> opr_param_json_func<opr::GetVarShape>(
        cg::OperatorNodeBase* opr) {
    auto axis_param = opr->cast_final_safe<opr::GetVarShape>().param();
    if (axis_param.axis != axis_param.MAX_NDIM) {
        return json::Object::make({
                {"axis", json::NumberInt::make(axis_param.axis)},
        });
    } else {
        return json::Object::make();
    }
}
template <>
std::shared_ptr<json::Value> opr_param_json_func<opr::standalone::NMSKeep>(
        cg::OperatorNodeBase* opr) {
    auto nms_param = opr->cast_final_safe<opr::standalone::NMSKeep>().param();
    return json::Object::make({
            {"iou_thresh", json::Number::make(nms_param.iou_thresh)},
            {"max_output", json::Number::make(nms_param.max_output)},
    });
}
#endif  // MGB_ENABLE_JSON
}  // namespace
template <class OprType>
void OprFootprint::add_single_comp_footprint() {
    MIDOUT_B(OprType,
             midout_iv(MGB_HASH_STR("OprFootprint::add_single_comp_footprint")))
    auto&& record = m_type2comp_footprint.emplace(OprType::typeinfo(),
                                                  opr_footprint_func<OprType>);
    mgb_assert(record.second, "duplicate opr typeinfo");
    MIDOUT_E
}
#if MGB_ENABLE_JSON
template <class OprType>
void OprFootprint::add_single_param_json() {
    auto&& record = m_type2param_json.emplace(OprType::typeinfo(),
                                              opr_param_json_func<OprType>);
    mgb_assert(record.second, "duplicate opr typeinfo");
}
#endif
void OprFootprint::init_all_footprints() {
    add_single_comp_footprint<opr::Elemwise>();
    add_single_comp_footprint<opr::AddUpdate>();
    add_single_comp_footprint<opr::ConvolutionForward>();
    add_single_comp_footprint<opr::ConvBiasForward>();
    add_single_comp_footprint<opr::ConvolutionBackwardData>();
    add_single_comp_footprint<opr::ConvolutionBackwardFilter>();
    add_single_comp_footprint<opr::MatrixMul>();
    add_single_comp_footprint<opr::PoolingForward>();
    add_single_comp_footprint<opr::PoolingBackward>();
    add_single_comp_footprint<opr::Concat>();
    add_single_comp_footprint<opr::Dimshuffle>();
    add_single_comp_footprint<opr::Reduce>();
    add_single_comp_footprint<opr::Host2DeviceCopy>();
    add_single_comp_footprint<opr::LocalShareForward>();
    add_single_comp_footprint<opr::LocalShareBackwardData>();
    add_single_comp_footprint<opr::LocalShareBackwardFilter>();
    add_single_comp_footprint<opr::DeformableConvForward>();
    add_single_comp_footprint<opr::DeformableConvBackwardFilter>();
    add_single_comp_footprint<opr::DeformableConvBackwardData>();
    add_single_comp_footprint<opr::BatchConvBiasForward>();
#if MGB_ENABLE_JSON
    add_single_param_json<opr::Elemwise>();
    add_single_param_json<opr::ConvolutionForward>();
    add_single_param_json<opr::Convolution3D>();
    add_single_param_json<opr::ConvBiasForward>();
    add_single_param_json<opr::ConvolutionBackwardData>();
    add_single_param_json<opr::Convolution3DBackwardData>();
    add_single_param_json<opr::ConvolutionBackwardFilter>();
    add_single_param_json<opr::MatrixMul>();
    add_single_param_json<opr::BatchedMatrixMul>();
    add_single_param_json<opr::Dot>();
    add_single_param_json<opr::MatrixInverse>();
    add_single_param_json<opr::PoolingForward>();
    add_single_param_json<opr::PoolingBackward>();
    add_single_param_json<opr::SVD>();
    add_single_param_json<opr::MaskConvolution>();
    add_single_param_json<opr::Images2Neibs>();
    add_single_param_json<opr::Local>();
    add_single_param_json<opr::GroupLocal>();
    add_single_param_json<opr::LRN>();
    add_single_param_json<opr::Concat>();
    add_single_param_json<opr::Dimshuffle>();
    add_single_param_json<opr::AxisAddRemove>();
    add_single_param_json<opr::Subtensor>();
    add_single_param_json<opr::SetSubtensor>();
    add_single_param_json<opr::IncrSubtensor>();
    add_single_param_json<opr::IndexingMultiAxisVec>();
    add_single_param_json<opr::IndexingSetMultiAxisVec>();
    add_single_param_json<opr::IndexingIncrMultiAxisVec>();
    add_single_param_json<opr::MeshIndexing>();
    add_single_param_json<opr::SetMeshIndexing>();
    add_single_param_json<opr::IncrMeshIndexing>();
    add_single_param_json<opr::BatchedMeshIndexing>();
    add_single_param_json<opr::BatchedSetMeshIndexing>();
    add_single_param_json<opr::BatchedIncrMeshIndexing>();
    add_single_param_json<opr::Reduce>();
    add_single_param_json<opr::LocalShareForward>();
    add_single_param_json<opr::LocalShareBackwardData>();
    add_single_param_json<opr::LocalShareBackwardFilter>();
    add_single_param_json<opr::DeformableConvForward>();
    add_single_param_json<opr::DeformableConvBackwardFilter>();
    add_single_param_json<opr::DeformableConvBackwardData>();
    add_single_param_json<opr::DeformablePSROIPoolingForward>();
    add_single_param_json<opr::BatchConvBiasForward>();
    add_single_param_json<opr::BatchNormForward>();
    add_single_param_json<opr::Reshape>();
    add_single_param_json<opr::GetVarShape>();
    add_single_param_json<opr::Argsort>();
    add_single_param_json<opr::Argmin>();
    add_single_param_json<opr::Argmax>();
    add_single_param_json<opr::ElemwiseMultiType>();
    add_single_param_json<opr::AdaptivePooling>();
    add_single_param_json<opr::ROIPooling>();
    add_single_param_json<opr::ROIAlign>();
    add_single_param_json<opr::WarpPerspective>();
    add_single_param_json<opr::Remap>();
    add_single_param_json<opr::Resize>();
    add_single_param_json<opr::IndexingOneHot>();
    add_single_param_json<opr::IndexingSetOneHot>();
    add_single_param_json<opr::WarpAffine>();
    add_single_param_json<opr::TopK>();
    add_single_param_json<opr::UniformRNG>();
    add_single_param_json<opr::GaussianRNG>();
    add_single_param_json<opr::Linspace>();
    add_single_param_json<opr::Eye>();
    add_single_param_json<opr::standalone::NMSKeep>();
    add_single_param_json<opr::CvtColor>();
#endif
}
OprFootprint::Result OprFootprint::calc_footprint(cg::OperatorNodeBase* opr) {
    Result rst;
    auto&& dep_map = opr->node_prop().dep_map();
    for (auto&& inp : opr->input()) {
        if (inp->mem_plan().valid())
            rst.inp_layout.push_back(inp->layout());
        else
            rst.inp_layout.push_back({inp->shape(), inp->dtype()});
        if (cg::OperatorNodeBase::NodeProp::is_device_value_dep(
                    dep_map.at(inp))) {
            rst.memory += inp->dtype().size(inp->shape().total_nr_elems());
        }
    }
    for (auto&& out : opr->output()) {
        if (out->contain_flag(VarNode::Flag::VOLATILE_CONTENT))
            continue;
        rst.out_shape.push_back(out->shape());
        rst.memory += out->dtype().size(out->shape().total_nr_elems());
    }
    rst.computation = get_computation(opr);
#if MGB_ENABLE_JSON
    rst.param = get_param_json(opr);
#endif
    rst.opr_type = opr->dyn_typeinfo();
    return rst;
}
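// Editor's usage sketch (a hypothetical caller, not part of this file;
// assumes the OprFootprint constructor registers traits via
// init_all_footprints()):
//     OprFootprint footprint;
//     auto rst = footprint.calc_footprint(opr);  // opr: cg::OperatorNodeBase*
//     // rst.computation -> estimated ops (0 for unregistered opr types)
//     // rst.memory      -> bytes of device inputs read plus outputs written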
uint64_t OprFootprint::get_computation(cg::OperatorNodeBase* opr) {
    auto comp_trait = m_type2comp_footprint.find(opr->dyn_typeinfo());
    if (comp_trait != m_type2comp_footprint.end()) {
        return (comp_trait->second)(opr);
    }
    return 0;
}
#if MGB_ENABLE_JSON
std::shared_ptr<json::Value> OprFootprint::get_param_json(
        cg::OperatorNodeBase* opr) {
    auto param_trait = m_type2param_json.find(opr->dyn_typeinfo());
    if (param_trait != m_type2param_json.end()) {
        return (param_trait->second)(opr);
    }
    return json::Object::make();
}
std::shared_ptr<json::Value> OprFootprint::Result::to_json() const {
    using namespace json;
    std::shared_ptr<Value> comp;
    if (computation) {
        comp = NumberInt::make(computation);
    } else {
        comp = Null::make();
    }
    auto format_shape_arr = [](const TensorShapeArray& arr) {
        auto ret = Array::make();
        for (auto&& shp : arr) {
            auto cur = Array::make();
            for (size_t i = 0; i < shp.ndim; ++i) {
                cur->add(NumberInt::make(shp[i]));
            }
            ret->add(std::move(cur));
        }
        return ret;
    };
    auto format_layout_arr =
            [](const TensorLayoutArray& arr) -> std::shared_ptr<Value> {
        auto ret = Array::make();
        bool have_non_contig = false;
        for (auto&& item : arr) {
            if (item.is_contiguous()) {
                ret->add(json::Null::make());
            } else {
                have_non_contig = true;
                auto cur = Array::make();
                for (size_t i = 0; i < item.ndim; ++i) {
                    cur->add(NumberInt::make(item.stride[i]));
                }
                ret->add(std::move(cur));
            }
        }
        if (!have_non_contig) {
            ret.reset();
        }
        return ret;
    };
    TensorShapeArray inp_shape;
    for (auto&& i : inp_layout)
        inp_shape.push_back(i);
    auto ret = Object::make({{"computation", std::move(comp)},
                             {"memory", NumberInt::make(memory)},
                             {"in_shapes", format_shape_arr(inp_shape)},
                             {"out_shapes", format_shape_arr(out_shape)},
                             {"param", param}});
    if (auto inp_layout_json = format_layout_arr(inp_layout)) {
        ret->operator[]("in_layouts") = std::move(inp_layout_json);
    }
    return ret;
}
std::shared_ptr<json::Value> OprFootprint::get_opr_fp_graph_exec(
        cg::ComputingGraph& graph, const SymbolVarArray& outputs) {
    OprFootprint opr_footprint;
    ComputingGraph::OutputSpec out_spec;
    for (auto i : outputs) {
        out_spec.emplace_back(i, nullptr);
    }
    graph.options().allocate_static_mem_after_graph_compile = true;
    auto async_exec = graph.compile(out_spec);
    std::vector<std::pair<json::String, std::shared_ptr<json::Value>>> rst_vals;
    auto on_opr = [&opr_footprint, &rst_vals](cg::OperatorNodeBase* opr) {
        Result trait(opr_footprint.calc_footprint(opr));
        rst_vals.emplace_back(json::String(opr->id_str()), trait.to_json());
        return true;
    };
    async_exec->iter_opr_seq(on_opr);
    auto opr_fp = json::Object::make(rst_vals);
    return json::Object::make(
            {{"opr_footprint", opr_fp}, {"graph_exec", async_exec->to_json()}});
}
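// Editor's usage sketch (hypothetical; assumes a built ComputingGraph `graph`,
// output vars `outs`, and that json::Value exposes a to_string() serializer):
//     auto fp_json = OprFootprint::get_opr_fp_graph_exec(*graph, outs);
//     mgb_log("%s", fp_json->to_string().c_str());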
#endif  // MGB_ENABLE_JSON

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}

The MegEngine installation package bundles the CUDA environment needed to run code on a GPU, so there are no separate CPU and GPU builds. To run GPU programs, make sure the machine has GPU hardware and the driver installed. If you would like to try deep-learning development on cloud GPUs, visit the MegStudio platform.