/**
 * \file src/opr/test/misc.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "megbrain/opr/misc.h"
#include "megbrain/opr/basic_arith_wrapper.h"
#include "megbrain/opr/blas.h"
#include "megbrain/opr/io.h"
#include "megbrain/opr/tensor_manip.h"
#include "megbrain/opr/utility.h"
#include "megbrain/test/autocheck.h"
#include "megbrain/test/helper.h"
#include "megbrain/test/megdnn_helper.h"

#include <algorithm>  // std::sort / std::shuffle used below
#include <numeric>
#include <random>

using namespace mgb;

namespace {
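
//! Split \p shape at \p axis into A (product of dims before the axis),
//! B (extent of the axis itself) and C (product of dims after it), so a
//! contiguous tensor can be addressed as [A][B][C].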
void shape_abc(const TensorShape& shape, size_t axis, size_t& A, size_t& B, size_t& C) {
    auto acc_mul = [](const size_t* first, const size_t* last) {
        // use a size_t init value so std::accumulate computes the product in
        // size_t rather than truncating to unsigned int on 64-bit platforms
        return std::accumulate(first, last, size_t{1}, std::multiplies<size_t>());
    };
    A = acc_mul(shape.shape, shape.shape + axis);
    B = shape.shape[axis];
    C = acc_mul(shape.shape + axis + 1, shape.shape + shape.ndim);
}
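
//! Fill a 2-D tensor with rows of pairwise-distinct values: each row is
//! generated as a strictly increasing sequence (random base plus random
//! positive deltas of at least 0.1) and then shuffled. Distinct entries make
//! the sort order unique, keeping Argsort/TopK gradient checks stable.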
void argsort_data_gen(HostTensorND& dest) {
    mgb_assert(dest.layout().ndim == 2 && dest.layout().is_contiguous());
    size_t m = dest.layout()[0], n = dest.layout()[1];
    auto ptr = dest.ptr<float>();
    RNGxorshf rng{next_rand_seed()};
    std::uniform_real_distribution<float> dist_base{-10.f, 10.f},
            dist_delta{0.1f, 1.2f};
    for (size_t i = 0; i < m; ++i) {
        auto v = dist_base(rng);
        for (size_t j = 0; j < n; ++j) {
            ptr[j] = v;
            v += dist_delta(rng);
        }
        std::shuffle(ptr, ptr + n, rng);
        ptr += n;
    }
}

}  // namespace
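
// Check Argmax/Argmin against a naive reference that scans the B axis of the
// [A][B][C] decomposition. Gradients are disabled on both sides since the
// output is an integer index.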
TEST(TestOprMisc, Argmxx) {
    auto run = [](bool is_max, int32_t axis, TensorShape sshape) {
        auto dshape = sshape;
        dshape.shape[axis] = 1;
        using Checker = AutoOprChecker<1, 1>;
        auto make_graph =
                [&](const Checker::SymInpArray& inputs) -> Checker::SymOutArray {
            if (is_max)
                return {opr::Argmax::make(inputs[0], {axis})};
            else
                return {opr::Argmin::make(inputs[0], {axis})};
        };
        auto better_than = [&](float curr, float best) {
            if (is_max)
                return curr > best;
            else
                return curr < best;
        };
        auto fwd = [&](Checker::NumOutArray& out, Checker::NumInpArray inp) {
            out[0].dtype(dtype::Int32()).resize(dshape);
            size_t A, B, C;
            shape_abc(sshape, axis, A, B, C);
            for (size_t a = 0; a < A; ++a)
                for (size_t c = 0; c < C; ++c) {
                    float best_val;
                    size_t best_arg = -1;
                    if (is_max)
                        best_val = std::numeric_limits<float>::lowest();
                    else
                        best_val = std::numeric_limits<float>::max();
                    for (size_t b = 0; b < B; ++b) {
                        float curr_val = inp[0]->ptr<float>()[(a * B + b) * C + c];
                        if (better_than(curr_val, best_val)) {
                            best_val = curr_val;
                            best_arg = b;
                        }
                    }
                    out[0].ptr<int>()[a * C + c] = best_arg;
                }
        };
        Checker{make_graph, fwd}
                .set_input_allow_grad(0, false)
                .set_output_allow_grad(0, false)
                .run({sshape})
                .run({sshape})
                .run({sshape});
    };
    run(true, 0, {5});
    run(true, 1, {2, 3, 4, 5});
    run(true, 2, {2, 3, 4, 5});
    run(true, 3, {2, 3, 4, 5});
    run(false, 0, {3, 4, 5});
    run(false, 1, {2, 3, 4, 5});
    run(false, 2, {2, 3, 4, 5});
    run(false, 3, {2, 3, 4, 5});
}
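
// Check Argsort against per-row std::sort. The index output is excluded from
// gradient checking; numdiff_eps (0.045) stays below the minimum value gap
// (0.1) produced by argsort_data_gen, so numeric perturbations cannot change
// the sort order.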
TEST(TestOprMisc, Argsort) {
    using Order = opr::Argsort::Param::Order;
    auto run = [](Order order) {
        using Checker = AutoOprChecker<1, 2>;
        auto make_graph =
                [&](const Checker::SymInpArray& inputs) -> Checker::SymOutArray {
            return opr::Argsort::make(inputs[0], order);
        };
        auto fwd = [&](Checker::NumOutArray& out, Checker::NumInpArray inp) {
            size_t m = inp[0]->shape()[0], n = inp[0]->shape()[1];
            auto pi = inp[0]->ptr<float>();
            auto poval = out[0].resize({m, n}).ptr<float>();
            auto poidx = out[1].resize({m, n}).ptr<int>();
            using KV = std::pair<float, int>;
            std::vector<KV> row(n);
            for (size_t i = 0; i < m; ++i) {
                for (size_t j = 0; j < n; ++j) {
                    row[j].first = pi[i * n + j];
                    row[j].second = j;
                }
                if (order == Order::ASCENDING) {
                    std::sort(row.begin(), row.end());
                } else {
                    std::sort(row.begin(), row.end(), std::greater<KV>{});
                }
                for (size_t j = 0; j < n; ++j) {
                    poval[i * n + j] = row[j].first;
                    poidx[i * n + j] = row[j].second;
                }
            }
        };
        Checker::RunOptions opt;
        opt.numdiff_eps = 0.045;
        Checker{make_graph, fwd}
                .set_input_generator(0, argsort_data_gen)
                .set_output_allow_grad(1, false)
                .run({TensorShape{1, 1}}, opt)
                .run({TensorShape{5, 3}}, opt)
                .run({TensorShape{10, 24}}, opt);
    };
    run(Order::ASCENDING);
    run(Order::DESCENDING);
}
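
// Check Cumsum over every axis (including negative axis indices, which count
// from the last dimension) and all four exclusive/reverse combinations. The
// reference walks each [A][B][C] lane with a signed stride: reverse flips
// the stride, exclusive shifts the output by one slot and writes a leading
// zero.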
TEST(TestOprMisc, Cumsum) {
    using Param = opr::Cumsum::Param;
    auto run = [](const Param& param) {
        using Checker = AutoOprChecker<1, 1>;
        auto make_graph =
                [&](const Checker::SymInpArray& inputs) -> Checker::SymOutArray {
            return {opr::Cumsum::make(inputs[0], param)};
        };
        auto fwd = [&](Checker::NumOutArray& out, Checker::NumInpArray inp) {
            out[0].resize(inp[0]->shape());
            auto pin = inp[0]->ptr<float>(), pout = out[0].ptr<float>();
            size_t A, B, C;
            int real_axis = param.axis;
            if (real_axis < 0)
                real_axis += 3;  // all test shapes below are 3-dimensional
            shape_abc(inp[0]->shape(), real_axis, A, B, C);
            ptrdiff_t stride = C;
            if (param.reverse)
                stride = -stride;
            for (size_t i = 0; i < A; ++i) {
                for (size_t k = 0; k < C; ++k) {
                    auto pi = pin + i * B * C + k, po = pout + i * B * C + k;
                    if (param.reverse) {
                        pi += (B - 1) * C;
                        po += (B - 1) * C;
                    }
                    if (param.exclusive) {
                        *po = 0;
                        po += stride;
                    }
                    float sum = 0;
                    for (size_t j = 0; j < B - 1; ++j) {
                        sum += pi[j * stride];
                        po[j * stride] = sum;
                    }
                    if (!param.exclusive) {
                        po[(B - 1) * stride] = sum + pi[(B - 1) * stride];
                    }
                }
            }
        };
        Checker{make_graph, fwd}
                .run({TensorShape{2, 3, 4}})
                .run({TensorShape{3, 1, 2}})
                .run({TensorShape{4, 2, 3}});
    };
    // test negative axis values and all exclusive/reverse flag combinations
    for (int32_t axis = -3; axis < 3; ++axis)
        for (int mask = 0; mask < 4; ++mask)
            run({axis, bool(mask >> 1), bool(mask & 1)});
}
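
// Check CondTake with Mode::LT and the default threshold of 0: elements of
// the data tensor are kept where the mask is negative. ensure_nonempty forces
// at least one negative mask entry, so the output (and its gradient check)
// is never empty.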
TEST(TestOprMisc, CondTake) {
    using Param = opr::CondTake::Param;
    using Checker = AutoOprChecker<2, 1>;
    auto make_graph = [&](const Checker::SymInpArray& inputs) -> Checker::SymOutArray {
        return {opr::CondTake::make(inputs[0], inputs[1], {Param::Mode::LT})[0]};
    };
    auto fwd = [&](Checker::NumOutArray& out, Checker::NumInpArray inp) {
        std::vector<float> values;
        auto data = inp[0]->ptr<float>(), mask = inp[1]->ptr<float>();
        auto isize = inp[0]->shape().total_nr_elems();
        for (size_t i = 0; i < isize; ++i) {
            if (mask[i] < 0) {
                values.push_back(data[i]);
            }
        }
        out[0].resize({values.size()});
        memcpy(out[0].ptr<float>(), values.data(), sizeof(float) * values.size());
    };
    auto ensure_nonempty = [](Checker::NumInpArray inp) {
        auto mask = inp[1]->ptr<float>();
        auto isize = inp[1]->shape().total_nr_elems();
        for (size_t i = 0; i < isize; ++i) {
            if (mask[i] < 0)
                return;
        }
        mask[isize - 1] = -1;
    };
    auto mki = [](const TensorShape& shp) -> Checker::ShapeInpArray {
        return {shp, shp};
    };
    Checker{make_graph, fwd}
            .set_input_allow_grad(1, false)
            .set_input_coordinator(ensure_nonempty)
            .run(mki({2}))
            .run(mki({3, 5, 8}))
            .run(mki({100}));
}
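
// Mode::EQ with mask y = x + 1 can never match, so CondTake must produce
// empty outputs; empty input shapes are also fed to verify that zero-sized
// tensors flow through the graph without error.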
TEST(TestOprMisc, CondTakeEmptyIO) {
    using Param = opr::CondTake::Param;
    HostTensorGenerator<> gen;
    auto check = [&](const TensorShape& shp) {
        auto host_x = gen(shp);
        auto graph = ComputingGraph::make();
        auto x = opr::Host2DeviceCopy::make(*graph, host_x);
        auto y = x + 1;
        auto out = opr::CondTake::make(x, y, {Param::Mode::EQ});
        HostTensorND host_out0, host_out1;
        auto func = graph->compile(
                {make_callback_copy(out[0], host_out0),
                 make_callback_copy(out[1], host_out1)});
        func->execute();
        ASSERT_EQ(TensorShape{0}, host_out0.shape());
        ASSERT_EQ(TensorShape{0}, host_out1.shape());
    };
    check({1});
    check({0});
    check({1, 0});
}
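
// Check TopK in KTH_ONLY mode (only the k-th value is returned), with k fed
// from a host tensor. The dyn_k variant wraps k in MarkDynamicVar to exercise
// dynamic shape inference; the non_contig variant slices the input with
// Subtensor and asserts afterwards that the slice shares device memory with
// the original var (no copy was introduced). The reference fwd runs the
// naive megdnn TopK kernel directly.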
TEST(TestOprMisc, TopKValueOnly) {
    auto run = [](bool dyn_k, bool non_contig) {
        using Checker = AutoOprChecker<1, 1>;
        std::shared_ptr<HostTensorND> host_k;
        SymbolVar var_x0, var_x1;
        auto make_graph =
                [&](const Checker::SymInpArray& inputs) -> Checker::SymOutArray {
            auto k = opr::Host2DeviceCopy::make(
                    *inputs[0].node()->owner_graph(), host_k);
            if (dyn_k) {
                k = opr::MarkDynamicVar::make(k);
            }
            auto x = inputs[0];
            if (non_contig) {
                var_x0 = x;
                x = opr::Subtensor::make(
                        x, {opr::Subtensor::AxisIndexer::make_interval(
                                   1, None, opr::GetVarShape::make(x, 1) / 2, None)});
                var_x1 = x;
            }
            auto outs = opr::TopK::make(x, k, opr::TopK::Param::Mode::KTH_ONLY);
            return {outs[0]};
        };
        auto fwd = [&](Checker::NumOutArray& out, Checker::NumInpArray inp) {
            auto opr = megdnn_naive_handle()->create_operator<megdnn::TopK>();
            int k = host_k->ptr<int>()[0];
            HostTensorND x = *inp[0];
            if (non_contig) {
                auto layout = x.layout();
                layout.shape[1] /= 2;
                x = x.sub(SubTensorSpec::make_from_layout(layout));
            }
            TensorLayout outl0, outl1;
            opr->deduce_layout(k, x.layout(), outl0, outl1);
            size_t wk_size = opr->get_workspace_in_bytes(k, x.layout(), outl0, outl1);
            std::unique_ptr<dt_byte[]> wk_store{new dt_byte[wk_size]};
            opr->exec(
                    k, x.as_megdnn(), out[0].resize(outl0).as_megdnn(), {},
                    {wk_store.get(), wk_size});
        };
        Checker checker{make_graph, fwd};
        checker.set_input_generator(0, argsort_data_gen);
        host_k = std::make_shared<HostTensorND>(
                checker.comp_node(), TensorShape{1}, dtype::Int32{});
        host_k->ptr<int>()[0] = 1;
        Checker::RunOptions opt;
        opt.numdiff_eps = 0.047;
        auto invoke = [&](int k, size_t m, size_t n) {
            host_k->ptr<int>()[0] = k;
            checker.run({TensorShape{m, n}}, opt);
        };
        if (!non_contig) {
            invoke(1, 1, 1);
        }
        invoke(-2, 3, 2);
        invoke(-1, 4, 5);
        invoke(3, 10, 33);
        invoke(-8, 23, 35);
        if (non_contig) {
            ASSERT_EQ(prev_dev_ptr(var_x0), prev_dev_ptr(var_x1));
        }
    };
    for (auto i : {false, true}) {
        for (auto j : {false, true}) {
            run(i, j);
        }
    }
}
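
// Check TopK in VALUE_IDX_SORTED mode, which returns both the sorted top-k
// values and their indices; the index output is excluded from gradient
// checking.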
TEST(TestOprMisc, TopKSorted) {
    using Checker = AutoOprChecker<1, 2>;
    std::shared_ptr<HostTensorND> host_k;
    auto constexpr mode = opr::TopK::Param::Mode::VALUE_IDX_SORTED;
    auto make_graph = [&](const Checker::SymInpArray& inputs) -> Checker::SymOutArray {
        auto k = opr::Host2DeviceCopy::make(*inputs[0].node()->owner_graph(), host_k);
        auto x = inputs[0];
        return opr::TopK::make(x, k, mode);
    };
    auto fwd = [&](Checker::NumOutArray& out, Checker::NumInpArray inp) {
        auto opr = megdnn_naive_handle()->create_operator<megdnn::TopK>();
        opr->param().mode = mode;
        int k = host_k->ptr<int>()[0];
        TensorLayout outl0, outl1;
        opr->deduce_layout(k, inp[0]->layout(), outl0, outl1);
        size_t wk_size = opr->get_workspace_in_bytes(k, inp[0]->layout(), outl0, outl1);
        std::unique_ptr<dt_byte[]> wk_store{new dt_byte[wk_size]};
        opr->exec(
                k, inp[0]->as_megdnn(), out[0].resize(outl0).as_megdnn(),
                out[1].resize(outl1).as_megdnn(), {wk_store.get(), wk_size});
    };
    Checker checker{make_graph, fwd};
    checker.set_input_generator(0, argsort_data_gen).set_output_allow_grad(1, false);
    host_k = std::make_shared<HostTensorND>(
            checker.comp_node(), TensorShape{1}, dtype::Int32{});
    host_k->ptr<int>()[0] = 1;
    Checker::RunOptions opt;
    opt.numdiff_eps = 0.047;
    auto invoke = [&](int k, size_t m, size_t n) {
        host_k->ptr<int>()[0] = k;
        checker.run({TensorShape{m, n}}, opt);
    };
    invoke(1, 1, 1);
    invoke(-1, 3, 5);
    invoke(5, 13, 23);
    invoke(-8, 35, 4);
}
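
// Take the gradient through only the index output of TopK: indices are
// piecewise constant in the input, so the gradient w.r.t. x must be all
// zeros (compared against host_y, which is zero-filled above).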
TEST(TestOprMisc, TopKSortedIdxOnly) {
    HostTensorGenerator<> gen;
    auto graph = ComputingGraph::make();
    std::shared_ptr<HostTensorND> host_x = gen({2, 5});
    std::shared_ptr<HostTensorND> host_y = gen({2, 5});
    for (size_t i = 0; i < 10; ++i) {
        host_y->ptr<float>()[i] = 0.0f;
    }
    auto x = opr::Host2DeviceCopy::make(*graph, host_x),
         idx = opr::TopK::make(
                 x, x.make_scalar(3), opr::TopK::Param::Mode::VALUE_IDX_SORTED)[1],
         y = opr::TypeCvt::make(idx, dtype::Float32{}),
         gx = cg::grad(opr::reduce_sum(y, y.make_scalar(1)), x);
    HostTensorND host_gx;
    auto func = graph->compile({make_callback_copy(gx, host_gx)});
    func->execute();
    MGB_ASSERT_TENSOR_EQ(host_gx, *host_y);
}
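
// k is an integer hyper-parameter rather than a differentiable input, so
// requesting the gradient of the top-k values w.r.t. k must yield a null
// var.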
TEST(TestOprMisc, TopKGrad) {
    HostTensorGenerator<> gen;
    auto graph = ComputingGraph::make();
    std::shared_ptr<HostTensorND> host_x = gen({2, 5});
    std::shared_ptr<HostTensorND> host_k = gen({1});
    host_k->ptr<float>()[0] = 3;
    auto x = opr::Host2DeviceCopy::make(*graph, host_x),
         k = opr::Host2DeviceCopy::make(*graph, host_k),
         ki = opr::TypeCvt::make(k, dtype::Int32{}),
         val = opr::TopK::make(x, ki, opr::TopK::Param::Mode::VALUE_IDX_SORTED)[0],
         gk = cg::grad(opr::reduce_sum(val, val.make_scalar(1)), ki, true, false);
    EXPECT_TRUE(gk == nullptr);
}

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
