You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

tensor_gen.cpp 3.4 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091
  1. /**
  2. * \file src/opr/test/tensor_gen.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. */
  11. #include "megbrain/opr/tensor_gen.h"
  12. #include "megbrain/opr/io.h"
  13. #include "megbrain/test/autocheck.h"
  14. #include "megbrain/test/helper.h"
  15. using namespace mgb;
  16. using namespace opr;
  17. TEST(TestTensorGen, Alloc) {
  18. auto host_x =
  19. std::make_shared<HostTensorND>(CompNode::load("xpu0"), dtype::Int32());
  20. auto graph = ComputingGraph::make();
  21. auto x = opr::Host2DeviceCopy::make_no_value_infer(*graph, host_x),
  22. y = opr::Alloc::make(x, dtype::Float32());
  23. HostTensorND host_y;
  24. auto func = graph->compile({make_callback_copy(y, host_y)});
  25. auto px = host_x->resize({3}).ptr<int>();
  26. px[0] = 2;
  27. px[1] = 3;
  28. px[2] = 5;
  29. func->execute();
  30. ASSERT_EQ(TensorShape({2, 3, 5}), host_y.shape());
  31. }
  32. TEST(TestTensorGen, Linspace) {
  33. auto host_num =
  34. std::make_shared<HostTensorND>(CompNode::load("xpu0"), dtype::Int32());
  35. host_num->resize({1}).ptr<int>()[0] = 30;
  36. using Checker = AutoOprChecker<2, 1>;
  37. for (auto endpoint : {false, true}) {
  38. auto make_graph =
  39. [endpoint, &host_num](
  40. const Checker::SymInpArray& inputs) -> Checker::SymOutArray {
  41. auto num = opr::Host2DeviceCopy::make(
  42. *inputs[0].node()->owner_graph(), host_num)
  43. .rename("num");
  44. return {opr::Linspace::make(
  45. inputs[0].rename("start"), inputs[1].rename("stop"), num,
  46. {endpoint})
  47. .rename("linspace")};
  48. };
  49. auto fwd = [&](Checker::NumOutArray& dest, Checker::NumInpArray inp) {
  50. size_t num = host_num->ptr<int>()[0];
  51. auto ptr = dest[0].resize({num}).ptr<float>();
  52. auto start = *inp[0]->ptr<float>(), stop = *inp[1]->ptr<float>(),
  53. step = (stop - start) / std::max<int>((num - endpoint), 1);
  54. for (size_t i = 0; i < num; ++i)
  55. ptr[i] = start + step * i;
  56. };
  57. Checker::RunOptions opt;
  58. opt.numdiff_eps = 1; // large eps because all linear
  59. std::array<TensorShape, 2> ishp{TensorShape{1}, {1}};
  60. Checker checker(make_graph, fwd);
  61. host_num->ptr<int>()[0] = 30;
  62. checker.run(ishp, opt).run(ishp, opt);
  63. host_num->ptr<int>()[0] = 1;
  64. checker.run(ishp, opt);
  65. }
  66. }
  67. TEST(TestTensorGen, Eye) {
  68. auto graph = ComputingGraph::make();
  69. auto x = opr::Eye::make(
  70. SymbolVar::make_scalar(5, *graph, CompNode::load("xpu0")),
  71. {-1, DTypeEnum::Int32});
  72. HostTensorND host_x;
  73. auto func = graph->compile({make_callback_copy(x, host_x)});
  74. func->execute();
  75. ASSERT_EQ(TensorShape({5, 5}), host_x.shape());
  76. auto ptr = host_x.ptr<int>();
  77. for (int i = 0; i < 5; ++i) {
  78. for (int j = 0; j < 5; ++j)
  79. ASSERT_EQ(*(ptr++), i - j - 1 == 0);
  80. }
  81. }
  82. // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}