You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

graph_runner_test.cc 9.4 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <iostream>
  17. #include <memory>
  18. #include "common/common_test.h"
  19. #include "ir/dtype.h"
  20. #include "pybind_api/ir/tensor_py.h"
  21. #include "transform/transform_base_test.h"
  22. #include "common/py_func_graph_fetcher.h"
  23. #include "pipeline/jit/static_analysis/static_analysis.h"
  24. #include "frontend/operator/ops.h"
  25. #include "transform/graph_ir/df_graph_manager.h"
  26. #include "transform/graph_ir/convert.h"
  27. #include "utils/utils.h"
  28. #ifdef OPEN_SOURCE
  29. #include "ge/client/ge_api.h"
  30. #else
  31. #include "external/ge/ge_api.h"
  32. #endif
  33. #define private public
  34. #include "transform/graph_ir/graph_runner.h"
  35. using mindspore::tensor::TensorPy;
  36. namespace mindspore {
  37. namespace transform {
  38. class TestGraphRunner : public UT::Common {
  39. public:
  40. TestGraphRunner() {}
  41. void SetUp();
  42. static const std::shared_ptr<Float> kF64;
  43. static const std::shared_ptr<Float> kF32;
  44. private:
  45. };
  46. void TestGraphRunner::SetUp() { UT::InitPythonPath(); }
  47. const std::shared_ptr<Float> TestGraphRunner::kF64 = std::make_shared<Float>(64);
  48. const std::shared_ptr<Float> TestGraphRunner::kF32 = std::make_shared<Float>(32);
  49. std::shared_ptr<DfGraphConvertor> MakeGeGraph() {
  50. PrimitivePtr conv2d = prim::kPrimConv2D;
  51. conv2d->AddAttr("stride", MakeValue(static_cast<int64_t>(1)));
  52. conv2d->AddAttr("pad", MakeValue(static_cast<int64_t>(0)));
  53. conv2d->AddAttr("pad_mode", MakeValue(std::string("pad")));
  54. conv2d->AddAttr("dilation", MakeValue(static_cast<int64_t>(1)));
  55. conv2d->AddAttr("group", MakeValue(static_cast<int64_t>(1)));
  56. conv2d->AddAttr("mode", MakeValue(static_cast<int64_t>(1)));
  57. conv2d->AddAttr("out_channel", MakeValue(static_cast<int64_t>(2)));
  58. conv2d->AddAttr("kernel_size", MakeValue(std::vector<int64_t>({2, 2})));
  59. conv2d->AddAttr("dilation", MakeValue(static_cast<int64_t>(1)));
  60. conv2d->AddAttr("data_format", MakeValue(kOpFormat_NCHW));
  61. FuncGraphPtr anf_graph = MakeFuncGraph(conv2d, 2);
  62. std::shared_ptr<FuncGraphManager> ir_graph_manager = MakeManager({anf_graph});
  63. return std::make_shared<DfGraphConvertor>(anf_graph);
  64. }
  65. namespace {
  66. std::shared_ptr<std::vector<MeTensorPtr>> DoExecGraph(const std::vector<MeTensorPtr> &inputs) {
  67. std::vector<GeTensorPtr> ge_tensor_ptrs = TransformUtil::ConvertInputTensors(inputs, kOpFormat_NCHW);
  68. std::vector<GeTensorPtr> ge_outputs;
  69. transform::GraphRunnerOptions options;
  70. transform::GraphRunner graph_runner(options);
  71. transform::RunOptions run_options;
  72. run_options.name = "fp_bp_subgraph";
  73. MS_LOG(INFO) << "Run func_graph begin, inputs size is: " << inputs.size();
  74. Status ret = graph_runner.RunGraph(run_options, ge_tensor_ptrs, &ge_outputs);
  75. MS_LOG(INFO) << "Run func_graph finish, outputs size is: " << ge_outputs.size();
  76. if (ret != Status::SUCCESS) {
  77. return nullptr;
  78. }
  79. std::vector<std::vector<int64_t>> request_dims;
  80. std::vector<int64_t> dims1 = {1, 1, 4, 4};
  81. std::vector<int64_t> dims2 = {2, 3, 4, 5};
  82. std::vector<int64_t> dims3 = {9, 9};
  83. request_dims.emplace_back(dims1);
  84. request_dims.emplace_back(dims2);
  85. request_dims.emplace_back(dims3);
  86. std::vector<MeTensorPtr> me_outputs = TransformUtil::ConvertGeTensors(ge_outputs, request_dims);
  87. return std::make_shared<std::vector<MeTensorPtr>>(me_outputs);
  88. }
  89. } // namespace
  90. TEST_F(TestGraphRunner, TestGeTensorConstructor) {
  91. // Init a data buffer
  92. float ge_tensor_data[] = {1.1, 2.2, 3.3, 4.4, 5.5, 6.6};
  93. // Create a Tensor with wanted data type and shape
  94. MeTensor tensor = MeTensor(TypeId::kNumberTypeFloat32, std::vector<int64_t>({1, 2, 3}));
  95. // Get the writable data pointer from the tensor
  96. float *me_tensor_data = reinterpret_cast<float *>(tensor.data_c());
  97. // Copy data from buffer to tensor's data
  98. memcpy_s(me_tensor_data, static_cast<size_t>(tensor.data().nbytes()), ge_tensor_data, sizeof(ge_tensor_data));
  99. PrintMeTensor(&tensor);
  100. std::cout << "----------------------------------" << std::endl;
  101. py::tuple py_tuple =
  102. py::make_tuple(py::make_tuple(py::make_tuple(1.1f, 2.2f, 3.3f), py::make_tuple(4.4f, 5.5f, 6.6f)));
  103. py::array my_arry = py::array(py_tuple).attr("astype").cast<py::function>()("float32").cast<py::array>();
  104. auto tensor_tuple = TensorPy::MakeTensor(my_arry, kFloat32);
  105. PrintMeTensor(tensor_tuple.get());
  106. py::array tensor_array = TensorPy::AsNumpy(tensor);
  107. py::array tensor_tuple_array = TensorPy::AsNumpy(*tensor_tuple);
  108. assert(memcmp(ge_tensor_data, tensor_array.data(), sizeof(ge_tensor_data)) == 0);
  109. assert(memcmp(ge_tensor_data, tensor_tuple_array.data(), sizeof(ge_tensor_data)) == 0);
  110. }
  111. #if (!defined ENABLE_GE)
  112. TEST_F(TestGraphRunner, TestRunGraphException) {
  113. DfGraphManager &graph_manager = DfGraphManager::GetInstance();
  114. graph_manager.ClearGraph();
  115. std::map<string, MeTensorPtr> dict;
  116. std::initializer_list<int64_t> list0{2, 1, 2, 2};
  117. MeTensorPtr init_tensor_ptr = MakeTensor(kF32, list0);
  118. dict["x1"] = init_tensor_ptr;
  119. std::shared_ptr<DfGraphConvertor> convertor = MakeGeGraph();
  120. (*convertor).ConvertAllNode().InitParam(dict).BuildGraph();
  121. auto df_graph = (*convertor).GetComputeGraph();
  122. graph_manager.AddGraph("test_graph", df_graph);
  123. std::initializer_list<int64_t> list1{1, 1, 2, 3};
  124. MeTensorPtr me_tensor_ptr = MakeTensor(kF32, list1);
  125. std::initializer_list<int64_t> list2{1, 1, 4, 4};
  126. MeTensorPtr input_ptr = MakeTensor(kF32, list2);
  127. std::vector<MeTensorPtr> me_inputs;
  128. me_inputs.emplace_back(input_ptr);
  129. std::vector<MeTensorPtr> me_outputs;
  130. GraphRunnerOptions options;
  131. GraphRunner graph_runner(options);
  132. RunOptions run_options;
  133. ASSERT_TRUE(graph_runner.RunGraph(run_options, me_inputs, &me_outputs) != Status::SUCCESS);
  134. run_options.name = "test_graph";
  135. ASSERT_TRUE(graph_runner.RunGraph(run_options, me_inputs, &me_outputs) == Status::SUCCESS);
  136. GraphRunner graph_runner2(options);
  137. ASSERT_TRUE(graph_runner2.RunGraph(run_options, me_inputs, &me_outputs) == Status::SUCCESS);
  138. // when the GraphManager is empty
  139. graph_manager.ClearGraph();
  140. GraphRunner graph_runner3(options);
  141. ASSERT_TRUE(graph_runner3.RunGraph(run_options, me_inputs, &me_outputs) != Status::SUCCESS);
  142. }
  143. TEST_F(TestGraphRunner, TestRunGraph) {
  144. DfGraphManager &graph_manager = DfGraphManager::GetInstance();
  145. graph_manager.ClearGraph();
  146. std::shared_ptr<DfGraphConvertor> convertor = MakeGeGraph();
  147. std::map<std::string, MeTensorPtr> dict;
  148. std::initializer_list<int64_t> list0{2, 1, 2, 2};
  149. dict.emplace("x1", MakeTensor(kF32, list0));
  150. (*convertor).ConvertAllNode().InitParam(dict).BuildGraph();
  151. graph_manager.AddGraph("test_graph", (*convertor).GetComputeGraph());
  152. TypePtr type_id = kFloat32;
  153. py::tuple tuple = py::make_tuple(
  154. py::make_tuple(py::make_tuple(py::make_tuple(1.0, 2.0, 3.0, 4.0), py::make_tuple(4.0, 5.0, 6.0, 7.0))),
  155. py::make_tuple(py::make_tuple(py::make_tuple(1.0, 2.0, 3.0, 4.0), py::make_tuple(4.0, 5.0, 6.0, 7.0))));
  156. py::array array = py::array(tuple);
  157. MeTensorPtr me_tensor_ptr = TensorPy::MakeTensor(array, type_id);
  158. MS_LOG(INFO) << "inputs me tensor data is: ";
  159. PrintMeTensor(&(*me_tensor_ptr));
  160. std::vector<MeTensorPtr> me_inputs;
  161. me_inputs.emplace_back(me_tensor_ptr);
  162. std::vector<MeTensorPtr> me_outputs;
  163. GraphRunnerOptions options;
  164. GraphRunner graph_runner(options);
  165. RunOptions run_options;
  166. run_options.name = "test_graph";
  167. ASSERT_TRUE(graph_runner.RunGraph(run_options, me_inputs, &me_outputs) == Status::SUCCESS);
  168. MS_LOG(INFO) << "outputs me tensor data is: ";
  169. for (auto i = 0; i < me_outputs.size(); i++) {
  170. PrintMeTensor(&(*me_outputs[i]));
  171. }
  172. }
  173. TEST_F(TestGraphRunner, TestAPI) {
  174. DfGraphManager &graph_manager = DfGraphManager::GetInstance();
  175. graph_manager.ClearGraph();
  176. std::shared_ptr<DfGraphConvertor> convertor = MakeGeGraph();
  177. std::map<std::string, MeTensorPtr> dict;
  178. std::initializer_list<int64_t> list0{2, 1, 2, 2};
  179. dict.emplace("x1", MakeTensor(kF32, list0));
  180. (*convertor).ConvertAllNode().InitParam(dict).BuildGraph();
  181. (*convertor).DrawComputeGraph("TestGraphRunner_TestAPI_Training.dot");
  182. graph_manager.AddGraph("fp_bp_subgraph", (*convertor).GetComputeGraph());
  183. std::initializer_list<int64_t> list1{1, 1, 4, 4};
  184. std::initializer_list<int64_t> list2{2, 3, 4, 5};
  185. std::initializer_list<int64_t> list3{9, 9, 1, 1};
  186. MeTensorPtr input_ptr1 = MakeTensor(kF32, list1);
  187. MeTensorPtr input_ptr2 = MakeTensor(kF32, list2);
  188. MeTensorPtr input_ptr3 = MakeTensor(kF32, list3);
  189. std::vector<MeTensorPtr> me_inputs;
  190. std::vector<MeTensorPtr> me_outputs;
  191. me_inputs.emplace_back(input_ptr1);
  192. me_inputs.emplace_back(input_ptr2);
  193. me_inputs.emplace_back(input_ptr3);
  194. auto ret = DoExecGraph(me_inputs);
  195. ASSERT_TRUE(ret != nullptr);
  196. me_outputs = *ret;
  197. MS_LOG(INFO) << "outputs me tensor data is: ";
  198. for (auto tensor : me_outputs) {
  199. PrintMeTensor(&(*tensor));
  200. }
  201. }
  202. #endif
  203. } // namespace transform
  204. } // namespace mindspore