
step_parallel_test.cc

/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "common/common_test.h"
#include "frontend/parallel/step_parallel.h"
#include "frontend/parallel/graph_util/generate_graph.h"
#include "common/py_func_graph_fetcher.h"
#include "debug/draw.h"
#include "frontend/operator/ops.h"
#include "pipeline/jit/static_analysis/static_analysis.h"
#include "utils/convert_utils_py.h"

namespace mindspore {
namespace parallel {
extern size_t TOTAL_OPS;

class TestStepParallel : public UT::Common {
 public:
  TestStepParallel() {}
  void SetUp();
  void TearDown() {}
};

void TestStepParallel::SetUp() { UT::InitPythonPath(); }
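
// Builds the global g_device_manager shared by the tests below: 20 devices,
// split into two stages of 16 and 4, local rank 0, HCCL backend.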
void Init_Device_Manager() {
  RankList dev_list;
  for (int32_t i = 0; i < 20; i++) {
    dev_list.push_back(i);
  }
  RankList stage_map;
  stage_map.push_back(16);
  stage_map.push_back(4);
  int32_t local_dev = 0;
  // create a new g_device_manager
  g_device_manager = std::make_shared<DeviceManager>();
  g_device_manager->Init(dev_list, local_dev, stage_map, "hccl");
}
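
// Creates a MatMul(x, y) CNode in a fresh graph. `condition` controls how the
// abstracts are populated, so the ExtractShape tests can hit both the normal
// path and the error paths:
//   0 - all input and output shapes set (default)
//   1 - input x gets a null shape
//   2 - output abstract is a scalar value instead of a tensor
//   3 - output shape is a TupleShape built from the two input shapes
//   other - parameter abstracts are left unset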
CNodePtr Make_Node(Shape x, Shape y, Shape out, int condition = 0) {
  std::vector<int32_t> x_shape;
  std::vector<int32_t> y_shape;
  std::vector<int32_t> out_shape;
  FuncGraphPtr func_graph = std::make_shared<FuncGraph>();
  ParameterPtr param1 = func_graph->add_parameter();
  ParameterPtr param2 = func_graph->add_parameter();
  (void)std::transform(x.begin(), x.end(), std::back_inserter(x_shape),
                       [](const int64_t &value) { return static_cast<int>(value); });
  (void)std::transform(y.begin(), y.end(), std::back_inserter(y_shape),
                       [](const int64_t &value) { return static_cast<int>(value); });
  (void)std::transform(out.begin(), out.end(), std::back_inserter(out_shape),
                       [](const int64_t &value) { return static_cast<int>(value); });
  param1->set_name("x");
  param2->set_name("y");
  BaseShapePtr shape1 = std::make_shared<abstract::Shape>(x);
  BaseShapePtr shape2 = std::make_shared<abstract::Shape>(y);
  BaseShapePtr shape3 = std::make_shared<abstract::Shape>(out);
  std::shared_ptr<tensor::Tensor> inputs_x = std::make_shared<tensor::Tensor>(kNumberTypeInt32, x_shape);
  std::shared_ptr<tensor::Tensor> inputs_y = std::make_shared<tensor::Tensor>(kNumberTypeInt32, y_shape);
  std::shared_ptr<tensor::Tensor> inputs_out = std::make_shared<tensor::Tensor>(kNumberTypeInt32, out_shape);
  AbstractBasePtr abstract1 = abstract::FromValue(inputs_x, true);
  AbstractBasePtr abstract2 = abstract::FromValue(inputs_y, true);
  AbstractBasePtr abstract3 = abstract::FromValue(inputs_out, true);
  switch (condition) {
    case 0: {
      abstract1->set_shape(shape1);
      abstract2->set_shape(shape2);
      abstract3->set_shape(shape3);
      param1->set_abstract(abstract1);
      param2->set_abstract(abstract2);
      break;
    }
    case 1: {
      abstract1->set_shape(nullptr);
      param1->set_abstract(abstract1);
      param2->set_abstract(abstract2);
      break;
    }
    case 2: {
      abstract1->set_shape(shape1);
      abstract2->set_shape(shape2);
      param1->set_abstract(abstract1);
      param2->set_abstract(abstract2);
      abstract3 = abstract::FromValue(1, false);
      break;
    }
    case 3: {
      std::vector<BaseShapePtr> shape_o = {std::make_shared<abstract::Shape>(x),
                                           std::make_shared<abstract::Shape>(y)};
      BaseShapePtr shape4 = std::make_shared<abstract::TupleShape>(shape_o);
      abstract1->set_shape(shape1);
      abstract2->set_shape(shape2);
      abstract3->set_shape(shape4);
      param1->set_abstract(abstract1);
      param2->set_abstract(abstract2);
      break;
    }
    default:
      MS_LOG(INFO) << "Do Nothing!";
  }
  std::vector<AnfNodePtr> inputs;
  inputs.push_back(NewValueNode(prim::kPrimMatMul));
  inputs.push_back(param1);
  inputs.push_back(param2);
  CNodePtr node = func_graph->NewCNode(inputs);
  node->set_abstract(abstract3);
  return node;
}
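
// Builds a managed graph `return MatMul(MatMul(x, y), z)` with shard
// strategies attached to both MatMul primitives. `condition` optionally
// corrupts matmul1's strategy attribute so ExtractInformation can be driven
// down its error paths:
//   1 - strategy is a scalar instead of a tuple
//   2 - strategy tuple contains a scalar instead of a dimension list
//   3 - strategy ((2, 4), (2, 4)) shards the contracting dimension
//       inconsistently between the two inputs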
FuncGraphManagerPtr Make_Manager(int condition = 0) {
  std::vector<int32_t> inputs_x = {64, 32};
  std::vector<int32_t> inputs_y = {32, 64};
  std::vector<int32_t> inputs_z = {64, 128};
  std::vector<int32_t> outputs_1 = {64, 64};
  std::vector<int32_t> outputs_2 = {64, 128};
  FuncGraphPtr func_graph = std::make_shared<FuncGraph>();
  ParameterPtr param1 = func_graph->add_parameter();
  ParameterPtr param2 = func_graph->add_parameter();
  ParameterPtr param3 = func_graph->add_parameter();
  std::shared_ptr<tensor::Tensor> inputs_x_dim = std::make_shared<tensor::Tensor>(kNumberTypeInt32, inputs_x);
  std::shared_ptr<tensor::Tensor> inputs_y_dim = std::make_shared<tensor::Tensor>(kNumberTypeInt32, inputs_y);
  std::shared_ptr<tensor::Tensor> inputs_z_dim = std::make_shared<tensor::Tensor>(kNumberTypeInt32, inputs_z);
  std::shared_ptr<tensor::Tensor> inputs_out1_dim = std::make_shared<tensor::Tensor>(kNumberTypeInt32, outputs_1);
  std::shared_ptr<tensor::Tensor> inputs_out2_dim = std::make_shared<tensor::Tensor>(kNumberTypeInt32, outputs_2);
  AbstractBasePtr abstract_x = abstract::FromValue(inputs_x_dim, true);
  AbstractBasePtr abstract_y = abstract::FromValue(inputs_y_dim, true);
  AbstractBasePtr abstract_z = abstract::FromValue(inputs_z_dim, true);
  AbstractBasePtr abstract_out1 = abstract::FromValue(inputs_out1_dim, true);
  AbstractBasePtr abstract_out2 = abstract::FromValue(inputs_out2_dim, true);
  param1->set_abstract(abstract_x);
  param2->set_abstract(abstract_y);
  param3->set_abstract(abstract_z);
  Dimensions v1 = {2, 2};
  Dimensions v2 = {2, 4};
  std::vector<ValuePtr> elements = {MakeValue(v1), MakeValue(v2)};
  ValueTuplePtr var = std::make_shared<ValueTuple>(elements);
  std::vector<AnfNodePtr> inputs;
  inputs.push_back(NewValueNode(prim::kPrimMatMul));
  inputs.push_back(param1);
  inputs.push_back(param2);
  CNodePtr node1 = func_graph->NewCNode(inputs);
  node1->set_in_forward_flag(true);
  node1->set_abstract(abstract_out1);
  PrimitivePtr prim1 = node1->input(0)->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
  ValuePtr transpose_a = MakeValue(false);
  ValuePtr transpose_b = MakeValue(false);
  prim1->AddAttr("transpose_a", transpose_a);
  prim1->AddAttr("transpose_b", transpose_b);
  prim1->AddAttr("instance_name", MakeValue("matmul1"));
  prim1->AddAttr("strategy", var);
  inputs.clear();
  Dimensions v3 = {2, 2};
  Dimensions v4 = {2, 4};
  std::vector<ValuePtr> elements2 = {MakeValue(v3), MakeValue(v4)};
  ValueTuplePtr var2 = std::make_shared<ValueTuple>(elements2);
  inputs.push_back(NewValueNode(prim::kPrimMatMul));
  inputs.push_back(node1);
  inputs.push_back(param3);
  CNodePtr node2 = func_graph->NewCNode(inputs);
  node2->set_in_forward_flag(true);
  node2->set_abstract(abstract_out2);
  inputs.clear();
  inputs.push_back(NewValueNode(prim::kPrimReturn));
  inputs.push_back(node2);
  CNodePtr cnode_return = func_graph->NewCNode(inputs);
  cnode_return->set_in_forward_flag(true);
  func_graph->set_return(cnode_return);
  PrimitivePtr prim2 = node2->input(0)->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
  prim2->AddAttr("transpose_a", transpose_a);
  prim2->AddAttr("transpose_b", transpose_b);
  prim2->AddAttr("instance_name", MakeValue("matmul2"));
  prim2->AddAttr("strategy", var2);
  switch (condition) {
    case 1: {
      prim1->set_attr("strategy", MakeValue(0));
      break;
    }
    case 2: {
      std::vector<ValuePtr> elements_t = {MakeValue(0)};
      ValueTuplePtr var_t = std::make_shared<ValueTuple>(elements_t);
      prim1->set_attr("strategy", var_t);
      break;
    }
    case 3: {
      Dimensions vt1 = {2, 4};
      Dimensions vt2 = {2, 4};
      std::vector<ValuePtr> elements_t2 = {MakeValue(vt1), MakeValue(vt2)};
      ValueTuplePtr var_t2 = std::make_shared<ValueTuple>(elements_t2);
      prim1->set_attr("strategy", var_t2);
      break;
    }
  }
  std::vector<FuncGraphPtr> func_graphs{func_graph};
  FuncGraphManagerPtr manager = std::make_shared<FuncGraphManager>(func_graphs, true);
  manager->Init();
  return manager;
}
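
// GetOpPythonPath should map both a communication operator (AllReduce) and an
// arithmetic operator (TensorAdd) to the mindspore.ops.operations package.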
TEST_F(TestStepParallel, GetPythonPath1) {
  OperatorName operator_name = "AllReduce";
  const std::string expect = "mindspore.ops.operations";
  auto temp = parallel::GetOpPythonPath(operator_name);
  ASSERT_EQ(temp, expect);
}

TEST_F(TestStepParallel, GetPythonPath2) {
  OperatorName operator_name = "TensorAdd";
  const std::string expect = "mindspore.ops.operations";
  auto temp = parallel::GetOpPythonPath(operator_name);
  ASSERT_EQ(temp, expect);
}
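
// ExtractStrategy should unpack a "strategy" attribute of the form
// ((2, 2), (4, 4)) into the equivalent Strategys container.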
TEST_F(TestStepParallel, ExtractStrategy) {
  Dimensions v1 = {2, 2};
  Dimensions v2 = {4, 4};
  std::unordered_map<std::string, ValuePtr> attrs;
  // stage
  ValuePtr val1 = MakeValue(v1);
  ValuePtr val2 = MakeValue(v2);
  std::vector<ValuePtr> elements = {val1, val2};
  ValueTuplePtr strategy_tuple = std::make_shared<ValueTuple>(elements);
  attrs["strategy"] = strategy_tuple;
  Strategys strategy_expect = {v1, v2};
  StrategyPtr strategy = ExtractStrategy(attrs);
  Strategys strategy_test = strategy->GetInputDim();
  ASSERT_EQ(strategy_expect, strategy_test);
}
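
// The ExtractShape tests pair Make_Node conditions with expectations: unset
// abstracts (condition 4), a null input shape (1), and a non-tensor output (2)
// must throw; the default case returns {input shapes, output shapes}; the
// tuple-shaped output (3) returns the input shapes twice.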
TEST_F(TestStepParallel, ExtractShape) {
  Shape inputs_x_dims = {64, 32};
  Shape inputs_y_dims = {32, 64};
  Shape outputs_dims = {64, 64};
  CNodePtr node = Make_Node(inputs_x_dims, inputs_y_dims, outputs_dims, 4);
  EXPECT_THROW({ ExtractShape(node); }, std::runtime_error);
}

TEST_F(TestStepParallel, ExtractShape1) {
  Shape inputs_x_dims = {64, 32};
  Shape inputs_y_dims = {32, 64};
  Shape outputs_dims = {64, 64};
  CNodePtr node = Make_Node(inputs_x_dims, inputs_y_dims, outputs_dims);
  std::vector<Shapes> shape_test = ExtractShape(node);
  Shapes inputs_shape = std::vector<Shape>{inputs_x_dims, inputs_y_dims};
  Shapes outputs_shape = std::vector<Shape>{outputs_dims};
  std::vector<Shapes> shape_expect = {inputs_shape, outputs_shape};
  ASSERT_EQ(shape_test, shape_expect);
}

TEST_F(TestStepParallel, ExtractShape2) {
  Shape inputs_x_dims = {64, 32};
  Shape inputs_y_dims = {32, 64};
  Shape outputs_dims = {64, 64};
  CNodePtr node = Make_Node(inputs_x_dims, inputs_y_dims, outputs_dims, 1);
  EXPECT_THROW({ ExtractShape(node); }, std::runtime_error);
}

TEST_F(TestStepParallel, ExtractShape3) {
  Shape inputs_x_dims = {64, 32};
  Shape inputs_y_dims = {32, 64};
  Shape outputs_dims = {64, 64};
  CNodePtr node = Make_Node(inputs_x_dims, inputs_y_dims, outputs_dims, 3);
  Shapes inputs_shape = std::vector<Shape>{inputs_x_dims, inputs_y_dims};
  std::vector<Shapes> shape_expect = {inputs_shape, inputs_shape};
  std::vector<Shapes> shape_test = ExtractShape(node);
  ASSERT_EQ(shape_test, shape_expect);
}

TEST_F(TestStepParallel, ExtractShape4) {
  Shape inputs_x_dims = {64, 32};
  Shape inputs_y_dims = {32, 64};
  Shape outputs_dims = {64, 64};
  CNodePtr node = Make_Node(inputs_x_dims, inputs_y_dims, outputs_dims, 2);
  Shapes inputs_shape = std::vector<Shape>{inputs_x_dims, inputs_y_dims};
  EXPECT_THROW({ ExtractShape(node); }, std::runtime_error);
}
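
// CreatOpInstance builds a Python primitive instance from C++ operator
// attributes. The first test round-trips an AllReduce through the Python
// adapter and checks every attribute it gets back; the second expects an
// unknown operator name to throw.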
TEST_F(TestStepParallel, CreatOpInstance) {
  ValuePtr attr0_value = MakeValue(REDUCE_OP_SUM);
  ValuePtr attr1_value = MakeValue("0-1-2");
  Attr attr0 = std::make_pair("op", attr0_value);
  Attr attr1 = std::make_pair("group", attr1_value);
  OperatorAttrs attrs = {attr0, attr1};
  OperatorName op_name = "AllReduce";
  OperatorParams operator_param;
  OperatorArgs args = std::make_pair(attrs, operator_param);
  auto op_instance = CreatOpInstance(args.first, op_name, "test");
  ASSERT_TRUE(op_instance);
  PrimitivePyPtr allreduce_ptr = dyn_cast<PrimitivePy>(op_instance);
  ASSERT_TRUE(allreduce_ptr);
  if (nullptr != allreduce_ptr) {
    MS_LOG(INFO) << "Get PrimitivePyPtr: " << allreduce_ptr->name();
    std::vector<py::object> arglist;
    (void)std::transform(attrs.begin(), attrs.end(), std::back_inserter(arglist),
                         [](Attr attr) { return ValuePtrToPyData(attr.second); });
    py::object allreduce_pyobj = parse::python_adapter::CallPyFn(
      "mindspore.parallel._utils", "_get_python_op", "AllReduce", "mindspore.ops.operations", "test", arglist);
    py::dict opAttr = py::getattr(allreduce_pyobj, "attrs");
    std::unordered_map<std::string, ValuePtr> attributes{};
    for (auto item : opAttr) {
      if (!py::isinstance<py::str>(item.first)) {
        MS_LOG(EXCEPTION) << "type error in py dict convert";
      }
      std::string name = py::cast<std::string>(item.first);
      MS_LOG(INFO) << "Attr name: " << name;
      ValuePtr converted_ret;
      if (name == "op") {
        parse::ConvertData(py::cast<py::object>(item.second), &converted_ret);
        ASSERT_EQ(converted_ret->ToString(), "sum");
      } else if (name == "group") {
        parse::ConvertData(py::cast<py::object>(item.second), &converted_ret);
        ASSERT_EQ(converted_ret->ToString(), "0-1-2");
      } else if (name == "fusion") {
        parse::ConvertData(py::cast<py::object>(item.second), &converted_ret);
        ASSERT_EQ(converted_ret->ToString(), "0");
      } else if (name == "instance_name") {
        parse::ConvertData(py::cast<py::object>(item.second), &converted_ret);
        ASSERT_EQ(converted_ret->ToString(), "test");
      } else if (name == "index") {
        parse::ConvertData(py::cast<py::object>(item.second), &converted_ret);
        ASSERT_EQ(converted_ret->ToString(), "0");
      } else {
        MS_LOG(EXCEPTION) << "Test failed";
      }
      attributes.emplace(name, converted_ret);
    }
  }
}

TEST_F(TestStepParallel, CreatOpInstance1) {
  OperatorAttrs attrs;
  OperatorName op_name = "ABC";
  OperatorParams operator_param;
  OperatorArgs args = std::make_pair(attrs, operator_param);
  EXPECT_THROW({ CreatOpInstance(args.first, op_name, "test"); }, std::runtime_error);
}
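
// OperatorInstance should create a MatMulInfo from the primitive, its attrs,
// and the input/output shapes; resetting the global TOTAL_OPS counter makes
// the generated operator name deterministic ("MatMulInfo00").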
TEST_F(TestStepParallel, OperatorInstance) {
  Init_Device_Manager();
  // create attrs and prim
  PrimitivePtr prim = NewValueNode(prim::kPrimMatMul)->value()->cast<PrimitivePtr>();
  ValuePtr transpose_a = MakeValue(false);
  ValuePtr transpose_b = MakeValue(false);
  prim->set_attr("transpose_a", transpose_a);
  prim->set_attr("transpose_b", transpose_b);
  auto attrs = prim->attrs();
  // create strategy
  Strategys strategy = {{2, 2}, {2, 4}};
  StrategyPtr strategyPtr = parallel::NewStrategy(0, strategy);
  // create shapes
  Shapes inputs_shape = std::vector<Shape>{{64, 32}, {32, 64}};
  Shapes outputs_shape = std::vector<Shape>{{64, 64}};
  std::vector<Shapes> shape = {inputs_shape, outputs_shape};
  TOTAL_OPS = 0;
  OperatorInfoPtr matmul_info = OperatorInstance(prim, attrs, shape);
  matmul_info->Init(strategyPtr);
  std::string name_expect = "MatMulInfo00";
  std::string name_test = matmul_info->name();
  ASSERT_EQ(name_expect, name_test);
}
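
// ExtractInformation walks every CNode and attaches an OperatorInfo built
// from its strategy attribute. A well-formed graph passes; the corrupted
// strategies from Make_Manager(2) and Make_Manager(3) must throw.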
TEST_F(TestStepParallel, ExtractInformation) {
  Init_Device_Manager();
  FuncGraphManagerPtr manager = Make_Manager();
  FuncGraphSet graphs = manager->func_graphs();
  FuncGraphPtr graph = *graphs.begin();
  auto ret = graph->get_return();
  std::vector<AnfNodePtr> all_nodes = DeepScopedGraphSearch(ret);
  ExtractInformation(all_nodes);
}

TEST_F(TestStepParallel, ExtractInformation2) {
  Init_Device_Manager();
  FuncGraphManagerPtr manager = Make_Manager(2);
  FuncGraphSet graphs = manager->func_graphs();
  FuncGraphPtr graph = *graphs.begin();
  auto ret = graph->get_return();
  std::vector<AnfNodePtr> all_nodes = DeepScopedGraphSearch(ret);
  EXPECT_THROW({ ExtractInformation(all_nodes); }, std::runtime_error);
}

TEST_F(TestStepParallel, ExtractInformation3) {
  Init_Device_Manager();
  FuncGraphManagerPtr manager = Make_Manager(3);
  FuncGraphSet graphs = manager->func_graphs();
  FuncGraphPtr graph = *graphs.begin();
  auto ret = graph->get_return();
  std::vector<AnfNodePtr> all_nodes = DeepScopedGraphSearch(ret);
  EXPECT_THROW({ ExtractInformation(all_nodes); }, std::runtime_error);
}
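
// ForwardCommunication1 inserts a pair of AllReduce ops after each MatMul and
// then verifies that the nodes feeding "return" and "MatMul" pass through the
// inserted AllReduce pair. ForwardCommunication2 and 3 exercise the failure
// modes: a graph whose manager has been dropped, and an unknown operator name.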
TEST_F(TestStepParallel, ForwardCommunication1) {
  Init_Device_Manager();
  ValuePtr attr0_value = MakeValue(REDUCE_OP_SUM);
  ValuePtr attr1_value = MakeValue("0-1-2");
  Attr attr0 = std::make_pair("op", attr0_value);
  Attr attr1 = std::make_pair("group", attr1_value);
  OperatorAttrs attrs = {attr0, attr1};
  OperatorName op_name = "AllReduce";
  OperatorParams operator_param;
  OperatorArgs args = std::make_pair(attrs, operator_param);
  Operator op = std::make_pair(op_name, args);
  OperatorVector op_list = {op, op};
  FuncGraphManagerPtr manager = Make_Manager();
  FuncGraphSet graphs = manager->func_graphs();
  FuncGraphPtr graph = *graphs.begin();
  auto ret = graph->get_return();
  std::vector<AnfNodePtr> all_nodes = DeepScopedGraphSearch(ret);
  ExtractInformation(all_nodes);
  for (auto &node : all_nodes) {
    if (!node->isa<CNode>()) {
      continue;
    }
    auto cnode = node->cast<CNodePtr>();
    FuncGraphPtr func_graph = node->func_graph();
    PrimitivePtr prim = cnode->input(0)->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
    if (prim->name() == "MatMul") {
      ForwardCommunication(op_list, cnode);
      draw::Draw("./forwardcommunication.dot", func_graph);
    }
  }
  AnfNodeSet after_nodes = manager->all_nodes();
  for (auto &node : after_nodes) {
    if (!node->isa<CNode>()) {
      continue;
    }
    auto &inputs = node->cast<CNodePtr>()->inputs();
    PrimitivePtr prim = inputs[0]->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
    if (prim->name() == "return" || prim->name() == "MatMul") {
      if (!inputs[1]->isa<Parameter>()) {
        CNodePtr pre_node = inputs[1]->cast<CNodePtr>();
        PrimitivePtr pre_prim = pre_node->input(0)->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
        CNodePtr pre_node2 = pre_node->input(1)->cast<CNodePtr>();
        PrimitivePtr pre_prim2 = pre_node2->input(0)->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
        ASSERT_EQ("AllReduce", pre_prim->name());
        ASSERT_EQ("AllReduce", pre_prim2->name());
      }
    }
  }
}

TEST_F(TestStepParallel, ForwardCommunication2) {
  OperatorVector op_list;
  FuncGraphManagerPtr manager = Make_Manager();
  FuncGraphSet graphs = manager->func_graphs();
  FuncGraphPtr graph = *graphs.begin();
  auto ret = graph->get_return();
  std::vector<AnfNodePtr> all_nodes = DeepScopedGraphSearch(ret);
  ExtractInformation(all_nodes);
  for (auto &node : all_nodes) {
    if (!node->isa<CNode>()) {
      continue;
    }
    auto cnode = node->cast<CNodePtr>();
    FuncGraphPtr func_graph = node->func_graph();
    func_graph->set_manager(nullptr);
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(cnode->input(0));
    if (prim->name() == "MatMul") {
      EXPECT_THROW({ ForwardCommunication(op_list, cnode); }, std::runtime_error);
      break;
    }
  }
}

TEST_F(TestStepParallel, ForwardCommunication3) {
  OperatorVector op_list;
  FuncGraphManagerPtr manager = Make_Manager();
  FuncGraphSet graphs = manager->func_graphs();
  FuncGraphPtr graph = *graphs.begin();
  auto ret = graph->get_return();
  std::vector<AnfNodePtr> all_nodes = DeepScopedGraphSearch(ret);
  ExtractInformation(all_nodes);
  for (auto &node : all_nodes) {
    if (!node->isa<CNode>()) {
      continue;
    }
    auto cnode = node->cast<CNodePtr>();
    FuncGraphPtr func_graph = node->func_graph();
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(cnode->input(0));
    if (prim->name() == "MatMul") {
      OperatorAttrs attrs;
      OperatorParams operator_param;
      OperatorArgs args = std::make_pair(attrs, operator_param);
      Operator op = std::make_pair("ABC", args);
      OperatorVector op_list = {op};
      EXPECT_THROW({ ForwardCommunication(op_list, cnode); }, std::runtime_error);
      break;
    }
  }
}
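
// GetTensorInLayout should fetch the layout of node1's input from the
// preceding operator's OperatorInfo; the tensor shape recorded in the
// returned layout must equal the 64x64 MatMul output shape.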
TEST_F(TestStepParallel, GetTensorInLayout) {
  Init_Device_Manager();
  // create attrs and prim
  FuncGraphPtr func_graph = std::make_shared<FuncGraph>();
  Shape inputs_x_dims = {64, 32};
  Shape inputs_y_dims = {32, 64};
  Shape outputs_dims = {64, 64};
  CNodePtr node = Make_Node(inputs_x_dims, inputs_y_dims, outputs_dims);
  std::vector<AnfNodePtr> inputs(node->inputs());
  CNodePtr node1 = func_graph->NewCNode(inputs);
  PrimitivePtr prim = node1->input(0)->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
  ValuePtr transpose_a = MakeValue(false);
  ValuePtr transpose_b = MakeValue(false);
  prim->set_attr("transpose_a", transpose_a);
  prim->set_attr("transpose_b", transpose_b);
  auto attrs = prim->attrs();
  // create strategy
  Strategys strategy = {{2, 2}, {2, 4}};
  StrategyPtr strategyPtr = parallel::NewStrategy(0, strategy);
  // create shapes
  Shapes inputs_shape = std::vector<Shape>{{64, 32}, {32, 64}};
  Shapes outputs_shape = std::vector<Shape>{{64, 64}};
  std::vector<Shapes> shape = {inputs_shape, outputs_shape};
  OperatorInfoPtr matmul_info = OperatorInstance(prim, attrs, shape);
  matmul_info->Init(strategyPtr);
  node->set_user_data<OperatorInfo>(matmul_info);
  OperatorInfoPtr distribute_operator_pre = node->user_data<OperatorInfo>();
  TensorLayout tensorlayout_e;
  Shape array = {64, 64};
  TensorLayout tensorlayout = GetTensorInLayout(node1, prim, distribute_operator_pre);
  Shape tensor_shape_test = tensorlayout.tensor_shape().array();
  ASSERT_EQ(array, tensor_shape_test);
}
}  // namespace parallel
}  // namespace mindspore