step_auto_parallel.cc
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "parallel/step_auto_parallel.h"

#include <inttypes.h>
#include <sys/time.h>
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "ir/anf.h"
#include "ir/meta_tensor.h"
#include "optimizer/opt.h"
#include "optimizer/optimizer.h"
#include "parallel/auto_parallel/dp_algo_costmodel.h"
#include "parallel/auto_parallel/edge_costmodel.h"
#include "parallel/auto_parallel/graph_costmodel.h"
#include "parallel/auto_parallel/rec_core/rec_generate_strategy.h"
#include "parallel/auto_parallel/rec_core/rec_parse_graph.h"
#include "parallel/auto_parallel/rec_core/rec_partition.h"
#include "parallel/context.h"
#include "parallel/ops_info/tmp_identity_info.h"
#include "parallel/ops_info/reshape_info.h"
#include "parallel/step_parallel.h"
#include "parallel/strategy_checkpoint/parallel_strategy_checkpoint.h"
#include "pipeline/parse/python_adapter.h"
#include "pipeline/pipeline.h"

namespace mindspore {
namespace parallel {

// splittable_op_ will continuously be updated
std::vector<std::string> splittable_op_ = {MATMUL,
                                           GELU,
                                           TANH,
                                           SOFTMAX,
                                           LOG_SOFTMAX,
                                           ACTIVATION,
                                           PRELU,
                                           FLOORDIV,
                                           L2_NORMALIZE,
                                           TRANSPOSE,
                                           RESHAPE,
                                           TENSOR_ADD,
                                           SUB,
                                           MUL,
                                           DIV,
                                           GREATER,
                                           MAXPOOL,
                                           MAXPOOLV2,
                                           VIRTUAL_DATA_SET,
                                           SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS,
                                           RELU,
                                           ONEHOT,
                                           DROPOUT_DO_MASK,
                                           REDUCE_MAX,
                                           REDUCE_MIN,
                                           ARGMAXWITHVALUE,
                                           ARGMINWITHVALUE,
                                           REDUCE_SUM,
                                           CONV2D,
                                           FUSE_BATCH_NORM,
                                           POOLING,
                                           SOFTMAX_CROSS_ENTROPY_WITH_LOGITS,
                                           SIGMOID_CROSS_ENTROPY_WITH_LOGITS,
                                           MAX_POOL_WITH_ARGMAX,
                                           SIMPLE_MEAN,
                                           FLATTEN,
                                           BATCH_NORM,
                                           LAYER_NORM,
                                           BIAS_ADD,
                                           ASSIGN_SUB,
                                           COS,
                                           ACOS,
                                           EXP,
                                           LOG,
                                           REDUCE_MEAN,
                                           REAL_DIV,
                                           SIGMOID,
                                           POW,
                                           MAXIMUM,
                                           MINIMUM,
                                           EQUAL,
                                           NOT_EQUAL,
                                           LOGICALNOT,
                                           GATHERV2,
                                           STRIDEDSLICE,
                                           SQRT,
                                           GET_NEXT,
                                           CAST,
                                           NEG,
                                           SQUARE,
                                           BATCH_MATMUL,
                                           EXPAND_DIMS,
                                           SQUEEZE};

std::vector<std::string> elementwise_op_ = {ACTIVATION, GELU, TANH, SOFTMAX, LOG_SOFTMAX, RELU, SQRT, CAST,
                                            POW, EXP, LOG, COS, ACOS, LOGICALNOT, NEG, SQUARE};
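
// Entry point of the auto-parallel pass. If the graph carries the AUTO_PARALLEL flag, a
// parallelization strategy is searched for every cared-about operator with either the
// dynamic-programming (DP) or the recursive-programming (RP) algorithm, and the chosen
// strategies are attached to the CNodes' OperatorInfos.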
bool StepAutoParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &) {
  MS_EXCEPTION_IF_NULL(root);
  MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance());
  std::string parallel_mode = ParallelContext::GetInstance()->parallel_mode();
  // assume no change to the graph
  bool changes = false;
  // control whether to use the model_parallel mode
  if (!root->has_flag(AUTO_PARALLEL) || (parallel_mode != AUTO_PARALLEL) ||
      root->has_flag(AUTO_PARALLEL_RUN_ONCE_ONLY)) {
    return changes;
  }
  // check whether strategy_search_mode is valid
  std::string strategy_search_mode = ParallelContext::GetInstance()->strategy_search_mode();
  if ((strategy_search_mode != DYNAMIC_PROGRAMMING) && (strategy_search_mode != RECURSIVE_PROGRAMMING)) {
    // Fall back to dynamic programming as the default searching mode.
    strategy_search_mode = DYNAMIC_PROGRAMMING;
    MS_LOG(INFO) << "No strategy searching mode indicated, using DP searching mode as default";
  }
  struct timeval start_time, end_time;
  (void)gettimeofday(&start_time, nullptr);
  if (MsContext::GetInstance()->save_graphs_flag()) {
    draw::Draw(STEP_AUTO_PARALLEL_BEGIN, root);
  }
  MS_LOG(INFO) << "Now entering step auto parallel";
  TOTAL_OPS = 0;
  AnfNodePtr ret = root->get_return();
  std::vector<AnfNodePtr> all_nodes = DeepScopedGraphSearch(ret);
  if (ParallelInit() != SUCCESS) {
    MS_LOG(EXCEPTION) << "Parallel init failed";
  }
  // mark the forward cnodes; the parallel pass only cares about these nodes
  MarkForwardCNode(root);
  if (FindCommunicationOp(all_nodes)) {
    MS_LOG(EXCEPTION) << "The graph contains a communication op";
  }
  // search the parallelization strategy
  if (strategy_search_mode == DYNAMIC_PROGRAMMING) {
    if (ParallelStrategySearch(all_nodes, root) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using DP searching mode";
    }
  } else if (strategy_search_mode == RECURSIVE_PROGRAMMING) {
    if (ParallelStrategyRecSearch(all_nodes, root) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using RP searching mode";
    }
  } else {
    MS_LOG(EXCEPTION) << "Auto-parallel strategy searching mode unexpected";
  }
  (void)gettimeofday(&end_time, nullptr);
  uint64_t time = kUSecondInSecond * static_cast<uint64_t>(end_time.tv_sec - start_time.tv_sec);
  time += static_cast<uint64_t>(end_time.tv_usec - start_time.tv_usec);
  MS_LOG(INFO) << "Now leaving step auto parallel, used time: " << time << " us";
  root->flags()[AUTO_PARALLEL_RUN_ONCE_ONLY] = true;
  return changes;
}

// Given the node, return whether each input is a parameter or an output of an operator.
// The returned boolean vector is in the same order as the inputs, thus its implementation
// is kept closely consistent with ExtractShape() in step_parallel.cc
std::vector<bool> ExtractInputParameterByNode(const CNodePtr &node) {
  std::vector<bool> is_parameter;
  std::vector<AnfNodePtr> node_inputs{node->inputs()};
  for (size_t i = 1; i < node_inputs.size(); ++i) {
    auto input = node_inputs[i];
    if (input->isa<Parameter>()) {
      auto input_parameter = input->cast<ParameterPtr>();
      if (input_parameter->has_default()) {
        bool require_grad =
          py::cast<bool>(parse::python_adapter::GetPyObjAttr(input_parameter->default_param(), "requires_grad"));
        is_parameter.push_back(require_grad);
      } else {
        is_parameter.push_back(false);
      }
    } else if (input->isa<CNode>() || IsValueNode<tensor::Tensor>(input) || IsValueNode<RefKey>(input)) {
      is_parameter.push_back(false);
    }
  }
  return is_parameter;
}

// Given the type, return the number of bytes to represent this type
size_t GetLengthOfDataType(const TypePtr &type) {
  switch (type->type_id()) {
    case kNumberTypeBool:
      return sizeof(bool);
    case kNumberTypeInt8:
      return sizeof(int8_t);
    case kNumberTypeInt16:
      return sizeof(int16_t);
    case kNumberTypeInt32:
      return sizeof(int32_t);
    case kNumberTypeInt64:
      return sizeof(int64_t);
    case kNumberTypeUInt8:
      return sizeof(uint8_t);
    case kNumberTypeUInt16:
      return sizeof(uint16_t);
    case kNumberTypeUInt32:
      return sizeof(uint32_t);
    case kNumberTypeUInt64:
      return sizeof(uint64_t);
    case kNumberTypeFloat16:
      return sizeof(float) / 2;
    case kNumberTypeFloat32:
      return sizeof(float);
    case kNumberTypeFloat64:
      return sizeof(double);
    case kNumberTypeInt:
      return sizeof(int);
    case kNumberTypeUInt:
      return sizeof(unsigned int);
    case kNumberTypeFloat:
      return sizeof(float);
    default:
      MS_LOG(EXCEPTION) << "Unexpected type " << type->type_name();
  }
}
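
// Return the byte length of 'input's tensor element type; non-tensor types raise an exception.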
size_t GetInputsTypeLen(const AnfNodePtr &input) {
  MS_EXCEPTION_IF_NULL(input);
  if (!input->isa<CNode>() && !input->isa<Parameter>() && !IsValueNode<tensor::Tensor>(input)) {
    MS_LOG(EXCEPTION) << "The input node is not a CNode, Parameter, or Tensor";
  }
  size_t input_type_len = 0;
  auto type = input->Type();
  MS_EXCEPTION_IF_NULL(type);
  if (type->isa<mindspore::TensorType>()) {
    auto input_element_type = type->cast<mindspore::TensorTypePtr>()->element();
    input_type_len = GetLengthOfDataType(input_element_type);
  } else {
    MS_LOG(EXCEPTION) << "Unknown type: " << type->type_name();
  }
  return input_type_len;
}
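
// For each input of 'node', record the byte length of its element type. A RefKey input is
// resolved to the Parameter it refers to before its type length is measured.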
std::vector<size_t> ExtractInputTypeLengthByNode(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  std::vector<size_t> inputs_type_len;
  std::vector<AnfNodePtr> node_inputs{node->inputs()};
  // extract input element length
  for (auto &input : node_inputs) {
    if (IsValueNode<RefKey>(input)) {
      auto func_graph = node->func_graph();
      MS_EXCEPTION_IF_NULL(func_graph);
      std::vector<AnfNodePtr> parameters = FindParameterByRefKeyNode(input, func_graph);
      if (parameters.size() != 1) {
        MS_LOG(EXCEPTION) << "Find parameter by ref key node failed";
      }
      inputs_type_len.push_back(GetInputsTypeLen(parameters[0]));
    } else if (input->isa<CNode>() || input->isa<Parameter>() || IsValueNode<tensor::Tensor>(input)) {
      // extract the input type length from the parameter or apply node
      inputs_type_len.push_back(GetInputsTypeLen(input));
    }
  }
  return inputs_type_len;
}
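
// Collect the element types of 'node's outputs; the output may be a single tensor or a
// tuple of tensors.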
std::vector<TypePtr> ExtractOutputTypeByNode(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  std::vector<TypePtr> outputs_type;
  // extract output element type
  auto primary_output_type = node->Type();
  MS_EXCEPTION_IF_NULL(primary_output_type);
  if (primary_output_type->isa<mindspore::Tuple>()) {
    // in this case, the output is a tuple
    auto tuple_output_type = primary_output_type->cast<mindspore::TuplePtr>();
    auto elements = tuple_output_type->elements();
    for (auto &ele : elements) {
      if (ele->isa<mindspore::TensorType>()) {
        auto ele_element_type = ele->cast<mindspore::TensorTypePtr>()->element();
        outputs_type.push_back(ele_element_type);
      } else {
        MS_LOG(EXCEPTION) << "Unknown type: " << primary_output_type->type_name();
      }
    }
  } else {
    // in this case, the output is a single tensor
    if (primary_output_type->isa<mindspore::TensorType>()) {
      auto element_type = primary_output_type->cast<mindspore::TensorTypePtr>()->element();
      outputs_type.push_back(element_type);
    } else {
      MS_LOG(EXCEPTION) << "Unknown type: " << primary_output_type->type_name();
    }
  }
  return outputs_type;
}

bool IsElementWiseOperator(const std::string &op_name) {
  auto iter = std::find(elementwise_op_.begin(), elementwise_op_.end(), op_name);
  return (iter != elementwise_op_.end());
}

bool IsSplittableOperator(const std::string &op_name) {
  auto iter = std::find(splittable_op_.begin(), splittable_op_.end(), op_name);
  return (iter != splittable_op_.end());
}
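
// A CNode is cared about by auto-parallel only when it wraps a splittable Primitive.
// Hitting a parallel-care node whose Primitive is not splittable is an error, since no
// OperatorInfo is implemented for it; Cast is additionally always treated as cared about.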
bool IsAutoParallelCareNode(const CNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  ValueNodePtr prim_node = cnode->input(0)->cast<ValueNodePtr>();
  if (prim_node == nullptr) {
    return false;
  }
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_node);
  if (prim == nullptr) {
    return false;
  }
  bool bool_result = IsParallelCareNode(cnode) && !IsSplittableOperator(prim->name());
  if (bool_result) {
    MS_LOG(EXCEPTION) << "Should implement OperatorInfo for: " << prim->name();
  } else if (prim->name() == CAST) {
    return true;
  }
  return IsParallelCareNode(cnode) && IsSplittableOperator(prim->name());
}
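
// Create the OperatorInfo instance for 'cnode': extract the shapes, parameter flags, and type
// lengths of its inputs and outputs, then either generate candidate strategies for searching,
// or adopt the strategy configured by the user or loaded from the checkpoint. Returns nullptr
// on failure.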
OperatorInfoPtr CreateTheOperatorInfo(const PrimitivePtr &prim, const CNodePtr &cnode, StrategyMap *stra_map) {
  MS_EXCEPTION_IF_NULL(prim);
  MS_EXCEPTION_IF_NULL(cnode);
  auto attrs = prim->attrs();
  std::vector<Shapes> shape_list = ExtractShape(cnode);
  if (shape_list.empty()) {
    MS_LOG(EXCEPTION) << "Failure: node " << cnode->UniqueId() << " failed to extract shape";
  }
  // Create an OperatorInfo instance
  OperatorInfoPtr operator_info = NewOperatorInstance(prim, attrs, shape_list);
  MS_EXCEPTION_IF_NULL(operator_info);
  // Set the parameter information for this OperatorInfo (whether the inputs are parameters or not)
  std::vector<bool> parameter_info = ExtractInputParameterByNode(cnode);
  if (operator_info->set_is_parameter(parameter_info) != SUCCESS) {
    MS_LOG(ERROR) << "Initializing parameter information failed for operator: " << operator_info->name();
    return nullptr;
  }
  // Set the data type for inputs and outputs of this OperatorInfo
  auto inputs_type_length = ExtractInputTypeLengthByNode(cnode);
  auto outputs_type = ExtractOutputTypeByNode(cnode);
  std::vector<size_t> outputs_type_length;
  outputs_type_length.reserve(outputs_type.size());
  std::transform(outputs_type.begin(), outputs_type.end(), std::back_inserter(outputs_type_length),
                 GetLengthOfDataType);
  if (operator_info->SetInputAndOutputTypeLength(inputs_type_length, outputs_type_length) != SUCCESS) {
    MS_LOG(ERROR) << "Setting the lengths of inputs and outputs failed for operator: " << operator_info->name();
    return nullptr;
  }
  if (operator_info->set_outputs_type(outputs_type) != SUCCESS) {
    MS_LOG(ERROR) << "Setting the types of outputs failed for operator: " << operator_info->name();
    return nullptr;
  }
  // When the 'inputs' contains numerical values for some operators, these values should be extracted from
  // the ANF graph
  auto &inputs = cnode->inputs();
  std::vector<ValuePtr> input_value;
  for (size_t index = 1; index < inputs.size(); ++index) {
    if (inputs[index]->isa<ValueNode>()) {
      input_value.push_back(GetValueNode(inputs[index]));
    } else {
      input_value.emplace_back(nullptr);
    }
  }
  operator_info->set_input_value(input_value);
  operator_info->set_outputs_dtype(cnode->Type());
  operator_info->set_cnode(cnode);
  // key of the strategy map
  std::string strategy_key_name = NodeParameterName(cnode);
  bool load_strategy_from_ckpt =
    StrategyCheckpoint::GetInstance().LoadCheckPointOn() && stra_map->find(strategy_key_name) != stra_map->end();
  // If no strategy has been configured for this operator, then candidate strategies are generated for
  // auto-strategy searching; if this primitive is CAST, we ignore the user-specified strategy.
  // If the strategy is set to load from the checkpoint, loading the strategy from the checkpoint is preferred.
  if ((!StrategyFound(attrs) || prim->name() == CAST) && !load_strategy_from_ckpt) {
    // Compute split_flag_list_, indicating which input has the batch dimension. This is ONLY used in preparation
    // for the BatchParallelInfo operator
    operator_info->ComputeBatchSplitFlagList();
    if (operator_info->GenerateStrategies(0) != SUCCESS) {
      MS_LOG(ERROR) << "Strategy search for Operator " << operator_info->name() << " failed.";
      return nullptr;
    }
  } else {
    // In this case, the configured strategy should be extracted to help set the cost
    StrategyPtr strategyPtr;
    if (load_strategy_from_ckpt) {
      strategyPtr = (*stra_map)[strategy_key_name];
    } else {
      strategyPtr = parallel::ExtractStrategy(attrs);
    }
    if (strategyPtr != nullptr) {
      if (prim->name() == RESHAPE) {
        MS_LOG(EXCEPTION) << "Setting a strategy for Reshape has no effect!";
      }
      // Set the cost for this configured strategy
      if (operator_info->SetCostUnderStrategy(strategyPtr) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Failure: operator " << prim->name() << " SetCostUnderStrategy failed";
      } else if (FULLY_USE_DEVICES) {
        // If configured to fully use devices, then check the user-specified strategy
        int32_t used_devices = operator_info->used_devices();
        MS_EXCEPTION_IF_NULL(g_device_manager);
        auto total_device_num = g_device_manager->GetDeviceListByStageId(0).size();
        // 'used_devices == 1' means the ALL-1 strategy, which is valid in auto-parallel
        if (used_devices == 1) {
          return operator_info;
        }
        // 'used_devices == -1' means that 'used_devices_' is not set
        if ((used_devices == -1) || IntToSize(used_devices) != total_device_num) {
          MS_LOG(EXCEPTION) << "In configuration 'FULLY_USE_DEVICES' = True, "
                            << "but the specified strategy uses devices: " << used_devices
                            << ", total devices: " << total_device_num;
        }
      }
    }
  }
  return operator_info;
}
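
// Step 1 of cost-graph construction exists in two variants: the function below keys operators
// by each CNode's UniqueId, so every CNode gets its own OperatorInfo, whereas the 'TC' variant
// further down keys them by UniqueIdThroughCopy, so CNodes copied from the same origin share
// one OperatorInfo.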
// Using each CNode's UniqueId to construct the nodes
Status ConstructCostGraphNodesByUniqueId(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &) {
  MS_LOG(INFO) << "Constructing nodes for cost graph begins.";
  entire_costgraph = std::make_shared<CostGraph>();
  entire_costgraph->SetDeviceMemoryAndCostParameter();
  // The map from a CNode's UniqueId to its OperatorInfo
  std::map<std::string, OperatorInfoPtr> from_cnode_to_info;
  // extract the strategy from the checkpoint for multi-train
  StrategyMap stra_map;
  if (StrategyCheckpoint::GetInstance().LoadCheckPointOn()) {
    if (StrategyCheckpoint::GetInstance().Load(&stra_map) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Load strategy checkpoint failed";
    }
  }
  // Step 1
  for (auto &node : all_nodes) {
    // NOTE: we only care about splittable Primitive operators
    auto cnode = node->cast<CNodePtr>();
    bool bool_result = (cnode == nullptr) || (!IsValueNode<Primitive>(cnode->input(0)));
    if (bool_result) {
      continue;
    }
    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    if (!IsAutoParallelCareNode(cnode)) {
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    MS_EXCEPTION_IF_NULL(prim);
    auto search_cnode = from_cnode_to_info.find(cnode->UniqueId());
    if (search_cnode == from_cnode_to_info.end()) {
      auto operator_info = CreateTheOperatorInfo(prim, cnode, &stra_map);
      if (operator_info == nullptr) {
        return FAILED;
      }
      // Needed by the rec_parser
      operator_info->set_type(prim->name());
      std::vector<std::string> inputs_tensor_name = ExtractInputsTensorName(cnode);
      entire_costgraph->AddOperator(operator_info);
      (void)cnode->set_operator_info(operator_info);
      MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                   << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                   << " is set OperatorInfo: " << operator_info->name() << ", Primitive: " << prim->name();
      (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueId(), operator_info));
      // Needed by the rec_parser
      entire_costgraph->add_inputs_tensor_name(inputs_tensor_name);
    } else {
      // Two CNodes' UniqueIds should not be equal
      MS_LOG(EXCEPTION) << "The CNode with UniqueId: " << cnode->UniqueId()
                        << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                        << " is set OperatorInfo: " << search_cnode->second->name() << ", Primitive: " << prim->name();
    }
  }
  MS_LOG(INFO) << "Constructing nodes for cost graph ends.";
  return SUCCESS;
}

// Using each CNode's UniqueIdThroughCopy to construct the nodes
Status ConstructCostGraphNodesByUniqueIdTC(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &) {
  MS_LOG(INFO) << "Constructing nodes for cost graph begins.";
  entire_costgraph = std::make_shared<CostGraph>();
  entire_costgraph->SetDeviceMemoryAndCostParameter();
  // The map from a CNode's UniqueIdThroughCopy to its OperatorInfo
  std::map<std::string, OperatorInfoPtr> from_cnode_to_info;
  // extract the strategy from the checkpoint for multi-train
  StrategyMap stra_map;
  if (StrategyCheckpoint::GetInstance().LoadCheckPointOn()) {
    if (StrategyCheckpoint::GetInstance().Load(&stra_map) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Load strategy checkpoint failed";
    }
  }
  for (auto &node : all_nodes) {
    // NOTE: we only care about splittable Primitive operators
    auto cnode = node->cast<CNodePtr>();
    bool bool_result = (cnode == nullptr) || (!IsValueNode<Primitive>(cnode->input(0)));
    if (bool_result) {
      continue;
    }
    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    if (!IsAutoParallelCareNode(cnode)) {
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    // Find the OperatorInfo if it exists
    auto search_cnode = from_cnode_to_info.find(cnode->UniqueIdThroughCopy());
    if (search_cnode == from_cnode_to_info.end()) {
      // In this case, the corresponding OperatorInfo has not been created; create a new one.
      auto operator_info = CreateTheOperatorInfo(prim, cnode, &stra_map);
      if (operator_info == nullptr) {
        return FAILED;
      }
      // Needed by the rec_parser
      operator_info->set_type(prim->name());
      std::vector<std::string> inputs_tensor_name = ExtractInputsTensorName(cnode);
      entire_costgraph->AddOperator(operator_info);
      (void)cnode->set_operator_info(operator_info);
      MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                   << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                   << " is set OperatorInfo: " << operator_info->name() << ", Primitive: " << prim->name();
      (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueIdThroughCopy(), operator_info));
      // Needed by the rec_parser
      entire_costgraph->add_inputs_tensor_name(inputs_tensor_name);
    } else {
      auto current_op_ptr = search_cnode->second;
      if (current_op_ptr == nullptr) {
        MS_LOG(EXCEPTION) << "Find " << prim->name() << " from CostGraph failed.";
      } else {
        bool is_find_wrong = (current_op_ptr->name().find(VIRTUAL_DATA_SET_INFO) == std::string::npos) &&
                             (current_op_ptr->name().find(BATCH_PARALLEL) == std::string::npos) &&
                             (current_op_ptr->name().find(prim->name()) == std::string::npos);
        if (is_find_wrong) {
          MS_LOG(EXCEPTION) << "The OperatorInfo: " << current_op_ptr->name()
                            << " does not match the Prim: " << prim->name();
        }
        (void)cnode->set_operator_info(current_op_ptr);
        MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                     << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                     << " is set OperatorInfo: " << current_op_ptr->name() << ", Primitive: " << prim->name();
      }
    }
  }
  MS_LOG(INFO) << "Constructing nodes for cost graph ends.";
  return SUCCESS;
}
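
// Step 2 of cost-graph construction: create an Edge between every producer-consumer pair of
// OperatorInfos, skipping over the 'tuple_getitem' and 'depend' nodes that may sit between them.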
void ConstructCostGraphEdges(const std::vector<AnfNodePtr> &all_nodes) {
  // Step 2
  MS_LOG(INFO) << "Constructing edges for cost graph begins.";
  for (auto &node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    bool bool_result_cnode = (cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0));
    if (bool_result_cnode) {
      continue;
    }
    auto &inputs = cnode->inputs();
    ValueNodePtr prim_anf_node = inputs[0]->cast<ValueNodePtr>();
    if (!IsAutoParallelCareNode(cnode)) {
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    size_t edge_count = 0;
    for (size_t i = 1; i < inputs.size(); ++i) {
      auto prev_cnode = inputs[i]->cast<CNodePtr>();
      bool bool_result_prev_cnode = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
      if (bool_result_prev_cnode) {
        continue;
      }
      ValueNodePtr prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
      PrimitivePtr prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
      size_t output_index = 0;
      bool bool_result =
        (IsAutoParallelCareNode(prev_cnode)) || (prev_prim->name() == TUPLE_GETITEM) || (prev_prim->name() == DEPEND);
      while (bool_result) {
        if (IsAutoParallelCareNode(prev_cnode)) {
          std::string edge_name =
            prev_cnode->operator_info()->name() + OPERATOR_TO_OPERATOR_CONNECTOR + cnode->operator_info()->name();
          // If the edge between these two operators has already been added, it will not be added again.
          if (entire_costgraph->IsEdgeInCostGraph(edge_name, output_index, i - 1)) {
            break;
          }
          EdgePtr edge_ptr;
          MS_LOG(INFO) << "Creating edge: " << edge_name;
          bool follow_strategy = (prim->name() == RESHAPE) || (prev_prim->name() == RESHAPE) ||
                                 (ELEMENTWISE_OP_STRA_FOLLOW && IsElementWiseOperator(prev_prim->name()));
          if (follow_strategy) {
            // Redistribution is not allowed on the edge.
            // Elementwise operators have the same strategy as their previous operators.
            edge_ptr = std::make_shared<Edge>(edge_name, prev_cnode->operator_info(), cnode->operator_info(),
                                              output_index, i - 1, false, true);
          } else {
            edge_ptr = std::make_shared<Edge>(edge_name, prev_cnode->operator_info(), cnode->operator_info(),
                                              output_index, i - 1, false);
          }
          // Init costs for this edge
          if (edge_ptr->InitEdgeCost() != SUCCESS) {
            MS_LOG(EXCEPTION) << "Edge cost initialization failed";
          }
          cnode->operator_info()->AddPrevEdge(edge_ptr);
          prev_cnode->operator_info()->AddSuccEdge(edge_ptr);
          entire_costgraph->AddEdge(prev_cnode->operator_info(), cnode->operator_info(), edge_ptr);
          MS_LOG(INFO) << "Successfully added the edge between " << prev_cnode->operator_info()->name() << " and "
                       << cnode->operator_info()->name();
          edge_count++;
          break;
        } else if (prev_prim->name() == TUPLE_GETITEM) {
          // In this case, 'prev_cnode' is 'tuple_getitem'; the actual predecessor is the node before
          // this 'tuple_getitem'
          MS_LOG(INFO) << "Jumping over the 'tuple_getitem' operator.";
          output_index = IntToSize(GetValue<int>(GetValueNode(prev_cnode->input(2))));
          prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
          bool bool_result_tuple = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
          if (bool_result_tuple) {
            break;
          }
          prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
          prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
          if (!IsAutoParallelCareNode(prev_cnode)) {
            MS_LOG(EXCEPTION) << "Did not create OperatorInfo for: " << prev_prim->name();
          }
          MS_LOG(INFO) << "Jumped over the 'tuple_getitem' operator, "
                       << "and creating an edge between the Operator before "
                       << "'tuple_getitem' and the Operator after 'tuple_getitem'.";
        } else if (prev_prim->name() == DEPEND) {
          // In this case, 'prev_cnode' is 'depend'; the actual predecessor is the node before
          // this 'depend'
          MS_LOG(INFO) << "Jumping over the 'depend' operator.";
          prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
          bool bool_result_depend = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
          if (bool_result_depend) {
            break;
          }
          prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
          prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
          MS_LOG(INFO) << "Jumped over the 'depend' operator, "
                       << "and creating an edge between the Operator before "
                       << "'depend' and the Operator after 'depend'.";
        }
        bool_result =
          (IsAutoParallelCareNode(prev_cnode)) || (prev_prim->name() == TUPLE_GETITEM) || (prev_prim->name() == DEPEND);
      }
    }
    MS_LOG(INFO) << "Successfully created " << edge_count << " edges for: " << cnode->operator_info()->name();
  }
  MS_LOG(INFO) << "Constructing edges for cost graph ends.";
}
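
// If 'cnode' has RefKey inputs, return the cnode together with those inputs; otherwise
// return {nullptr, {}}.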
std::pair<AnfNodePtr, std::vector<AnfNodePtr>> CNodeWithRefKeys(const AnfNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  std::vector<AnfNodePtr> refkeys;
  if (cnode->isa<CNode>()) {
    auto cnode_ptr = cnode->cast<CNodePtr>();
    auto inputs = cnode_ptr->inputs();
    for (auto &one_input : inputs) {
      if (IsValueNode<RefKey>(one_input)) {
        refkeys.push_back(one_input);
      }
    }
    if (refkeys.size() >= 1) {
      return std::make_pair(cnode, refkeys);
    }
  }
  return {nullptr, refkeys};
}
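
// Step 3 of cost-graph construction: when one Parameter (or RefKey) feeds several distinct
// operators, create a TmpIdentity operator that produces the Parameter and connect an edge
// from it to each user, so the shared Parameter is accounted for only once in the cost model.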
void AugmentCostGraph(const std::vector<AnfNodePtr> &all_nodes) {
  // Step 3
  for (auto &node : all_nodes) {
    auto cnode_with_refkeys = CNodeWithRefKeys(node);
    if ((!node->isa<Parameter>()) && (cnode_with_refkeys.first == nullptr)) {
      continue;
    }
    std::string parameter_name;
    AnfNodePtr target_parameter = nullptr;
    AnfNodeIndexSet target_set;
    if (cnode_with_refkeys.first != nullptr) {
      // Dealing with the RefKey case
      auto refkeys = cnode_with_refkeys.second;
      auto cnode = cnode_with_refkeys.first;
      auto cnode_ptr = cnode->cast<CNodePtr>();
      if (cnode_ptr == nullptr || !IsValueNode<Primitive>(cnode_ptr->input(0))) {
        continue;
      }
      if (!IsAutoParallelCareNode(cnode_ptr)) {
        continue;
      }
      if (refkeys.size() > 1) {
        MS_LOG(EXCEPTION) << "CNode: " << cnode->fullname_with_scope() << "'s inputs have more than 1 RefKey.";
      }
      MS_EXCEPTION_IF_NULL(cnode->func_graph());
      auto cnode_func_graph = cnode->func_graph();
      MS_EXCEPTION_IF_NULL(cnode->func_graph()->manager());
      // Find the RefKey being used
      auto candidate_set_by_refkey = cnode_func_graph->manager()->node_users()[refkeys[0]];
      for (auto &candidate : candidate_set_by_refkey) {
        auto candidate_node = candidate.first;
        auto c = candidate_node->cast<CNodePtr>();
        if (c == nullptr || !IsValueNode<Primitive>(c->input(0))) {
          continue;
        }
        if (!IsAutoParallelCareNode(c)) {
          continue;
        }
        target_set.add(candidate);
      }
      // Find the corresponding Parameter being used
      std::vector<AnfNodePtr> parameters = FindParameterByRefKeyNode(refkeys[0], cnode_func_graph);
      if (parameters.size() != 1) {
        MS_LOG(EXCEPTION) << "Find parameter by ref key node failed";
      }
      parameter_name = parameters[0]->cast<ParameterPtr>()->name();
      target_parameter = parameters[0];
      auto candidate_set_by_para = cnode_func_graph->manager()->node_users()[parameters[0]];
      for (auto &candidate : candidate_set_by_para) {
        auto candidate_node = candidate.first;
        auto c = candidate_node->cast<CNodePtr>();
        if (c == nullptr || !IsValueNode<Primitive>(c->input(0))) {
          continue;
        }
        if (!IsAutoParallelCareNode(c)) {
          continue;
        }
        (void)target_set.insert(candidate);
      }
    } else if (node->isa<Parameter>()) {
      // Dealing with the Parameter case
      MS_EXCEPTION_IF_NULL(node->func_graph());
      MS_EXCEPTION_IF_NULL(node->func_graph()->manager());
      auto candidate_set = node->func_graph()->manager()->node_users()[node];
      for (auto &candidate : candidate_set) {
        auto candidate_node = candidate.first;
        auto c = candidate_node->cast<CNodePtr>();
        if (c == nullptr || !IsValueNode<Primitive>(c->input(0))) {
          continue;
        }
        if (!IsAutoParallelCareNode(c)) {
          continue;
        }
        (void)target_set.insert(candidate);
      }
      // In this case, node is a Parameter
      parameter_name = node->cast<ParameterPtr>()->name();
      target_parameter = node;
    }
    if (target_set.size() <= 1) {
      continue;
    }
    // Rule out the case where a Parameter is used by one Operator, but that Operator appears in multiple CNodes
    std::set<std::string> target_without_duplicate;
    for (auto &target : target_set) {
      auto target_cnode = target.first->cast<CNodePtr>();
      auto input_index = target.second;
      (void)target_without_duplicate.insert(std::to_string(input_index) + target_cnode->operator_info()->name());
    }
    if (target_without_duplicate.size() <= 1) {
      continue;
    }
    // At this point, it is certain that this Parameter (RefKey) is used by multiple Operators.
    OperatorInfoPtr tmp_identity_ptr;
    bool new_identity = false;
    std::string tmp_identity_name;
    auto returned_identity = entire_costgraph->FindTmpIdentityByParameterName(parameter_name);
    if (returned_identity != nullptr) {
      // In this case, the TmpIdentityInfo instance has already been created
      new_identity = false;
      tmp_identity_ptr = returned_identity;
      tmp_identity_name = tmp_identity_ptr->name();
    } else {
      // In this case, the TmpIdentityInfo instance has NOT been created. Thus, a new one is created.
      new_identity = true;
      // 1) extract the input shape from this Parameter
      MS_EXCEPTION_IF_NULL(target_parameter);
      AbstractBasePtr abstract = target_parameter->abstract();
      if (abstract == nullptr) {
        MS_LOG(EXCEPTION) << "Failure: abstract is nullptr";
      }
      auto input_shape = dyn_cast<abstract::Shape>(abstract->GetShapeTrack());
      if (input_shape == nullptr) {
        MS_LOG(EXCEPTION) << "Failure: input_shape is nullptr";
      }
      std::vector<int> shape_int = input_shape->shape();
      Shape shape;
      (void)std::transform(shape_int.begin(), shape_int.end(), std::back_inserter(shape),
                           [](int sub_shape) { return static_cast<int32_t>(sub_shape); });
      Shapes inputs_shape = {shape};
      Shapes outputs_shape = {shape};
      // 2) init the attr
      std::unordered_map<std::string, ValuePtr> attr = {};
      // Create the TmpIdentity instance
      tmp_identity_ptr = std::make_shared<TmpIdentityInfo>(inputs_shape, outputs_shape, attr);
      tmp_identity_ptr->set_name(tmp_identity_ptr->name() + std::to_string(TOTAL_OPS));
      TOTAL_OPS++;
      tmp_identity_ptr->set_refkey_parameter_name(parameter_name);
      // Set the parameter and type lengths for inputs and outputs
      std::vector<bool> is_parameter;
      auto casted_target_parameter = target_parameter->cast<ParameterPtr>();
      MS_EXCEPTION_IF_NULL(casted_target_parameter);
      if (casted_target_parameter->has_default()) {
        bool require_grad = py::cast<bool>(
          parse::python_adapter::GetPyObjAttr(casted_target_parameter->default_param(), "requires_grad"));
        is_parameter.push_back(require_grad);
      } else {
        is_parameter.push_back(false);
      }
      if (tmp_identity_ptr->set_is_parameter(is_parameter) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Setting parameter for TmpIdentityInfo failed";
      }
      auto node_type = target_parameter->Type();
      if (node_type->isa<mindspore::TensorType>()) {
        auto input_element_type = node_type->cast<mindspore::TensorTypePtr>()->element();
        std::vector<size_t> type_length = {GetLengthOfDataType(input_element_type)};
        if (tmp_identity_ptr->SetInputAndOutputTypeLength(type_length, type_length) != SUCCESS) {
          MS_LOG(EXCEPTION) << "Setting input and output type length for TmpIdentityInfo failed";
        }
      } else {
        MS_LOG(EXCEPTION) << "Unknown type: " << node_type->type_name();
      }
      // Generate strategies for this TmpIdentityInfo instance
      if (tmp_identity_ptr->GenerateStrategies(0) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Strategy search for Operator failed: " << tmp_identity_ptr->name();
      }
    }
    // A flag recording whether new edges have been created or not
    bool add_identity_edge = false;
    // Create edges between this TmpIdentityInfo instance and the subsequent Operator instances
    for (auto &target : target_set) {
      auto target_cnode = target.first->cast<CNodePtr>();
      auto prim = GetValueNode<PrimitivePtr>(target_cnode->input(0));
      auto input_index = target.second;
      std::string edge_name =
        std::string(IDENTITY_INFO) + OPERATOR_TO_OPERATOR_CONNECTOR + target_cnode->operator_info()->name();
      // If the edge between these two operators has already been added, it will not be added again.
      if (entire_costgraph->IsEdgeInCostGraph(edge_name, 0, IntToSize(input_index - 1))) {
        continue;
      }
      std::shared_ptr<Edge> edge_ptr = std::make_shared<Edge>(
        edge_name, tmp_identity_ptr, target_cnode->operator_info(), 0, input_index - 1, false, true);
      if (edge_ptr->InitEdgeCost() != SUCCESS) {
        MS_LOG(EXCEPTION) << "Edge cost initialization failed";
      }
      target_cnode->operator_info()->AddPrevEdge(edge_ptr);
      tmp_identity_ptr->AddSuccEdge(edge_ptr);
      entire_costgraph->AddEdge(tmp_identity_ptr, target_cnode->operator_info(), edge_ptr);
      MS_LOG(INFO) << "Successfully added the edge between " << tmp_identity_ptr->name() << " and "
                   << target_cnode->operator_info()->name();
      add_identity_edge = true;
    }
    if (new_identity && add_identity_edge) {
      // Add the TmpIdentityInfo to the CostGraph only if BOTH conditions are satisfied
      entire_costgraph->AddOperator(tmp_identity_ptr);
    }
  }
}
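
// Return true only when 'cnode' is a Reshape primitive that the parallel pass cares about
// and that already has an OperatorInfo attached.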
bool FindReshape(const CNodePtr &cnode) {
  if ((cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0))) {
    return false;
  }
  ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  if (!IsParallelCareNode(cnode) || (cnode->operator_info() == nullptr)) {
    return false;
  }
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  MS_EXCEPTION_IF_NULL(prim);
  OperatorInfoPtr operator_info = cnode->operator_info();
  if (operator_info == nullptr) {
    MS_LOG(EXCEPTION) << "Failure: Primitive " << prim->ToString() << "'s OperatorInstance is nullptr";
  }
  if (prim->name() != RESHAPE) {
    return false;
  }
  return true;
}

// Find the previous node and obtain its strategy_cost_ vector to get its layout vector.
bool FindPreNodeStraCosts(const AnfNodePtr &node, OperatorInfoPtr *pre_operator_info, int32_t *out_index) {
  // if the previous node is a parameter, handle it outside this function.
  if (node->isa<Parameter>()) {
    return false;
  }
  if (!node->isa<CNode>()) {
    return false;
  }
  CNodePtr cnode = node->cast<CNodePtr>();
  if (!IsValueNode<Primitive>(cnode->input(0))) {
    return false;
  }
  if (IsParallelCareNode(cnode) && (cnode->operator_info() != nullptr)) {
    *pre_operator_info = cnode->operator_info();
    *out_index = 0;
    return true;
  }
  ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  PrimitivePtr prim = prim_anf_node->value()->cast<PrimitivePtr>();
  if (prim->name() == TUPLE_GETITEM) {
    *out_index = GetTupleGetItemIndex(cnode);
    // find tuple_getitem's previous node
    auto pre_node = cnode->input(1);
    if (!pre_node->isa<CNode>()) {
      MS_LOG(EXCEPTION) << "tuple_getitem's second input is not a cnode";
    }
    CNodePtr pre_cnode = pre_node->cast<CNodePtr>();
    if (IsParallelCareNode(pre_cnode) && (pre_cnode->operator_info() != nullptr)) {
      *pre_operator_info = pre_cnode->operator_info();
      return true;
    }
    return false;
  }
  for (size_t index = 0; index < cnode->inputs().size(); ++index) {
    if (prim->name() == DEPEND && index != 1) {
      continue;
    }
    if (!FindPreNodeStraCosts(cnode->inputs()[index], pre_operator_info, out_index)) {
      continue;
    }
    return true;
  }
  MS_LOG(WARNING) << "FindPreNodeStraCosts failed; if Reshape is not the first primitive, there must be an error";
  return false;
}

// Find the next node and obtain its strategy_cost_ vector to get its layout vector.
// If Reshape's output connects to several primitives, return the first layout found.
bool FindNextNodeStraCosts(const CNodePtr &cnode, OperatorInfoPtr *next_operator_info, int32_t *in_index) {
  MS_EXCEPTION_IF_NULL(cnode);
  MS_EXCEPTION_IF_NULL(cnode->func_graph());
  FuncGraphManagerPtr manager = cnode->func_graph()->manager();
  MS_EXCEPTION_IF_NULL(manager);
  AnfNodeIndexSet node_set = manager->node_users()[cnode];
  for (auto &node_pair : node_set) {
    CNodePtr use_apply = node_pair.first->cast<CNodePtr>();
    if (use_apply == nullptr || !IsValueNode<Primitive>(use_apply->input(0))) {
      continue;
    }
    ValueNodePtr prim_anf_node = use_apply->input(0)->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(prim_anf_node);
    PrimitivePtr node_prim = prim_anf_node->value()->cast<PrimitivePtr>();
    MS_EXCEPTION_IF_NULL(node_prim);
    MS_LOG(INFO) << "FindNextNodeStraCosts prim " << node_prim->name();
    if (node_prim->name() == DEPEND && node_pair.second != 1) {
      continue;
    }
    if (IsParallelCareNode(use_apply) && (use_apply->operator_info() != nullptr)) {
      MS_LOG(INFO) << "FindNextNodeStraCosts success prim " << node_prim->name();
      *next_operator_info = use_apply->operator_info();
      *in_index = node_pair.second - 1;
      return true;
    }
    MS_LOG(DEBUG) << "FindNextNodeStraCosts failed prim " << node_prim->name() << " " << IsParallelCareNode(use_apply)
                  << " " << (use_apply->operator_info() != nullptr);
    if (FindNextNodeStraCosts(use_apply, next_operator_info, in_index)) {
      return true;
    }
  }
  return false;
}
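
// Step 1.1: Reshape carries no strategy of its own. Derive its candidate input/output layouts
// from the strategy costs of its previous operator (or Parameter) and its next operator, then
// set a cost for each layout pair.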
void ReshapeCostCompute(const std::vector<AnfNodePtr> &all_nodes) {
  for (auto node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    if (!FindReshape(cnode)) {
      continue;
    }
    MS_ASSERT(cnode->inputs().size() == 3);
    // get the previous node's strategy_cost_
    auto pre_node = cnode->input(1);
    int32_t out_index = 0;
    OperatorInfoPtr pre_operator_info;
    std::vector<std::shared_ptr<StrategyWithCost>> pre_stra_costs;
    if (pre_node->isa<Parameter>()) {
      OperatorInfoPtr operator_info = cnode->operator_info();
      auto reshape_info = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
      reshape_info->SetCostForReshapeWithParameter();
      pre_operator_info = reshape_info;
      pre_stra_costs = reshape_info->strategy_cost();
    } else {
      if (!FindPreNodeStraCosts(pre_node, &pre_operator_info, &out_index)) {
        MS_LOG(EXCEPTION) << "FindPreNodeStraCosts for reshape failed";
      }
      pre_stra_costs = pre_operator_info->strategy_cost();
    }
    // get the next node's strategy_cost_
    int32_t in_index = 0;
    OperatorInfoPtr next_operator_info;
    std::vector<std::shared_ptr<StrategyWithCost>> next_stra_costs;
    bool find_next_node = FindNextNodeStraCosts(cnode, &next_operator_info, &in_index);
    if (!find_next_node) {
      MS_LOG(INFO) << "FindNextNodeStraCosts for reshape failed";
    }
    // set input_layout and output_layout for reshape.
    // init reshape and set the cost for each input_layout and output_layout.
    OperatorInfoPtr operator_info = cnode->operator_info();
    auto reshape_info = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
    reshape_info->set_pre_operator_name(pre_operator_info->name());
    reshape_info->set_pre_operator_index(out_index);
    if (find_next_node) {
      next_stra_costs = next_operator_info->strategy_cost();
      reshape_info->set_next_operator_name(next_operator_info->name());
      reshape_info->set_next_operator_index(in_index);
    }
    bool is_prev_param = pre_node->isa<Parameter>();
    if (reshape_info->GenetateStrategyCosts(pre_stra_costs, next_stra_costs, out_index, in_index, is_prev_param) !=
        SUCCESS) {
      MS_LOG(EXCEPTION) << "Generating strategy costs for reshape failed!";
    }
  }
}

Status ParallelStrategySearch(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root) {
  // There are 4 meta-steps to determine the parallelization strategy for the ANF graph.
  // Step 1: Traverse the ANF graph, and create NODEs for the costgraph:
  //         create the OperatorInfo object for each primitive, and enumerate the parallelization strategies
  //         for each OperatorInfo;
  // Step 1.1: Deal with 'Reshape':
  //         'Reshape' takes its previous operator's layout as its input layout, and takes its next operator's
  //         layout as its output layout.
  // Step 2: Traverse the ANF graph, and create EDGES for the costgraph:
  //         create the Edge object for each pair of OperatorInfos, and enumerate the parallelization strategies
  //         for each edge, based on the strategies of the two OperatorInfos;
  // Step 3: Augment the costgraph:
  //         take care of the case of a single Parameter being used by multiple operators. Create a TmpIdentity
  //         operator for this Parameter, and add an edge for the use of this Parameter by each
  //         subsequent operator;
  // Step 3.1: Calculate memory usage:
  //         note that the memory usage calculation differs between the training phase and the inference phase.
  // Step 4: Run the Dynamic Programming algorithm:
  //         in this process, the cost is calculated based on not only the operators, but also the edges. Here,
  //         the edge cost is caused by the redistribution of an operator's output tensor layout to the next
  //         operator's input tensor layout. Note that there may be several connected components in the
  //         costgraph, and the DP algorithm runs on each of them.
  //
  // OUTPUT: the determined strategy for each operator.

  // Step 1
  if (CostModelContext::GetInstance()->is_multi_subgraphs()) {
    if (ConstructCostGraphNodesByUniqueIdTC(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  } else {
    if (ConstructCostGraphNodesByUniqueId(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  }
  // Step 1.1
  ReshapeCostCompute(all_nodes);
  // Step 2
  ConstructCostGraphEdges(all_nodes);
  MS_LOG(INFO) << "Constructing edges for cost graph succeeded. There are " << entire_costgraph->GetOperators().size()
               << " operators, and " << entire_costgraph->GetNumEdges() << " edges.";
  // Step 3: Augment the costgraph.
  AugmentCostGraph(all_nodes);
  MS_LOG(INFO) << "After the augmenting procedure, there are " << entire_costgraph->GetOperators().size()
               << " operators, and " << entire_costgraph->GetNumEdges() << " edges.";
  // Step 3.1: Calculate the memory usage
  if (entire_costgraph->CalculateMemoryCost() != SUCCESS) {
    MS_LOG(EXCEPTION) << "Calculating memory cost failed.";
  }
  // Step 4: run the DP algorithm on the costgraph.
  if (GetStrategy(entire_costgraph) != SUCCESS) {
    MS_LOG(ERROR) << "Strategy search for cost graph failed";
    return FAILED;
  }
  MS_LOG(INFO) << "Searching strategy succeeded.";
  if (entire_costgraph->InitSelectedStrategy() == SUCCESS) {
    MS_LOG(INFO) << "Init selected strategy succeeded.";
  } else {
    MS_LOG(EXCEPTION) << "Init selected strategy failed.";
  }
  // print the selected strategy
  for (auto &op : entire_costgraph->GetOperators()) {
    StrategyPtr s_strategy = op->selected_strategy();
    MS_LOG(INFO) << op->name() << " : The strategy is:";
    PrintStrategy(s_strategy);
  }
  return SUCCESS;
}
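
// Walk the recorded input tensor names and substitute the mapping 'it->first' -> 'it->second';
// used by the RP search below to resolve tuple_getitem indirections for the rec_parser.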
std::vector<std::vector<std::string>> RecInputTensorNames(const std::map<std::string, std::string>::iterator &it,
                                                          std::vector<std::vector<std::string>> input_tensor_names) {
  for (size_t j = 0; j < input_tensor_names.size(); j++) {
    for (size_t k = 0; k < input_tensor_names[j].size(); k++) {
      if (it->first == input_tensor_names[j][k]) {
        input_tensor_names[j][k] = it->second;
        break;
      }
    }
  }
  return input_tensor_names;
}
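
// Strategy search with the recursive-programming (RP) algorithm: build the cost-graph nodes,
// parse the operators into a rec_core Graph, partition it across all devices, then generate
// and apply the resulting strategies.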
Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root) {
  if (ConstructCostGraphNodesByUniqueId(all_nodes, root) == SUCCESS) {
    MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                 << entire_costgraph->GetOperators().size() << " operators.";
  } else {
    MS_LOG(ERROR) << "Constructing nodes for cost graph failed.";
    return FAILED;
  }
  auto ops = entire_costgraph->GetOperators();
  std::vector<std::vector<std::string>> input_tensor_names = entire_costgraph->get_inputs_tensor_name_list();
  auto tuple_getitem_list = entire_costgraph->get_tuple_getitem_list();
  for (auto it = tuple_getitem_list.begin(); it != tuple_getitem_list.end();) {
    input_tensor_names = RecInputTensorNames(it++, input_tensor_names);
  }
  std::shared_ptr<std::vector<size_t>> ops_nodes_list(new std::vector<size_t>);
  std::shared_ptr<Graph> graph = ParseGraph(ops, input_tensor_names);
  size_t num_device = g_device_manager->DeviceNum();
  double device_memory = entire_costgraph->GetDeviceMemory();
  if (PartitionForAllDevices(num_device, device_memory, graph) == SUCCESS) {
    MS_LOG(INFO) << "Partition succeeded with " << num_device << " devices.";
  } else {
    MS_LOG(ERROR) << "PartitionForAllDevices failed.";
    return FAILED;
  }
  GenerateStrategy(graph, ops);
  if (entire_costgraph->InitSelectedStrategy() == SUCCESS) {
    MS_LOG(INFO) << "Init selected strategy succeeded.";
  } else {
    MS_LOG(ERROR) << "Init selected strategy failed.";
    return FAILED;
  }
  // print the selected strategy
  for (auto &op : entire_costgraph->GetOperators()) {
    StrategyPtr s_strategy = op->selected_strategy();
    MS_LOG(INFO) << op->name() << " : The strategy is:";
    PrintStrategy(s_strategy);
  }
  return SUCCESS;
}
}  // namespace parallel
}  // namespace mindspore