
step_auto_parallel.cc 53 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "frontend/parallel/step_auto_parallel.h"

#include <inttypes.h>
#include <sys/time.h>
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "ir/anf.h"
#include "ir/param_info.h"
#include "ir/tensor.h"
#include "frontend/optimizer/opt.h"
#include "frontend/optimizer/optimizer.h"
#include "frontend/parallel/auto_parallel/dp_algo_costmodel.h"
#include "frontend/parallel/auto_parallel/edge_costmodel.h"
#include "frontend/parallel/auto_parallel/graph_costmodel.h"
#include "frontend/parallel/auto_parallel/rec_core/rec_generate_strategy.h"
#include "frontend/parallel/auto_parallel/rec_core/rec_parse_graph.h"
#include "frontend/parallel/auto_parallel/rec_core/rec_partition.h"
#include "frontend/parallel/context.h"
#include "frontend/parallel/ops_info/tmp_identity_info.h"
#include "frontend/parallel/ops_info/reshape_info.h"
#include "frontend/parallel/graph_util/node_info.h"
#include "frontend/parallel/step_parallel.h"
#include "frontend/parallel/strategy_checkpoint/parallel_strategy_checkpoint.h"

namespace mindspore {
namespace parallel {
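// Entry point of the auto-parallel pass. When the graph is flagged for auto-parallel and
// has not been processed yet, search a parallelization strategy with either the
// dynamic-programming (DP) or the recursive-programming (RP) algorithm, then mark the
// graph so this pass runs only once.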
bool StepAutoParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &) {
  MS_EXCEPTION_IF_NULL(root);
  MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance());
  std::string parallel_mode = ParallelContext::GetInstance()->parallel_mode();
  // assume no change to the graph
  bool changes = false;
  // control whether to use model_parallel mode
  if (!root->has_flag(AUTO_PARALLEL) || (parallel_mode != AUTO_PARALLEL) ||
      root->has_flag(AUTO_PARALLEL_RUN_ONCE_ONLY)) {
    return changes;
  }
  // check whether strategy_search_mode is valid
  std::string strategy_search_mode = ParallelContext::GetInstance()->strategy_search_mode();
  if ((strategy_search_mode != DYNAMIC_PROGRAMMING) && (strategy_search_mode != RECURSIVE_PROGRAMMING)) {
    // Fall back to dynamic programming as the default searching mode.
    strategy_search_mode = DYNAMIC_PROGRAMMING;
    MS_LOG(INFO) << "No strategy searching mode specified, using DP searching mode as the default";
  }
  struct timeval start_time, end_time;
  (void)gettimeofday(&start_time, nullptr);
  if (MsContext::GetInstance()->save_graphs_flag()) {
    draw::Draw(STEP_AUTO_PARALLEL_BEGIN, root);
  }
  MS_LOG(INFO) << "Now entering step auto parallel";
  TOTAL_OPS = 0;
  AnfNodePtr ret = root->get_return();
  std::vector<AnfNodePtr> all_nodes = DeepScopedGraphSearch(ret);
  if (ParallelInit() != SUCCESS) {
    MS_LOG(EXCEPTION) << "Parallel init failed";
  }
  // mark the forward cnodes; parallel only cares about these nodes
  MarkForwardCNode(root);
  if (FindCommunicationOp(all_nodes)) {
    MS_LOG(EXCEPTION) << "The graph contains a communication op";
  }
  // search the parallelization strategy
  if (strategy_search_mode == DYNAMIC_PROGRAMMING) {
    if (ParallelStrategySearch(all_nodes, root) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using DP searching mode";
    }
  } else if (strategy_search_mode == RECURSIVE_PROGRAMMING) {
    if (ParallelStrategyRecSearch(all_nodes, root) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using RP searching mode";
    }
  } else {
    MS_LOG(EXCEPTION) << "Unexpected auto-parallel strategy searching mode";
  }
  (void)gettimeofday(&end_time, nullptr);
  uint64_t time = kUSecondInSecond * static_cast<uint64_t>(end_time.tv_sec - start_time.tv_sec);
  time += static_cast<uint64_t>(end_time.tv_usec - start_time.tv_usec);
  MS_LOG(INFO) << "Now leaving step auto parallel, used time: " << time << " us";
  root->set_flag(AUTO_PARALLEL_RUN_ONCE_ONLY, true);
  return changes;
}
// Given the node, return whether each input is a parameter or an output of an operator.
// The returned boolean vector is in the same order as the inputs, thus its implementation
// is closely consistent with ExtractShape() in step_parallel.cc
std::vector<bool> ExtractInputParameterByNode(const CNodePtr &node) {
  std::vector<bool> is_parameter;
  std::vector<AnfNodePtr> node_inputs{node->inputs()};
  if ((node_inputs.size() == 2) && AnfNodeIsPrimitive(node_inputs[1], MAKE_TUPLE)) {
    node_inputs = node_inputs[1]->cast<CNodePtr>()->inputs();
  }
  for (size_t i = 1; i < node_inputs.size(); ++i) {
    auto input = node_inputs[i];
    if (input->isa<Parameter>()) {
      auto input_parameter = input->cast<ParameterPtr>();
      is_parameter.push_back(ParameterRequireGrad(input_parameter));
    } else if (input->isa<CNode>() || IsValueNode<tensor::Tensor>(input) || IsValueNode<RefKey>(input)) {
      is_parameter.push_back(false);
    }
  }
  return is_parameter;
}
// Given the type, return the number of bytes to represent this type
size_t GetLengthOfDataType(const TypePtr &type) {
  switch (type->type_id()) {
    case kNumberTypeBool:
      return sizeof(bool);
    case kNumberTypeInt8:
      return sizeof(int8_t);
    case kNumberTypeInt16:
      return sizeof(int16_t);
    case kNumberTypeInt32:
      return sizeof(int32_t);
    case kNumberTypeInt64:
      return sizeof(int64_t);
    case kNumberTypeUInt8:
      return sizeof(uint8_t);
    case kNumberTypeUInt16:
      return sizeof(uint16_t);
    case kNumberTypeUInt32:
      return sizeof(uint32_t);
    case kNumberTypeUInt64:
      return sizeof(uint64_t);
    case kNumberTypeFloat16:
      return sizeof(float) / 2;
    case kNumberTypeFloat32:
      return sizeof(float);
    case kNumberTypeFloat64:
      return sizeof(double);
    case kNumberTypeInt:
      return sizeof(int);
    case kNumberTypeUInt:
      return sizeof(unsigned int);
    case kNumberTypeFloat:
      return sizeof(float);
    default:
      MS_LOG(EXCEPTION) << "Unexpected type " << type->type_name();
  }
}
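// Return the length (in bytes) of one element of the given input node's tensor type.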
size_t GetInputsTypeLen(const AnfNodePtr &input) {
  MS_EXCEPTION_IF_NULL(input);
  if (!input->isa<CNode>() && !input->isa<Parameter>() && !IsValueNode<tensor::Tensor>(input)) {
    MS_LOG(EXCEPTION) << "The input node is not a CNode, Parameter, or Tensor";
  }
  size_t input_type_len = 0;
  auto type = input->Type();
  MS_EXCEPTION_IF_NULL(type);
  if (type->isa<mindspore::TensorType>()) {
    auto input_element_type = type->cast<mindspore::TensorTypePtr>()->element();
    input_type_len = GetLengthOfDataType(input_element_type);
  } else {
    MS_LOG(EXCEPTION) << "Unknown type: " << type->type_name();
  }
  return input_type_len;
}
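// Given the node, return the per-element type length (in bytes) of each of its inputs.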
std::vector<size_t> ExtractInputTypeLengthByNode(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  std::vector<size_t> inputs_type_len;
  std::vector<AnfNodePtr> node_inputs{node->inputs()};
  if ((node_inputs.size() == 2) && AnfNodeIsPrimitive(node_inputs[1], MAKE_TUPLE)) {
    node_inputs = node_inputs[1]->cast<CNodePtr>()->inputs();
  }
  // extract the length of each input element
  for (auto &input : node_inputs) {
    if (IsValueNode<RefKey>(input)) {
      auto func_graph = node->func_graph();
      MS_EXCEPTION_IF_NULL(func_graph);
      std::vector<AnfNodePtr> parameters = FindParameterByRefKeyNode(input, func_graph);
      if (parameters.size() != 1) {
        MS_LOG(EXCEPTION) << "Find parameter by ref key node failed";
      }
      inputs_type_len.push_back(GetInputsTypeLen(parameters[0]));
    } else if (input->isa<CNode>() || input->isa<Parameter>() || IsValueNode<tensor::Tensor>(input)) {
      // extract the input type length from the parameter or apply node
      inputs_type_len.push_back(GetInputsTypeLen(input));
    }
  }
  return inputs_type_len;
}
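// Given the node, return the element types of its outputs; a tuple output contributes one
// entry per tensor element.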
std::vector<TypePtr> ExtractOutputTypeByNode(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  std::vector<TypePtr> outputs_type;
  // extract the output element type
  auto primary_output_type = node->Type();
  MS_EXCEPTION_IF_NULL(primary_output_type);
  if (primary_output_type->isa<mindspore::Tuple>()) {
    // in this case, the output is a tuple
    auto tuple_output_type = primary_output_type->cast<mindspore::TuplePtr>();
    auto elements = tuple_output_type->elements();
    for (auto &ele : elements) {
      if (ele->isa<mindspore::TensorType>()) {
        auto ele_element_type = ele->cast<mindspore::TensorTypePtr>()->element();
        outputs_type.push_back(ele_element_type);
      } else {
        MS_LOG(EXCEPTION) << "Unknown type: " << primary_output_type->type_name();
      }
    }
  } else {
    // in this case, the output is a single tensor
    if (primary_output_type->isa<mindspore::TensorType>()) {
      auto element_type = primary_output_type->cast<mindspore::TensorTypePtr>()->element();
      outputs_type.push_back(element_type);
    } else {
      MS_LOG(EXCEPTION) << "Unknown type: " << primary_output_type->type_name();
    }
  }
  return outputs_type;
}
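// Whether the operator is element-wise; such operators may follow the strategy of their
// predecessor when ELEMENTWISE_OP_STRA_FOLLOW is enabled (see ConstructCostGraphEdges).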
bool IsElementWiseOperator(const std::string &op_name) {
  static const std::set<std::string> elementwise_op = {ACTIVATION, GELU, TANH, SOFTMAX, LOG_SOFTMAX, RELU,
                                                       SQRT, CAST, POW, EXP, LOG, COS,
                                                       ACOS, LOGICALNOT, NEG, SQUARE, SIGMOID};
  auto iter = elementwise_op.find(op_name);
  return (iter != elementwise_op.end());
}
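// Whether an OperatorInfo with parallelization strategies is implemented for this operator.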
bool IsSplittableOperator(const std::string &op_name) {
  // clang-format off
  static const std::set<std::string> splittable_op =
    {MATMUL, TRANSPOSE, GELU, TANH, SOFTMAX, SUB, MUL, DIV, RESHAPE, GREATER, LOG_SOFTMAX, ACTIVATION, PRELU,
     FLOORDIV, L2_NORMALIZE, TENSOR_ADD, MAXPOOL, MAXPOOLV2, VIRTUAL_DATA_SET, RELU, ONEHOT, DROPOUT_DO_MASK,
     REDUCE_MAX, REDUCE_MIN, ARGMAXWITHVALUE, ARGMINWITHVALUE, REDUCE_SUM, CONV2D, FUSE_BATCH_NORM, POOLING,
     MAX_POOL_WITH_ARGMAX, SIMPLE_MEAN, FLATTEN, BATCH_NORM, LAYER_NORM, BIAS_ADD, ASSIGN_SUB, COS, ACOS, EXP,
     LOG, REDUCE_MEAN, REAL_DIV, SIGMOID, POW, MAXIMUM, MINIMUM, EQUAL, NOT_EQUAL, LOGICALNOT, GATHERV2, SQRT, CONCAT,
     STRIDEDSLICE, GET_NEXT, CAST, NEG, SQUARE, BATCH_MATMUL, EXPAND_DIMS, SQUEEZE, SPARSE_GATHERV2, TILE, DROPOUT,
     SOFTMAX_CROSS_ENTROPY_WITH_LOGITS, SIGMOID_CROSS_ENTROPY_WITH_LOGITS, SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS,
     EMBEDDING_LOOKUP};
  // clang-format on
  auto iter = splittable_op.find(op_name);
  return (iter != splittable_op.end());
}
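// Whether this CNode should participate in strategy search: it must be a parallel-care
// Primitive node with a splittable operator, excluding CASTs inserted by the optimizer.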
bool IsAutoParallelCareNode(const CNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  ValueNodePtr prim_node = cnode->input(0)->cast<ValueNodePtr>();
  if (prim_node == nullptr) {
    return false;
  }
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_node);
  if (prim == nullptr) {
    return false;
  }
  bool bool_result = IsParallelCareNode(cnode) && !IsSplittableOperator(prim->name());
  if (bool_result && (prim->name() != MAKE_TUPLE)) {
    MS_LOG(EXCEPTION) << "OperatorInfo should be implemented for: " << prim->name();
  } else if (prim->name() == CAST) {
    if (cnode->fullname_with_scope().find(OPTIMIZER_SUB_STRING) != std::string::npos) {
      // Do not care about CASTs inserted by the optimizer
      return false;
    }
    return true;
  }
  return IsParallelCareNode(cnode) && IsSplittableOperator(prim->name());
}
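// Create the OperatorInfo instance for the given primitive CNode: extract shapes,
// parameter flags and type lengths, then either generate candidate strategies or adopt
// the user-configured / checkpointed strategy.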
OperatorInfoPtr CreateTheOperatorInfo(const PrimitivePtr &prim, const CNodePtr &cnode, StrategyMap *stra_map) {
  MS_EXCEPTION_IF_NULL(prim);
  MS_EXCEPTION_IF_NULL(cnode);
  auto attrs = prim->attrs();
  std::vector<Shapes> shape_list = ExtractShape(cnode);
  if (shape_list.empty()) {
    MS_LOG(EXCEPTION) << "Failure: node " << cnode->UniqueId() << " failed to extract shape";
  }
  // Create an OperatorInfo instance
  OperatorInfoPtr operator_info = NewOperatorInstance(prim, attrs, shape_list);
  MS_EXCEPTION_IF_NULL(operator_info);
  // Set the parameter information for this OperatorInfo (whether the inputs are parameters or not)
  std::vector<bool> parameter_info = ExtractInputParameterByNode(cnode);
  if (operator_info->set_is_parameter(parameter_info) != SUCCESS) {
    MS_LOG(ERROR) << "Initializing parameter information failed for operator: " << operator_info->name();
    return nullptr;
  }
  // Set the data type for inputs and outputs of this OperatorInfo
  auto inputs_type_length = ExtractInputTypeLengthByNode(cnode);
  auto outputs_type = ExtractOutputTypeByNode(cnode);
  std::vector<size_t> outputs_type_length;
  outputs_type_length.reserve(outputs_type.size());
  std::transform(outputs_type.begin(), outputs_type.end(), std::back_inserter(outputs_type_length),
                 GetLengthOfDataType);
  if (operator_info->SetInputAndOutputTypeLength(inputs_type_length, outputs_type_length) != SUCCESS) {
    MS_LOG(ERROR) << "Setting the lengths of inputs and outputs failed for operator: " << operator_info->name();
    return nullptr;
  }
  if (operator_info->set_outputs_type(outputs_type) != SUCCESS) {
    MS_LOG(ERROR) << "Setting the types of outputs failed for operator: " << operator_info->name();
    return nullptr;
  }
  // When the 'inputs' contains numerical values for some operators, these values should be extracted from
  // the ANF graph
  auto &inputs = cnode->inputs();
  std::vector<ValuePtr> input_value;
  for (size_t index = 1; index < inputs.size(); ++index) {
    if (inputs[index]->isa<ValueNode>()) {
      input_value.push_back(GetValueNode(inputs[index]));
    } else {
      input_value.emplace_back(nullptr);
    }
  }
  operator_info->set_input_value(input_value);
  operator_info->set_outputs_dtype(cnode->Type());
  operator_info->set_cnode(cnode);
  // key of the strategy map
  std::string strategy_key_name = "";
  auto param_names = NodeParameterName(cnode);
  if (!param_names.empty()) {
    strategy_key_name = param_names[0].first;
  }
  bool load_strategy_from_ckpt =
    StrategyCheckpoint::GetInstance().LoadCheckPointOn() && stra_map->find(strategy_key_name) != stra_map->end();
  // If no strategy has been configured for this operator, then candidate strategies are generated for
  // auto-strategy searching; if this primitive is CAST, we ignore the user-specified strategy.
  // If the strategy is set to load from the checkpoint, loading it from the checkpoint is preferred.
  if ((!StrategyFound(attrs) || prim->name() == CAST) && !load_strategy_from_ckpt) {
    // Compute split_flag_list_, indicating which input has the batch dimension. This is ONLY used in
    // preparation for the BatchParallelInfo operator
    operator_info->ComputeBatchSplitFlagList();
    if (operator_info->GenerateStrategies(0) != SUCCESS) {
      MS_LOG(ERROR) << "Strategy search for Operator " << operator_info->name() << " failed.";
      return nullptr;
    }
  } else {
    // In this case, the configured strategy should be extracted to help set the cost
    StrategyPtr strategyPtr;
    if (load_strategy_from_ckpt) {
      strategyPtr = (*stra_map)[strategy_key_name];
    } else {
      strategyPtr = parallel::ExtractStrategy(attrs);
    }
    if (strategyPtr != nullptr) {
      if (prim->name() == RESHAPE) {
        MS_LOG(EXCEPTION) << "Setting a strategy for Reshape has no effect!";
      }
      // Set the cost for this configured strategy
      if (operator_info->SetCostUnderStrategy(strategyPtr) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Failure: operator " << prim->name() << " SetCostUnderStrategy failed";
      } else if (FULLY_USE_DEVICES) {
        // If configured to fully use devices, then check the user-specified strategy
        int32_t used_devices = operator_info->used_devices();
        MS_EXCEPTION_IF_NULL(g_device_manager);
        auto total_device_num = g_device_manager->GetDeviceListByStageId(0).size();
        // 'used_devices == 1' means the ALL-1 strategy, which is valid in auto-parallel
        if (used_devices == 1) {
          return operator_info;
        }
        // 'used_devices == -1' means that 'used_devices_' is not set
        if ((used_devices == -1) || IntToSize(used_devices) != total_device_num) {
          MS_LOG(EXCEPTION) << "In configuration 'FULLY_USE_DEVICES' = True, "
                            << "but the specified strategy uses devices: " << used_devices
                            << ", total devices: " << total_device_num;
        }
      }
    }
  }
  return operator_info;
}
// Using CNodes' UniqueIds to construct nodes
Status ConstructCostGraphNodesByUniqueId(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &) {
  MS_LOG(INFO) << "Constructing nodes for cost graph begins.";
  entire_costgraph = std::make_shared<CostGraph>();
  entire_costgraph->SetDeviceMemoryAndCostParameter();
  // The map from a CNode's UniqueId to its OperatorInfo
  std::map<std::string, OperatorInfoPtr> from_cnode_to_info;
  // extract the strategy from the checkpoint for multi-train
  StrategyMap stra_map;
  if (StrategyCheckpoint::GetInstance().LoadCheckPointOn()) {
    if (StrategyCheckpoint::GetInstance().Load(&stra_map) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Load strategy checkpoint failed";
    }
  }
  // Step 1
  for (auto &node : all_nodes) {
    // NOTE: we only care about splittable Primitive operators
    auto cnode = node->cast<CNodePtr>();
    bool bool_result = (cnode == nullptr) || (!IsValueNode<Primitive>(cnode->input(0)));
    if (bool_result) {
      continue;
    }
    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    if (!IsAutoParallelCareNode(cnode)) {
      // Needed by rec_parser
      if (ParallelContext::GetInstance()->strategy_search_mode() == RECURSIVE_PROGRAMMING) {
        auto prev_cnode = GetInternalOperatorInfo(cnode, prim_anf_node);
        if (prev_cnode != nullptr) {
          entire_costgraph->add_tuple_getitem(std::make_pair(cnode->UniqueId(), prev_cnode->UniqueId()));
        }
      }
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    MS_EXCEPTION_IF_NULL(prim);
    auto search_cnode = from_cnode_to_info.find(cnode->UniqueId());
    if (search_cnode == from_cnode_to_info.end()) {
      auto operator_info = CreateTheOperatorInfo(prim, cnode, &stra_map);
      if (operator_info == nullptr) {
        return FAILED;
      }
      // Needed by rec_parser
      operator_info->set_type(prim->name());
      std::vector<std::string> inputs_tensor_name = ExtractInputsTensorName(cnode);
      entire_costgraph->AddOperator(operator_info);
      cnode->set_user_data<OperatorInfo>(operator_info);
      MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                   << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                   << " is set OperatorInfo: " << operator_info->name() << ", Primitive: " << prim->name();
      (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueId(), operator_info));
      // Needed by rec_parser
      entire_costgraph->add_inputs_tensor_name(inputs_tensor_name);
    } else {
      // Two CNodes' UniqueIds should not be equal
      MS_LOG(EXCEPTION) << "The CNode with UniqueId: " << cnode->UniqueId()
                        << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                        << " is set OperatorInfo: " << search_cnode->second->name() << ", Primitive: " << prim->name();
    }
  }
  MS_LOG(INFO) << "Constructing nodes for cost graph ends.";
  return SUCCESS;
}
// Using CNodes' UniqueIdThroughCopy values to construct nodes
Status ConstructCostGraphNodesByUniqueIdTC(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &) {
  MS_LOG(INFO) << "Constructing nodes for cost graph begins.";
  entire_costgraph = std::make_shared<CostGraph>();
  entire_costgraph->SetDeviceMemoryAndCostParameter();
  // The map from a CNode's UniqueIdThroughCopy to its OperatorInfo
  std::map<std::string, OperatorInfoPtr> from_cnode_to_info;
  // extract the strategy from the checkpoint for multi-train
  StrategyMap stra_map;
  if (StrategyCheckpoint::GetInstance().LoadCheckPointOn()) {
    if (StrategyCheckpoint::GetInstance().Load(&stra_map) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Load strategy checkpoint failed";
    }
  }
  for (auto &node : all_nodes) {
    // NOTE: we only care about splittable Primitive operators
    auto cnode = node->cast<CNodePtr>();
    bool bool_result = (cnode == nullptr) || (!IsValueNode<Primitive>(cnode->input(0)));
    if (bool_result) {
      continue;
    }
    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    if (!IsAutoParallelCareNode(cnode)) {
      // Needed by rec_parser
      if (ParallelContext::GetInstance()->strategy_search_mode() == RECURSIVE_PROGRAMMING) {
        auto prev_cnode = GetInternalOperatorInfo(cnode, prim_anf_node);
        if (prev_cnode != nullptr) {
          entire_costgraph->add_tuple_getitem(std::make_pair(cnode->UniqueId(), prev_cnode->UniqueId()));
        }
      }
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    // Find the OperatorInfo if it exists
    auto search_cnode = from_cnode_to_info.find(cnode->UniqueIdThroughCopy());
    if (search_cnode == from_cnode_to_info.end()) {
      // In this case, the corresponding OperatorInfo has not been created yet, so create a new one.
      auto operator_info = CreateTheOperatorInfo(prim, cnode, &stra_map);
      if (operator_info == nullptr) {
        return FAILED;
      }
      // Needed by rec_parser
      operator_info->set_type(prim->name());
      std::vector<std::string> inputs_tensor_name = ExtractInputsTensorName(cnode);
      entire_costgraph->AddOperator(operator_info);
      cnode->set_user_data<OperatorInfo>(operator_info);
      MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                   << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                   << " is set OperatorInfo: " << operator_info->name() << ", Primitive: " << prim->name();
      (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueIdThroughCopy(), operator_info));
      // Needed by rec_parser
      entire_costgraph->add_inputs_tensor_name(inputs_tensor_name);
    } else {
      auto current_op_ptr = search_cnode->second;
      if (current_op_ptr == nullptr) {
        MS_LOG(EXCEPTION) << "Find " << prim->name() << " from CostGraph failed.";
      } else {
        bool is_find_wrong = (current_op_ptr->name().find(VIRTUAL_DATA_SET_INFO) == std::string::npos) &&
                             (current_op_ptr->name().find(BATCH_PARALLEL) == std::string::npos) &&
                             (current_op_ptr->name().find(prim->name()) == std::string::npos);
        if (is_find_wrong) {
          MS_LOG(EXCEPTION) << "The OperatorInfo: " << current_op_ptr->name()
                            << " does not match the Prim: " << prim->name();
        }
        // Needed by rec_parser
        ModifyInputsTensorNameListIfOperatorInfoCreated(current_op_ptr->name(), cnode->UniqueId());
        cnode->set_user_data<OperatorInfo>(current_op_ptr);
        MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                     << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                     << " is set OperatorInfo: " << current_op_ptr->name() << ", Primitive: " << prim->name();
      }
    }
  }
  MS_LOG(INFO) << "Constructing nodes for cost graph ends.";
  return SUCCESS;
}
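// Step 2: for each auto-parallel-care CNode, connect it to each of its auto-parallel-care
// predecessors with an Edge, skipping over 'tuple_getitem' and 'depend' nodes in between.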
void ConstructCostGraphEdges(const std::vector<AnfNodePtr> &all_nodes) {
  // Step 2
  MS_LOG(INFO) << "Constructing edges for cost graph begins.";
  for (auto &node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    bool bool_result_cnode = (cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0));
    if (bool_result_cnode) {
      continue;
    }
    auto &inputs = cnode->inputs();
    ValueNodePtr prim_anf_node = inputs[0]->cast<ValueNodePtr>();
    if (!IsAutoParallelCareNode(cnode)) {
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    size_t edge_count = 0;
    auto node_op_info = cnode->user_data<OperatorInfo>();
    for (size_t i = 1; i < inputs.size(); ++i) {
      auto prev_cnode = inputs[i]->cast<CNodePtr>();
      bool bool_result_prev_cnode = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
      if (bool_result_prev_cnode) {
        continue;
      }
      ValueNodePtr prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
      PrimitivePtr prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
      size_t output_index = 0;
      bool bool_result =
        (IsAutoParallelCareNode(prev_cnode)) || (prev_prim->name() == TUPLE_GETITEM) || (prev_prim->name() == DEPEND);
      while (bool_result) {
        if (IsAutoParallelCareNode(prev_cnode)) {
          auto prev_op_info = prev_cnode->user_data<OperatorInfo>();
          std::string edge_name = prev_op_info->name() + OPERATOR_TO_OPERATOR_CONNECTOR + node_op_info->name();
          // If the edge between these two operators has already been added, it will not be added again.
          if (entire_costgraph->IsEdgeInCostGraph(edge_name, output_index, i - 1)) {
            break;
          }
          EdgePtr edge_ptr;
          MS_LOG(INFO) << "Creating edge: " << edge_name;
          bool follow_strategy = (prim->name() == RESHAPE) || (prev_prim->name() == RESHAPE) ||
                                 (ELEMENTWISE_OP_STRA_FOLLOW && IsElementWiseOperator(prev_prim->name()));
          if (follow_strategy) {
            // Redistribution is not allowed on the edge.
            // Elementwise operators have the same strategy as their previous operators.
            edge_ptr = std::make_shared<Edge>(edge_name, prev_op_info, node_op_info, output_index, i - 1, false, true);
          } else {
            edge_ptr = std::make_shared<Edge>(edge_name, prev_op_info, node_op_info, output_index, i - 1, false);
          }
          // Init the costs for this edge
          if (edge_ptr->InitEdgeCost() != SUCCESS) {
            MS_LOG(EXCEPTION) << "Edge cost initialization failed";
          }
          node_op_info->AddPrevEdge(edge_ptr);
          prev_op_info->AddSuccEdge(edge_ptr);
          entire_costgraph->AddEdge(prev_op_info, node_op_info, edge_ptr);
          MS_LOG(INFO) << "Successfully added the edge between " << prev_op_info->name() << " and "
                       << node_op_info->name();
          edge_count++;
          break;
        } else if (prev_prim->name() == TUPLE_GETITEM) {
          // In this case, 'prev_cnode' is 'tuple_getitem'; the actual predecessor node is the node before
          // this 'tuple_getitem'
          MS_LOG(INFO) << "Jumping the 'tuple_getitem' operator.";
          output_index = IntToSize(GetValue<int>(GetValueNode(prev_cnode->input(2))));
          prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
          bool bool_result_tuple = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
          if (bool_result_tuple) {
            break;
          }
          prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
          prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
          if (!IsAutoParallelCareNode(prev_cnode)) {
            MS_LOG(EXCEPTION) << "Did not create OperatorInfo for: " << prev_prim->name();
          }
          MS_LOG(INFO) << "Jumped the 'tuple_getitem' operator, "
                       << "and creating an edge between the Operator before "
                       << "'tuple_getitem' and the Operator after 'tuple_getitem'.";
        } else if (prev_prim->name() == DEPEND) {
          // In this case, 'prev_cnode' is 'depend'; the actual predecessor node is the node before
          // this 'depend'
          MS_LOG(INFO) << "Jumping the 'depend' operator.";
          prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
          bool bool_result_depend = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
          if (bool_result_depend) {
            break;
          }
          prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
          prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
          MS_LOG(INFO) << "Jumped the 'depend' operator, "
                       << "and creating an edge between the Operator before "
                       << "'depend' and the Operator after 'depend'.";
        }
        bool_result = (IsAutoParallelCareNode(prev_cnode)) || (prev_prim->name() == TUPLE_GETITEM) ||
                      (prev_prim->name() == DEPEND);
      }
    }
    MS_LOG(INFO) << "Successfully created " << edge_count << " edges for: " << node_op_info->name();
  }
  MS_LOG(INFO) << "Constructing edges for cost graph ends.";
}
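// If the node is a CNode with RefKey inputs, return the node together with those RefKeys.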
std::pair<AnfNodePtr, std::vector<AnfNodePtr>> CNodeWithRefKeys(const AnfNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  std::vector<AnfNodePtr> refkeys;
  if (cnode->isa<CNode>()) {
    auto cnode_ptr = cnode->cast<CNodePtr>();
    auto inputs = cnode_ptr->inputs();
    for (auto &one_input : inputs) {
      if (IsValueNode<RefKey>(one_input)) {
        refkeys.push_back(one_input);
      }
    }
    if (refkeys.size() >= 1) {
      return std::make_pair(cnode, refkeys);
    }
  }
  return {nullptr, refkeys};
}
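// Step 3: when one Parameter (or RefKey) is used by multiple operators, create a
// TmpIdentityInfo operator for it and add an edge to each user, so that the cost model
// can account for the sharing.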
void AugmentCostGraph(const std::vector<AnfNodePtr> &all_nodes) {
  // Step 3
  for (auto &node : all_nodes) {
    auto cnode_with_refkeys = CNodeWithRefKeys(node);
    if ((!node->isa<Parameter>()) && (cnode_with_refkeys.first == nullptr)) {
      continue;
    }
    std::string parameter_name;
    AnfNodePtr target_parameter = nullptr;
    AnfNodeIndexSet target_set;
    if (cnode_with_refkeys.first != nullptr) {
      // Dealing with the RefKey case
      auto refkeys = cnode_with_refkeys.second;
      auto cnode = cnode_with_refkeys.first;
      auto cnode_ptr = cnode->cast<CNodePtr>();
      if (cnode_ptr == nullptr || !IsValueNode<Primitive>(cnode_ptr->input(0))) {
        continue;
      }
      if (!IsAutoParallelCareNode(cnode_ptr)) {
        continue;
      }
      if (refkeys.size() > 1) {
        MS_LOG(EXCEPTION) << "CNode: " << cnode->fullname_with_scope() << "'s inputs have more than 1 RefKey.";
      }
      MS_EXCEPTION_IF_NULL(cnode->func_graph());
      auto cnode_func_graph = cnode->func_graph();
      MS_EXCEPTION_IF_NULL(cnode->func_graph()->manager());
      // Find the RefKey being used
      auto candidate_set_by_refkey = cnode_func_graph->manager()->node_users()[refkeys[0]];
      for (auto &candidate : candidate_set_by_refkey) {
        auto candidate_node = candidate.first;
        auto c = candidate_node->cast<CNodePtr>();
        if (c == nullptr || !IsValueNode<Primitive>(c->input(0))) {
          continue;
        }
        if (!IsAutoParallelCareNode(c)) {
          continue;
        }
        target_set.add(candidate);
      }
      // Find the corresponding Parameter being used
      std::vector<AnfNodePtr> parameters = FindParameterByRefKeyNode(refkeys[0], cnode_func_graph);
      if (parameters.size() != 1) {
        MS_LOG(EXCEPTION) << "Find parameter by ref key node failed";
      }
      parameter_name = parameters[0]->cast<ParameterPtr>()->name();
      target_parameter = parameters[0];
      auto candidate_set_by_para = cnode_func_graph->manager()->node_users()[parameters[0]];
      for (auto &candidate : candidate_set_by_para) {
        auto candidate_node = candidate.first;
        auto c = candidate_node->cast<CNodePtr>();
        if (c == nullptr || !IsValueNode<Primitive>(c->input(0))) {
          continue;
        }
        if (!IsAutoParallelCareNode(c)) {
          continue;
        }
        (void)target_set.insert(candidate);
      }
    } else if (node->isa<Parameter>()) {
      // Dealing with the Parameter case
      MS_EXCEPTION_IF_NULL(node->func_graph());
      MS_EXCEPTION_IF_NULL(node->func_graph()->manager());
      auto candidate_set = node->func_graph()->manager()->node_users()[node];
      for (auto &candidate : candidate_set) {
        auto candidate_node = candidate.first;
        auto c = candidate_node->cast<CNodePtr>();
        if (c == nullptr || !IsValueNode<Primitive>(c->input(0))) {
          continue;
        }
        if (!IsAutoParallelCareNode(c)) {
          continue;
        }
        (void)target_set.insert(candidate);
      }
      // In this case, node is a Parameter
      parameter_name = node->cast<ParameterPtr>()->name();
      target_parameter = node;
    }
    if (target_set.size() <= 1) {
      continue;
    }
    // Rule out the case where a Parameter is used by one Operator but that Operator appears in multiple CNodes
    std::set<std::string> target_without_duplicate;
    for (auto &target : target_set) {
      auto target_cnode = target.first->cast<CNodePtr>();
      auto input_index = target.second;
      (void)target_without_duplicate.insert(std::to_string(input_index) +
                                            target_cnode->user_data<OperatorInfo>()->name());
    }
    if (target_without_duplicate.size() <= 1) {
      continue;
    }
    // At this point, it is certain that this Parameter (RefKey) is used by multiple Operators.
    OperatorInfoPtr tmp_identity_ptr;
    bool new_identity = false;
    std::string tmp_identity_name;
    auto returned_identity = entire_costgraph->FindTmpIdentityByParameterName(parameter_name);
    if (returned_identity != nullptr) {
      // In this case, the TmpIdentityInfo instance has already been created
      new_identity = false;
      tmp_identity_ptr = returned_identity;
      tmp_identity_name = tmp_identity_ptr->name();
    } else {
      // In this case, the TmpIdentityInfo instance has NOT been created yet, so a new one is created.
      new_identity = true;
      // 1) extract the input shape from this Parameter
      MS_EXCEPTION_IF_NULL(target_parameter);
      AbstractBasePtr abstract = target_parameter->abstract();
      if (abstract == nullptr) {
        MS_LOG(EXCEPTION) << "Failure: abstract is nullptr";
      }
      auto input_shape = dyn_cast<abstract::Shape>(abstract->GetShapeTrack());
      if (input_shape == nullptr) {
        MS_LOG(EXCEPTION) << "Failure: input_shape is nullptr";
      }
      std::vector<int> shape_int = input_shape->shape();
      Shape shape;
      (void)std::transform(shape_int.begin(), shape_int.end(), std::back_inserter(shape),
                           [](int sub_shape) { return static_cast<int64_t>(sub_shape); });
      Shapes inputs_shape = {shape};
      Shapes outputs_shape = {shape};
      // 2) init the attr
      std::unordered_map<std::string, ValuePtr> attr = {};
      // Create the TmpIdentity instance
      tmp_identity_ptr = std::make_shared<TmpIdentityInfo>(inputs_shape, outputs_shape, attr);
      tmp_identity_ptr->set_name(tmp_identity_ptr->name() + std::to_string(TOTAL_OPS));
      TOTAL_OPS++;
      tmp_identity_ptr->set_refkey_parameter_name(parameter_name);
      // Set the parameter and type lengths for inputs and outputs
      std::vector<bool> is_parameter;
      auto casted_target_parameter = target_parameter->cast<ParameterPtr>();
      MS_EXCEPTION_IF_NULL(casted_target_parameter);
      is_parameter.push_back(ParameterRequireGrad(casted_target_parameter));
      if (tmp_identity_ptr->set_is_parameter(is_parameter) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Setting parameter for TmpIdentityInfo failed";
      }
      auto node_type = target_parameter->Type();
      if (node_type->isa<mindspore::TensorType>()) {
        auto input_element_type = node_type->cast<mindspore::TensorTypePtr>()->element();
        std::vector<size_t> type_length = {GetLengthOfDataType(input_element_type)};
        if (tmp_identity_ptr->SetInputAndOutputTypeLength(type_length, type_length) != SUCCESS) {
          MS_LOG(EXCEPTION) << "Setting input and output type length for TmpIdentityInfo failed";
        }
      } else {
        MS_LOG(EXCEPTION) << "Unknown type: " << node_type->type_name();
      }
      // Generate strategies for this TmpIdentityInfo instance
      if (tmp_identity_ptr->GenerateStrategies(0) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Strategy search for Operator failed: " << tmp_identity_ptr->name();
      }
    }
    // A flag recording whether new edges have been created or not
    bool add_identity_edge = false;
    // Create edges between this TmpIdentityInfo instance and the subsequent Operator instances
    for (auto &target : target_set) {
      auto target_cnode = target.first->cast<CNodePtr>();
      auto prim = GetValueNode<PrimitivePtr>(target_cnode->input(0));
      auto input_index = target.second;
      auto target_op_info = target_cnode->user_data<OperatorInfo>();
      std::string edge_name = std::string(IDENTITY_INFO) + OPERATOR_TO_OPERATOR_CONNECTOR + target_op_info->name();
      // If the edge between these two operators has already been added, it will not be added again.
      if (entire_costgraph->IsEdgeInCostGraph(edge_name, 0, IntToSize(input_index - 1))) {
        continue;
      }
      std::shared_ptr<Edge> edge_ptr =
        std::make_shared<Edge>(edge_name, tmp_identity_ptr, target_op_info, 0, input_index - 1, false, true);
      if (edge_ptr->InitEdgeCost() != SUCCESS) {
        MS_LOG(EXCEPTION) << "Edge cost initialization failed";
      }
      target_op_info->AddPrevEdge(edge_ptr);
      tmp_identity_ptr->AddSuccEdge(edge_ptr);
      entire_costgraph->AddEdge(tmp_identity_ptr, target_op_info, edge_ptr);
      MS_LOG(INFO) << "Successfully added the edge between " << tmp_identity_ptr->name() << " and "
                   << target_op_info->name();
      add_identity_edge = true;
    }
    if (new_identity && add_identity_edge) {
      // Add the TmpIdentityInfo to the CostGraph only if BOTH conditions are satisfied
      entire_costgraph->AddOperator(tmp_identity_ptr);
    }
  }
}
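// Whether the CNode is a Reshape operator that auto-parallel cares about.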
bool FindReshape(const CNodePtr &cnode) {
  if ((cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0))) {
    return false;
  }
  if (!IsParallelCareNode(cnode) || !cnode->has_user_data<OperatorInfo>()) {
    return false;
  }
  ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  MS_EXCEPTION_IF_NULL(prim);
  return (prim->name() == RESHAPE);
}
// Find the previous node, then obtain its strategy_cost_ vector to get its layout vector.
bool FindPreNodeStraCosts(const AnfNodePtr &node, OperatorInfoPtr *pre_operator_info, int32_t *out_index) {
  // if the previous node is a parameter, handle it outside.
  if (node->isa<Parameter>()) {
    return false;
  }
  if (!node->isa<CNode>()) {
    return false;
  }
  CNodePtr cnode = node->cast<CNodePtr>();
  if (!IsValueNode<Primitive>(cnode->input(0))) {
    return false;
  }
  auto node_op_info = cnode->user_data<OperatorInfo>();
  if (IsParallelCareNode(cnode) && (node_op_info != nullptr)) {
    *pre_operator_info = node_op_info;
    *out_index = 0;
    return true;
  }
  ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  PrimitivePtr prim = prim_anf_node->value()->cast<PrimitivePtr>();
  if (prim->name() == TUPLE_GETITEM) {
    *out_index = GetTupleGetItemIndex(cnode);
    // find tuple_getitem's previous node
    auto pre_node = cnode->input(1);
    if (!pre_node->isa<CNode>()) {
      MS_LOG(EXCEPTION) << "tuple_getitem's second input is not a cnode";
    }
    CNodePtr pre_cnode = pre_node->cast<CNodePtr>();
    auto pre_op_info = pre_cnode->user_data<OperatorInfo>();
    if (IsParallelCareNode(pre_cnode) && (pre_op_info != nullptr)) {
      *pre_operator_info = pre_op_info;
      return true;
    }
    return false;
  }
  for (size_t index = 0; index < cnode->inputs().size(); ++index) {
    if (prim->name() == DEPEND && index != 1) {
      continue;
    }
    if (!FindPreNodeStraCosts(cnode->inputs()[index], pre_operator_info, out_index)) {
      continue;
    }
    return true;
  }
  MS_LOG(WARNING) << "FindPreNodeStraCosts failed; if Reshape is not the first primitive, there must be an error";
  return false;
}
// Find the next node, then obtain its strategy_cost_ vector to get its layout vector.
// If Reshape's output connects to several primitives, return the first layout found.
bool FindNextNodeStraCosts(const CNodePtr &cnode, OperatorInfoPtr *next_operator_info, int32_t *in_index) {
  MS_EXCEPTION_IF_NULL(cnode);
  MS_EXCEPTION_IF_NULL(cnode->func_graph());
  FuncGraphManagerPtr manager = cnode->func_graph()->manager();
  MS_EXCEPTION_IF_NULL(manager);
  AnfNodeIndexSet node_set = manager->node_users()[cnode];
  for (auto &node_pair : node_set) {
    CNodePtr use_apply = node_pair.first->cast<CNodePtr>();
    if (use_apply == nullptr || !IsValueNode<Primitive>(use_apply->input(0))) {
      continue;
    }
    ValueNodePtr prim_anf_node = use_apply->input(0)->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(prim_anf_node);
    PrimitivePtr node_prim = prim_anf_node->value()->cast<PrimitivePtr>();
    MS_EXCEPTION_IF_NULL(node_prim);
    MS_LOG(INFO) << "FindNextLayout prim " << node_prim->name();
    if (node_prim->name() == DEPEND && node_pair.second != 1) {
      continue;
    }
    auto op_info = use_apply->user_data<OperatorInfo>();
    if (IsParallelCareNode(use_apply) && (op_info != nullptr)) {
      MS_LOG(INFO) << "FindNextNodeStraCosts success prim " << node_prim->name();
      *next_operator_info = op_info;
      *in_index = node_pair.second - 1;
      return true;
    }
    MS_LOG(DEBUG) << "FindNextNodeStraCosts failed prim " << node_prim->name() << " " << IsParallelCareNode(use_apply)
                  << " " << (op_info != nullptr);
    if (FindNextNodeStraCosts(use_apply, next_operator_info, in_index)) {
      return true;
    }
  }
  return false;
}
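// Step 1.1: for each Reshape, take the previous operator's layouts as input layouts and
// the next operator's layouts as output layouts, then generate the strategy-cost pairs.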
void ReshapeCostCompute(const std::vector<AnfNodePtr> &all_nodes) {
  for (auto node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    if (!FindReshape(cnode)) {
      continue;
    }
    MS_ASSERT(cnode->inputs().size() == 3);
    // get the previous node's strategy_cost_
    auto pre_node = cnode->input(1);
    int32_t out_index = 0;
    OperatorInfoPtr pre_operator_info;
    std::vector<std::shared_ptr<StrategyWithCost>> pre_stra_costs;
    auto operator_info = cnode->user_data<OperatorInfo>();
    if (pre_node->isa<Parameter>()) {
      auto reshape_info = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
      reshape_info->SetCostForReshapeWithParameter();
      pre_operator_info = reshape_info;
      pre_stra_costs = reshape_info->strategy_cost();
    } else {
      if (!FindPreNodeStraCosts(pre_node, &pre_operator_info, &out_index)) {
        MS_LOG(EXCEPTION) << "FindPreNodeStraCosts for reshape failed";
      }
      pre_stra_costs = pre_operator_info->strategy_cost();
    }
    // get the next node's strategy_cost_
    int32_t in_index = 0;
    OperatorInfoPtr next_operator_info;
    std::vector<std::shared_ptr<StrategyWithCost>> next_stra_costs;
    bool find_next_node = FindNextNodeStraCosts(cnode, &next_operator_info, &in_index);
    if (!find_next_node) {
      MS_LOG(INFO) << "FindNextNodeStraCosts for reshape failed";
    }
    // set input_layout and output_layout for reshape.
    // init reshape and set the cost for each input_layout and output_layout.
    auto reshape_info = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
    reshape_info->set_pre_operator_name(pre_operator_info->name());
    reshape_info->set_pre_operator_index(out_index);
    if (find_next_node) {
      next_stra_costs = next_operator_info->strategy_cost();
      reshape_info->set_next_operator_name(next_operator_info->name());
      reshape_info->set_next_operator_index(in_index);
    }
    bool is_prev_param = pre_node->isa<Parameter>();
    if (reshape_info->GenetateStrategyCosts(pre_stra_costs, next_stra_costs, out_index, in_index, is_prev_param) !=
        SUCCESS) {
      MS_LOG(EXCEPTION) << "Generating strategy costs for reshape failed!";
    }
  }
}
Status ParallelStrategySearch(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root) {
  // There are 4 meta-steps to determine the parallelization strategy for the ANF graph.
  // Step 1: Traverse the ANF graph, and create NODEs for the costgraph:
  //      create the OperatorInfo object for each primitive, and enumerate the parallelization strategies
  //      for each OperatorInfo;
  // Step 1.1: Deal with 'Reshape':
  //      'Reshape' takes its previous operator's layout as its input layout, and takes its next operator's
  //      layout as its output layout.
  // Step 2: Traverse the ANF graph, and create EDGEs for the costgraph:
  //      create the Edge object for each pair of OperatorInfos, and enumerate the parallelization strategies
  //      for each edge, based on the strategies of the two OperatorInfos;
  // Step 3: Augment the costgraph:
  //      take care of the case of a single Parameter being used by multiple operators. Create a TmpIdentity
  //      operator for this Parameter, and add an edge for the use of this Parameter by each
  //      subsequent operator;
  // Step 3.1: Calculate memory usage:
  //      note that the memory usage calculation differs between the training phase and the inference phase.
  // Step 4: Run the Dynamic Programming algorithm:
  //      in this process, the cost is calculated based on not only the operators, but also the edges. Here, the
  //      edge cost is caused by the redistribution of an operator's output tensor layout to the next operator's
  //      input tensor layout. Note that there may be several connected components in the costgraph, and the DP
  //      algorithm runs on each of them.
  //
  // OUTPUT: the determined strategy for each operator.

  // Step 1
  if (CostModelContext::GetInstance()->is_multi_subgraphs()) {
    if (ConstructCostGraphNodesByUniqueIdTC(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  } else {
    if (ConstructCostGraphNodesByUniqueId(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  }
  // Step 1.1
  ReshapeCostCompute(all_nodes);
  // Step 2
  ConstructCostGraphEdges(all_nodes);
  MS_LOG(INFO) << "Constructing edges for cost graph succeeded. There are " << entire_costgraph->GetOperators().size()
               << " operators, and " << entire_costgraph->GetNumEdges() << " edges.";
  // Step 3: Augment the costgraph.
  AugmentCostGraph(all_nodes);
  MS_LOG(INFO) << "After the augmenting procedure, there are " << entire_costgraph->GetOperators().size()
               << " operators, and " << entire_costgraph->GetNumEdges() << " edges.";
  // Step 3.1: Calculate the memory usage
  if (entire_costgraph->CalculateMemoryCost() != SUCCESS) {
    MS_LOG(EXCEPTION) << "Calculating memory cost failed.";
  }
  // Step 4: run the DP algorithm on the costgraph.
  if (GetStrategy(entire_costgraph) != SUCCESS) {
    MS_LOG(ERROR) << "Strategy search for cost graph failed";
    return FAILED;
  }
  MS_LOG(INFO) << "Searching strategy succeeded.";
  if (entire_costgraph->InitSelectedStrategy() == SUCCESS) {
    MS_LOG(INFO) << "Init selected strategy succeeded.";
  } else {
    MS_LOG(EXCEPTION) << "Init selected strategy failed.";
  }
  // print the selected strategy
  for (auto &op : entire_costgraph->GetOperators()) {
    StrategyPtr s_strategy = op->selected_strategy();
    MS_LOG(INFO) << op->name() << " : The strategy is:";
    PrintStrategy(s_strategy);
  }
  return SUCCESS;
}
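// Replace occurrences of the map entry's key (a 'tuple_getitem'/'depend' UniqueId) in the
// tensor-name list with its value, the UniqueId of the operator behind it (needed by rec_parser).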
std::vector<std::vector<std::string>> RecInputTensorNames(const std::map<std::string, std::string>::iterator &it,
                                                          std::vector<std::vector<std::string>> input_tensor_names) {
  for (size_t j = 0; j < input_tensor_names.size(); j++) {
    for (size_t k = 0; k < input_tensor_names[j].size(); k++) {
      if (it->first == input_tensor_names[j][k]) {
        input_tensor_names[j][k] = it->second;
        break;
      }
    }
  }
  return input_tensor_names;
}
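// Walk through chains of 'tuple_getitem'/'depend' nodes and return the first real operator
// CNode behind them, or nullptr if there is none.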
CNodePtr GetInternalOperatorInfo(const CNodePtr &cnode, const ValueNodePtr &prim_anf_node) {
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  if (prim->name() == TUPLE_GETITEM || prim->name() == DEPEND) {
    auto prev_cnode = cnode->input(1)->cast<CNodePtr>();
    if (prev_cnode == nullptr || !IsValueNode<Primitive>(prev_cnode->input(0))) {
      return nullptr;
    }
    auto prev_prim = prev_cnode->input(0)->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
    while (prev_prim->name() == TUPLE_GETITEM || prev_prim->name() == DEPEND) {
      prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
      if (prev_cnode == nullptr || !IsValueNode<Primitive>(prev_cnode->input(0))) {
        return nullptr;
      }
      prev_prim = prev_cnode->input(0)->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
    }
    return prev_cnode;
  }
  return nullptr;
}
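// If an OperatorInfo with the given name already exists in the cost graph, rewrite the
// occurrences of 'uniqueid' in the inputs-tensor-name list to refer to that operator.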
void ModifyInputsTensorNameListIfOperatorInfoCreated(const std::string &name, const std::string &uniqueid) {
  size_t iter_ops = 0;
  for (auto op : entire_costgraph->GetOperators()) {
    if (op->name() == name) {
      break;
    }
    iter_ops = iter_ops + 1;
  }
  std::vector<std::vector<std::string>> input_tensor_names = entire_costgraph->get_inputs_tensor_name_list();
  for (size_t i = 0; i < input_tensor_names.size(); i++) {
    for (size_t j = 0; j < input_tensor_names[i].size(); j++) {
      if (input_tensor_names[i][j] == uniqueid) {
        input_tensor_names[i][j] = input_tensor_names[iter_ops][0];
      }
    }
  }
  entire_costgraph->set_inputs_tensor_name_list(input_tensor_names);
}
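// Strategy search with the recursive-programming (RP) algorithm: build the cost-graph
// nodes, parse them into a recursive-partition graph, partition it across all devices,
// and generate a strategy for each operator.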
Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root) {
  if (CostModelContext::GetInstance()->is_multi_subgraphs()) {
    if (ConstructCostGraphNodesByUniqueIdTC(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  } else {
    if (ConstructCostGraphNodesByUniqueId(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  }
  ReshapeCostCompute(all_nodes);
  auto ops = entire_costgraph->GetOperators();
  std::vector<std::vector<std::string>> input_tensor_names = entire_costgraph->get_inputs_tensor_name_list();
  auto tuple_getitem_list = entire_costgraph->get_tuple_getitem_list();
  for (auto it = tuple_getitem_list.begin(); it != tuple_getitem_list.end();) {
    input_tensor_names = RecInputTensorNames(it++, input_tensor_names);
  }
  std::shared_ptr<Graph> graph = ParseGraph(ops, input_tensor_names);
  std::shared_ptr<std::vector<std::vector<size_t>>> eli_list(new std::vector<std::vector<size_t>>);
  std::shared_ptr<std::vector<size_t>> index_list(new std::vector<size_t>);
  graph = EliminateGraph(graph, eli_list, index_list);
  size_t num_device = g_device_manager->DeviceNum();
  double device_memory = entire_costgraph->GetDeviceMemory();
  if (PartitionForAllDevices(num_device, device_memory, graph) == SUCCESS) {
    MS_LOG(INFO) << "Partition succeeded with " << num_device << " devices.";
  } else {
    MS_LOG(ERROR) << "PartitionForAllDevices failed.";
    return FAILED;
  }
  GenerateStrategy(graph, ops, eli_list, input_tensor_names, index_list);
  if (entire_costgraph->InitSelectedStrategy() == SUCCESS) {
    MS_LOG(INFO) << "Init selected strategy succeeded.";
  } else {
    MS_LOG(ERROR) << "Init selected strategy failed.";
    return FAILED;
  }
  // print the selected strategy
  for (auto &op : entire_costgraph->GetOperators()) {
    StrategyPtr s_strategy = op->selected_strategy();
    MS_LOG(INFO) << op->name() << " : The strategy is:";
    PrintStrategy(s_strategy);
  }
  return SUCCESS;
}
}  // namespace parallel
}  // namespace mindspore