
step_auto_parallel.cc 53 kB

  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "frontend/parallel/step_auto_parallel.h"
  17. #include <cinttypes>
  18. #include <ctime>
  19. #include <algorithm>
  20. #include <map>
  21. #include <memory>
  22. #include <set>
  23. #include <string>
  24. #include <unordered_map>
  25. #include <utility>
  26. #include <vector>
  27. #include <unordered_set>
  28. #include "base/core_ops.h"
  29. #include "frontend/optimizer/opt.h"
  30. #include "frontend/optimizer/optimizer.h"
  31. #include "frontend/parallel/auto_parallel/dp_algo_costmodel.h"
  32. #include "frontend/parallel/auto_parallel/edge_costmodel.h"
  33. #include "frontend/parallel/auto_parallel/graph_costmodel.h"
  34. #include "frontend/parallel/auto_parallel/rec_core/rec_generate_strategy.h"
  35. #include "frontend/parallel/auto_parallel/rec_core/rec_parse_graph.h"
  36. #include "frontend/parallel/auto_parallel/rec_core/rec_partition.h"
  37. #include "frontend/parallel/context.h"
  38. #include "frontend/parallel/graph_util/node_info.h"
  39. #include "frontend/parallel/graph_util/graph_info.h"
  40. #include "frontend/parallel/ops_info/reshape_info.h"
  41. #include "frontend/parallel/ops_info/tmp_identity_info.h"
  42. #include "frontend/parallel/step_parallel.h"
  43. #include "frontend/parallel/parameter_manager.h"
  44. #include "frontend/parallel/strategy_checkpoint/parallel_strategy_checkpoint.h"
  45. #include "ir/anf.h"
  46. #include "ir/param_info.h"
  47. #include "ir/tensor.h"
  48. #if ((defined ENABLE_CPU) && (!defined _WIN32))
  49. #include "ps/util.h"
  50. #endif
  51. namespace mindspore {
  52. namespace parallel {
  53. bool StepAutoParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &) {
  54. #if ((defined ENABLE_CPU) && (!defined _WIN32))
  55. if (ps::Util::IsRoleOfPServer() || ps::Util::IsRoleOfScheduler()) {
  56. return false;
  57. }
  58. #endif
  59. MS_EXCEPTION_IF_NULL(root);
  60. MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance());
  61. std::string parallel_mode = ParallelContext::GetInstance()->parallel_mode();
  62. // assume no change to graph
  63. bool changes = false;
  64. // control whether to use the model_parallel mode
  65. if (!root->has_flag(AUTO_PARALLEL) || (parallel_mode != AUTO_PARALLEL) ||
  66. root->has_flag(AUTO_PARALLEL_RUN_ONCE_ONLY)) {
  67. return changes;
  68. }
  69. // check whether strategy_search_mode is valid
  70. std::string strategy_search_mode = ParallelContext::GetInstance()->strategy_search_mode();
  71. if ((strategy_search_mode != DYNAMIC_PROGRAMMING) && (strategy_search_mode != RECURSIVE_PROGRAMMING)) {
  72. // Setting searching mode: dynamic programming as default.
  73. strategy_search_mode = DYNAMIC_PROGRAMMING;
  74. MS_LOG(INFO) << "Strategy search mode not specified; using the DP search mode as default";
  75. }
  76. struct timeval start_time{0}, end_time{0};
  79. (void)gettimeofday(&start_time, nullptr);
  80. if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
  81. draw::Draw(STEP_AUTO_PARALLEL_BEGIN, root);
  82. }
  83. MS_LOG(INFO) << "Now entering step auto parallel";
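  // TOTAL_OPS is a global counter, reset here; AugmentCostGraph below uses it to give each TmpIdentity
  // operator a unique name suffix.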
  84. TOTAL_OPS = 0;
  85. AnfNodePtr ret = root->get_return();
  86. std::vector<AnfNodePtr> all_nodes = DeepScopedGraphSearch(ret);
  87. if (ParallelInit() != SUCCESS) {
  88. MS_LOG(EXCEPTION) << "Parallel init failed";
  89. }
  90. // Mark the forward CNodes; the parallel pass only cares about these nodes
  91. MarkForwardCNode(root);
  92. if (IsInsertVirtualOutput(root)) {
  93. InsertVirtualOutput(root, all_nodes);
  94. AnfNodePtr ret_after = root->get_return();
  95. MS_EXCEPTION_IF_NULL(ret_after);
  96. all_nodes = DeepScopedGraphSearch(ret_after);
  97. }
  98. if (FindCommunicationOp(all_nodes)) {
  99. MS_LOG(EXCEPTION) << "The graph contains communication operators";
  100. }
  101. // search parallelization strategy
  102. if (strategy_search_mode == DYNAMIC_PROGRAMMING) {
  103. if (ParallelStrategySearch(all_nodes, root) != SUCCESS) {
  104. MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using DP searching mode";
  105. }
  106. } else if (strategy_search_mode == RECURSIVE_PROGRAMMING) {
  107. if (ParallelStrategyRecSearch(all_nodes, root) != SUCCESS) {
  108. MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using RP searching mode";
  109. }
  110. } else {
  111. MS_LOG(EXCEPTION) << "Auto-parallel strategy searching mode unexpected";
  112. }
  113. (void)gettimeofday(&end_time, nullptr);
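  // Elapsed time in microseconds: scale the whole-second delta by kUSecondInSecond, then add the
  // residual microsecond delta.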
  114. uint64_t time = kUSecondInSecond * static_cast<uint64_t>(end_time.tv_sec - start_time.tv_sec);
  115. time += static_cast<uint64_t>(end_time.tv_usec - start_time.tv_usec);
  116. MS_LOG(INFO) << "Now leaving step auto parallel, elapsed time: " << time << " us";
  117. root->set_flag(AUTO_PARALLEL_RUN_ONCE_ONLY, true);
  118. return changes;
  119. }
  120. bool IsElementWiseOperator(const std::string &op_name) {
  121. // clang-format off
  122. static const std::set<std::string> elementwise_op = {ACTIVATION, GELU, TANH,
  123. SOFTMAX, LOG_SOFTMAX, RELU,
  124. SQRT, CAST, POW,
  125. EXP, LOG, COS,
  126. ACOS, LOGICALNOT, NEG,
  127. SQUARE, SIGMOID, ABS,
  128. ACOSH, ASIN, ASINH,
  129. ATAN, ATANH, CEIL,
  130. COSH, EXPM1, LOG1P,
  131. SIN, SINH, TAN,
  132. RSQRT, RECIPROCAL, INV,
  133. ROUND, FLOOR, SIGN,
  134. ERF, ERFC, ZEROSLIKE,
  135. ONESLIKE, BESSELI0E, MOD,
  136. ASSIGN, ASSIGN_ADD, ATAN2,
  137. DIVNONAN, LOGICALAND, ELU,
  138. LOGICALOR, RELU6, SOFTPLUS,
  139. SOFTSIGN, LESS, LESSEQUAL,
  140. BESSELI1E, GREATEREQUAL, APPROXIMATEEQUAL,
  141. REPEAT_ELEMENTS};
  142. // clang-format on
  143. auto iter = elementwise_op.find(op_name);
  144. return (iter != elementwise_op.end());
  145. }
  146. bool IsSplittableOperator(const std::string &op_name) {
  147. // clang-format off
  148. static const std::set<std::string> splittable_op =
  149. {MATMUL, TRANSPOSE, GELU, TANH, SOFTMAX, SUB, MUL, DIV, RESHAPE, GREATER, LOG_SOFTMAX, ACTIVATION, PRELU,
  150. FLOORDIV, L2_NORMALIZE, ADD, MAXPOOL, AVGPOOL, MAXPOOLV2, VIRTUAL_DATA_SET, RELU, ONEHOT, DROPOUT_DO_MASK,
  151. REDUCE_MAX, REDUCE_MIN, ARGMAXWITHVALUE, ARGMINWITHVALUE, REDUCE_SUM, CONV2D, FUSE_BATCH_NORM, POOLING,
  152. MAX_POOL_WITH_ARGMAX, SIMPLE_MEAN, FLATTEN, BATCH_NORM, LAYER_NORM, BIAS_ADD, ASSIGN_SUB, COS, ACOS, EXP, STACK,
  153. LOG, REDUCE_MEAN, REAL_DIV, SIGMOID, POW, MAXIMUM, MINIMUM, EQUAL, NOT_EQUAL, LOGICALNOT, GATHERV2, SQRT, CONCAT,
  154. STRIDEDSLICE, GET_NEXT, CAST, NEG, SQUARE, BATCH_MATMUL, EXPAND_DIMS, SQUEEZE, SPARSE_GATHERV2, TILE, DROPOUT,
  155. SOFTMAX_CROSS_ENTROPY_WITH_LOGITS, SIGMOID_CROSS_ENTROPY_WITH_LOGITS, SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS,
  156. EMBEDDING_LOOKUP, FUSE_BATCH_NORM_EX, SPLIT, BROADCAST_TO, ABS, ACOSH, ASIN, ASINH, ATAN, ATANH, CEIL, COSH,
  157. EXPM1, LOG1P, SIN, SINH, TAN, RSQRT, INV, RECIPROCAL, ROUND, FLOOR, SIGN, ERF, ERFC, ZEROSLIKE, ONESLIKE,
  158. BESSELI0E, BESSELI1E, FLOORMOD, ASSIGN, ASSIGN_ADD, ATAN2, DIVNONAN, LOGICALAND, LOGICALOR, ELU, RELU6, RELUV2,
  159. SOFTPLUS, SOFTSIGN, GREATEREQUAL, LESSEQUAL, LESS, APPROXIMATEEQUAL, MOD, UNIQUE, UNSORTED_SEGMENT_SUM,
  160. UNSORTED_SEGMENT_MIN, REPEAT_ELEMENTS, TENSOR_DOT, RANGE, UNIFORM_CANDIDATE_SAMPLER, SLICE, SELECT, GATHERD,
  161. UNSORTED_SEGMENT_MAX, GATHER_ND, TOPK, SCATTER_UPDATE, VIRTUAL_OUTPUT, CONV2D_BACK_PROP_INPUT, CONV2D_TRANSPOSE,
  162. MATMUL_DDS, DSD_MATMUL};
  163. // clang-format on
  164. auto iter = splittable_op.find(op_name);
  165. return (iter != splittable_op.end());
  166. }
  167. bool IsAutoParallelCareNode(const CNodePtr &cnode) {
  168. MS_EXCEPTION_IF_NULL(cnode);
  169. ValueNodePtr prim_node = cnode->input(0)->cast<ValueNodePtr>();
  170. if (prim_node == nullptr) {
  171. return false;
  172. }
  173. PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_node);
  174. if (prim == nullptr) {
  175. return false;
  176. }
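  // A node that the parallel pass cares about but whose primitive has no splittable OperatorInfo is an
  // error (MakeTuple/MakeList excepted); Casts coming from the optimizer are deliberately ignored.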
  177. bool bool_result = IsParallelCareNode(cnode) && !IsSplittableOperator(prim->name());
  178. if (bool_result && (prim->name() != MAKE_TUPLE) && (prim->name() != MAKE_LIST)) {
  179. MS_LOG(EXCEPTION) << "OperatorInfo should be implemented for: " << prim->name();
  180. } else if (prim->name() == CAST) {
  181. if (cnode->fullname_with_scope().find(OPTIMIZER_SUB_STRING) != std::string::npos) {
  182. // Ignore Cast operators inserted by the optimizer
  183. return false;
  184. }
  185. return true;
  186. }
  187. return IsParallelCareNode(cnode) && IsSplittableOperator(prim->name());
  188. }
  189. // Recording the operators appearing in a for-loop.
  190. // Currently, we assume that the operators in different for-loops are identical, and their traversal
  191. // orderings are also identical.
  192. // Therefore, we create OperatorInfo objects for the operators in a loop (say, loop-3), and reuse them in
  193. // the remaining loops (loop-2, loop-1 and loop-0).
  194. std::set<std::string> ops_in_a_loop_;
  195. // Returns true only if the two operators are in two different loops.
  196. // If at least one of the two operators is not in a loop, returns false.
  197. // If the two operators are in the same loop, returns false.
  198. bool IsOperatorsInTwoSeparateLoops(const CNodePtr &a_cnode, const CNodePtr &b_cnode) {
  199. auto a_op_info = a_cnode->user_data<OperatorInfo>();
  200. MS_EXCEPTION_IF_NULL(a_op_info);
  201. auto b_op_info = b_cnode->user_data<OperatorInfo>();
  202. MS_EXCEPTION_IF_NULL(b_op_info);
  203. if ((ops_in_a_loop_.find(a_op_info->name()) == ops_in_a_loop_.end()) ||
  204. (ops_in_a_loop_.find(b_op_info->name()) == ops_in_a_loop_.end())) {
  205. return false;
  206. }
  207. size_t a_loop_index = 0, b_loop_index = 0;
  208. const auto &a_fullname = a_cnode->fullname_with_scope();
  209. if (!GetLoopIndexFromCNode(a_cnode, &a_loop_index)) {
  210. MS_LOG(EXCEPTION) << "The operator with fullname_with_scope: " << a_fullname << " was not included in the set.";
  211. }
  212. const auto &b_fullname = b_cnode->fullname_with_scope();
  213. if (!GetLoopIndexFromCNode(b_cnode, &b_loop_index)) {
  214. MS_LOG(EXCEPTION) << "The operator with fullname_with_scope: " << b_fullname << " was not included in the set.";
  215. }
  216. if (a_loop_index == b_loop_index) {
  217. return false;
  218. }
  219. return true;
  220. }
  221. // 'configured_stra_ops_' records all operators that have configured sharding strategies.
  222. std::map<OperatorInfoPtr, StrategyPtr> configured_stra_ops_;
  223. void InitCostGraph() {
  224. if (entire_costgraph == nullptr) {
  225. entire_costgraph = std::make_shared<CostGraph>();
  226. }
  227. MS_EXCEPTION_IF_NULL(CostModelContext::GetInstance());
  228. CostModelContext::GetInstance()->PrintCostModel();
  229. entire_costgraph->Init();
  230. configured_stra_ops_.clear();
  231. }
  232. void SetStrategyToOperator(const OperatorInfoPtr &operator_info, const PrimitivePtr &prim,
  233. std::unordered_map<std::string, ValuePtr> attrs, bool is_last_nodes, StrategyMap *stra_map,
  234. const std::string &strategy_key_name) {
  235. // In this case, the configured strategy should be extracted to help set the cost
  236. StrategyPtr strategyPtr;
  237. if (StrategyFound(attrs)) {
  238. strategyPtr = parallel::ExtractStrategy(attrs[STRATEGY]);
  239. } else {
  240. strategyPtr = (*stra_map)[strategy_key_name];
  241. }
  242. if (strategyPtr != nullptr) {
  243. if (prim->name() == RESHAPE) {
  244. MS_LOG(EXCEPTION) << "Setting a strategy for Reshape has no effect!";
  245. }
  246. const auto fully_use_devices = CostModelContext::GetInstance()->fully_use_device();
  247. // Set cost for this configured strategy
  248. if (operator_info->SetCostUnderStrategy(strategyPtr) != SUCCESS) {
  249. MS_LOG(EXCEPTION) << "Failure: operator " << prim->name() << " SetCostUnderStrategy failed";
  250. } else if (fully_use_devices) {
  251. // If configured to fully use devices, check that the user-specified strategy indeed does so
  252. int64_t used_devices = operator_info->used_devices();
  253. MS_EXCEPTION_IF_NULL(g_device_manager);
  254. auto total_device_num = g_device_manager->GetDeviceListByStageId(0).size();
  255. // 'used_devices == 1' means the all-ones strategy, which is valid in auto-parallel
  256. if (used_devices == 1) {
  257. configured_stra_ops_.insert({operator_info, strategyPtr});
  258. return;
  259. }
  260. // 'used_devices == -1' means that 'used_devices_' is not set
  261. if ((used_devices == -1) || LongToSize(used_devices) != total_device_num) {
  262. MS_LOG(EXCEPTION) << "In configuration 'FULLY_USE_DEVICES' = True, "
  263. << "but the specified strategy uses device: " << used_devices
  264. << ", total devices: " << total_device_num;
  265. }
  266. }
  267. configured_stra_ops_.insert({operator_info, strategyPtr});
  268. }
  269. }
  270. OperatorInfoPtr CreateTheOperatorInfo(const PrimitivePtr &prim, const CNodePtr &cnode, bool is_last_nodes,
  271. StrategyMap *stra_map) {
  272. MS_EXCEPTION_IF_NULL(prim);
  273. MS_EXCEPTION_IF_NULL(cnode);
  274. auto attrs = prim->attrs();
  275. std::vector<Shapes> shape_list = ExtractShape(cnode);
  276. if (shape_list.empty()) {
  277. MS_LOG(EXCEPTION) << "Failure: node " << cnode->UniqueId() << " failed to extract shape";
  278. }
  279. // Create an OperatorInfo instance
  280. OperatorInfoPtr operator_info = NewOperatorInstance(prim, attrs, shape_list);
  281. MS_EXCEPTION_IF_NULL(operator_info);
  282. // Set the parameter information for this OperatorInfo (whether the inputs are parameters or not)
  283. std::vector<bool> parameter_info = ExtractInputParameterByNode(cnode);
  284. if (operator_info->set_is_parameter(parameter_info) != SUCCESS) {
  285. MS_LOG(ERROR) << "Initializing parameter information failed for operator: " << operator_info->name();
  286. return nullptr;
  287. }
  288. // Set the data type for inputs and outputs of this OperatorInfo
  289. auto inputs_type_length = ExtractInputTypeLengthByNode(cnode);
  290. auto outputs_type = ExtractOutputTypeByNode(cnode);
  291. std::vector<size_t> outputs_type_length;
  292. outputs_type_length.reserve(outputs_type.size());
  293. std::transform(outputs_type.begin(), outputs_type.end(), std::back_inserter(outputs_type_length),
  294. GetLengthOfDataType);
  295. if (operator_info->SetInputAndOutputTypeLength(inputs_type_length, outputs_type_length) != SUCCESS) {
  296. MS_LOG(ERROR) << "Setting the lengths of inputs and outputs failed for operator: " << operator_info->name();
  297. return nullptr;
  298. }
  299. if (operator_info->set_outputs_type(outputs_type) != SUCCESS) {
  300. MS_LOG(ERROR) << "Setting the types of outputs failed for operator: " << operator_info->name();
  301. return nullptr;
  302. }
  303. // When the 'inputs' contains numerical values for some operators, these values should be extracted from
  304. // the ANF graph
  305. auto &inputs = cnode->inputs();
  306. std::vector<ValuePtr> input_value;
  307. for (size_t index = 1; index < inputs.size(); ++index) {
  308. if (inputs[index]->isa<ValueNode>()) {
  309. input_value.push_back(GetValueNode(inputs[index]));
  310. } else {
  311. input_value.emplace_back(nullptr);
  312. }
  313. }
  314. operator_info->set_input_value(input_value);
  315. operator_info->set_outputs_dtype(cnode->Type());
  316. operator_info->set_cnode(cnode);
  317. // key of strategy map
  318. std::string strategy_key_name = "";
  319. auto param_names = NodeParameterName(cnode, -1, 0);
  320. if (!param_names.empty()) {
  321. strategy_key_name = prim->name() + "_" + param_names[0].first;
  322. }
  323. bool load_strategy_from_ckpt =
  324. StrategyCheckpoint::GetInstance().LoadCheckPointOn() && stra_map->find(strategy_key_name) != stra_map->end();
  325. // If no strategy has been configured for this operator, then candidate strategies are generated for
  326. // auto-strategy searching; if this primitive is CAST, we ignore the user-specified strategy.
  327. // If the strategy is configured to be loaded from a checkpoint, loading it from the checkpoint is preferred.
  328. if ((!StrategyFound(attrs) || prim->name() == CAST) && !load_strategy_from_ckpt) {
  329. // Compute split_flag_list_, indicating which input has batch dimension. This is ONLY used for preparation for
  330. // BatchParallelInfo operator
  331. operator_info->ComputeBatchSplitFlagList();
  332. if (operator_info->GenerateStrategies(0) != SUCCESS) {
  333. MS_LOG(ERROR) << "Strategy search for Operator " << operator_info->name() << " failed.";
  334. return nullptr;
  335. }
  336. if (ParallelContext::GetInstance()->sharding_propagation() &&
  337. (operator_info->name().find(VIRTUAL_DATA_SET_INFO) != std::string::npos)) {
  338. const auto &swc_vec = operator_info->GetStrategyCost();
  339. if (swc_vec.empty()) {
  340. MS_LOG(EXCEPTION) << "No available strategy for: " << operator_info->name();
  341. }
  342. MS_EXCEPTION_IF_NULL(swc_vec[0]->strategy_ptr);
  343. configured_stra_ops_.insert({operator_info, swc_vec[0]->strategy_ptr});
  344. }
  345. // If 'approximation' is enabled, the 'strategy_cost' of each operator is approximated
  346. auto approximation = CostModelContext::GetInstance()->dp_algo_enable_approxi();
  347. if (approximation) {
  348. operator_info->ApproximateStrategies();
  349. MS_LOG(INFO) << "Approximated StrategyCost for: " << operator_info->name();
  350. }
  351. } else {
  352. SetStrategyToOperator(operator_info, prim, attrs, is_last_nodes, stra_map, strategy_key_name);
  353. }
  354. return operator_info;
  355. }
  356. bool IsFindWrong(const OperatorInfoPtr current_op_ptr, const std::string &prim_name) {
  357. bool is_find_wrong = (current_op_ptr->name().find(VIRTUAL_DATA_SET_INFO) == std::string::npos) &&
  358. (current_op_ptr->name().find(BATCH_PARALLEL) == std::string::npos) &&
  359. (current_op_ptr->name().find(prim_name + "Info") == std::string::npos);
  360. if (prim_name == GATHERV2) {
  361. is_find_wrong = is_find_wrong && (current_op_ptr->name().find(prim_name + "PInfo") == std::string::npos);
  362. }
  363. return is_find_wrong;
  364. }
  365. // Using CNode's UniqueIds to construct nodes
  366. Status ConstructCostGraphNodesByUniqueId(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root) {
  367. MS_LOG(INFO) << "Constructing nodes for cost graph begins.";
  368. // The map from CNode's UniqueId to its operatorInfo
  369. std::map<std::string, OperatorInfoPtr> from_cnode_to_info;
  370. // The operator_infos in a loop
  371. std::vector<OperatorInfoPtr> operators_in_forloop;
  372. // Key: i-th loop; Value: index of 'operators_in_forloop'
  373. std::map<size_t, size_t> loop_to_ops;
  374. // extract strategy from checkpoint for multi-train
  375. StrategyMap stra_map;
  376. if (StrategyCheckpoint::GetInstance().LoadCheckPointOn()) {
  377. if (StrategyCheckpoint::GetInstance().Load(&stra_map) != SUCCESS) {
  378. MS_LOG(EXCEPTION) << "Load strategy checkpoint failed";
  379. }
  380. }
  381. for (auto &node : all_nodes) {
  382. // NOTE: we only care about splittable Primitive operators
  383. auto cnode = node->cast<CNodePtr>();
  384. bool bool_result = (cnode == nullptr) || (!IsValueNode<Primitive>(cnode->input(0)));
  385. if (bool_result) {
  386. continue;
  387. }
  388. ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  389. if (!IsAutoParallelCareNode(cnode)) {
  390. // Needed by rec_parser
  391. if (ParallelContext::GetInstance()->strategy_search_mode() == RECURSIVE_PROGRAMMING) {
  392. auto prev_cnode = GetInternalOperatorInfo(cnode, prim_anf_node);
  393. if (prev_cnode != nullptr) {
  394. entire_costgraph->add_tuple_getitem(std::make_pair(cnode->UniqueId(), prev_cnode->UniqueId()));
  395. }
  396. }
  397. continue;
  398. }
  399. PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  400. MS_EXCEPTION_IF_NULL(prim);
  401. auto search_cnode = from_cnode_to_info.find(cnode->UniqueId());
  402. if (search_cnode == from_cnode_to_info.end()) {
  403. size_t loop_index = 0;
  404. bool is_in_loop = GetLoopIndexFromCNode(cnode, &loop_index);
  405. const auto single_loop = CostModelContext::GetInstance()->dp_algo_single_loop();
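  // If this CNode sits in a for-loop whose operators were already created for an earlier loop copy,
  // reuse the existing OperatorInfo in traversal order (see the note on ops_in_a_loop_ above).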
  406. if (single_loop && is_in_loop && (loop_to_ops[loop_index] < operators_in_forloop.size())) {
  407. const auto &current_op_ptr = operators_in_forloop[loop_to_ops[loop_index]];
  408. if (IsFindWrong(current_op_ptr, prim->name())) {
  409. MS_LOG(EXCEPTION) << "The OperatorInfo: " << current_op_ptr->name()
  410. << " does not match the Prim: " << prim->name()
  411. << ". The fullname_with_scope: " << cnode->fullname_with_scope();
  412. }
  413. loop_to_ops[loop_index]++;
  414. cnode->set_user_data<OperatorInfo>(current_op_ptr);
  415. MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
  416. << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
  417. << ", CNode fullname_with_scope: " << cnode->fullname_with_scope()
  418. << " is set OperatorInfo: " << current_op_ptr->name() << ", Primitive: " << prim->name();
  419. (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueId(), current_op_ptr));
  420. continue;
  421. }
  422. bool is_last_nodes = IsPrimitiveCNode(cnode, prim::kPrimVirtualOutput);
  423. auto operator_info = CreateTheOperatorInfo(prim, cnode, is_last_nodes, &stra_map);
  424. if (operator_info == nullptr) {
  425. return FAILED;
  426. }
  427. // Needed by rec_parser
  428. operator_info->set_type(prim->name());
  429. operator_info->set_last_node_flag(is_last_nodes);
  430. std::vector<std::string> inputs_tensor_name = ExtractInputsTensorName(cnode);
  431. entire_costgraph->AddOperator(operator_info);
  432. cnode->set_user_data<OperatorInfo>(operator_info);
  433. MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
  434. << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
  435. << ", CNode fullname_with_scope: " << cnode->fullname_with_scope()
  436. << " is set OperatorInfo: " << operator_info->name() << ", Primitive: " << prim->name();
  437. (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueId(), operator_info));
  438. if (single_loop && is_in_loop) {
  439. operators_in_forloop.push_back(operator_info);
  440. ops_in_a_loop_.insert(operator_info->name());
  441. loop_to_ops[loop_index]++;
  442. }
  443. // Needed by rec_parser
  444. entire_costgraph->add_inputs_tensor_name(inputs_tensor_name);
  445. } else {
  446. // Two CNODEs' UniqueIds should not be equal
  447. MS_LOG(EXCEPTION) << "The CNode with UniqueId: " << cnode->UniqueId()
  448. << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
  449. << " is set OperatorInfo: " << search_cnode->second->name() << ", Primitive: " << prim->name();
  450. }
  451. }
  452. MS_LOG(INFO) << "Constructing nodes for cost graph ends.";
  453. return SUCCESS;
  454. }
  455. void SetOperatorToCNode(const OperatorInfoPtr &current_op_ptr, const PrimitivePtr &prim, const CNodePtr &cnode) {
  456. if (current_op_ptr == nullptr) {
  457. MS_LOG(EXCEPTION) << "Find " << prim->name() << " from CostGraph failed.";
  458. } else {
  459. if (IsFindWrong(current_op_ptr, prim->name())) {
  460. MS_LOG(EXCEPTION) << "The OperatorInfo: " << current_op_ptr->name()
  461. << " does not match the Prim: " << prim->name();
  462. }
  463. // Needed by rec_parser
  464. ModifyInputsTensorNameListIfOperatorInfoCreated(current_op_ptr->name(), cnode->UniqueId());
  465. cnode->set_user_data<OperatorInfo>(current_op_ptr);
  466. MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
  467. << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
  468. << ", CNode fullname_with_scope: " << cnode->fullname_with_scope()
  469. << " is set OperatorInfo: " << current_op_ptr->name() << ", Primitive: " << prim->name();
  470. }
  471. }
  472. // Using CNode's UniqueIdThroughCopys to construct nodes
  473. Status ConstructCostGraphNodesByUniqueIdTC(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root) {
  474. MS_LOG(INFO) << "Constructing nodes for cost graph begins.";
  475. // The map from CNode's UniqueIdThroughCopy to its operatorInfo
  476. std::map<std::string, OperatorInfoPtr> from_cnode_to_info;
  477. // The operator_infos in a loop
  478. std::vector<OperatorInfoPtr> operators_in_forloop;
  479. // Key: i-th loop; Value: index of 'operators_in_forloop'
  480. std::map<size_t, size_t> loop_to_ops;
  481. // extract strategy from checkpoint for multi-train
  482. StrategyMap stra_map;
  483. if (StrategyCheckpoint::GetInstance().LoadCheckPointOn() &&
  484. StrategyCheckpoint::GetInstance().Load(&stra_map) != SUCCESS) {
  485. MS_LOG(WARNING) << "Load strategy checkpoint failed";
  486. return FAILED;
  487. }
  488. for (auto &node : all_nodes) {
  489. // NOTE: we only care about splittable Primitive operators
  490. auto cnode = node->cast<CNodePtr>();
  491. if ((cnode == nullptr) || (!IsValueNode<Primitive>(cnode->input(0)))) {
  492. continue;
  493. }
  494. ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  495. if (!IsAutoParallelCareNode(cnode)) {
  496. // Needed by rec_parser
  497. if (ParallelContext::GetInstance()->strategy_search_mode() == RECURSIVE_PROGRAMMING) {
  498. auto prev_cnode = GetInternalOperatorInfo(cnode, prim_anf_node);
  499. if (prev_cnode != nullptr) {
  500. entire_costgraph->add_tuple_getitem(std::make_pair(cnode->UniqueId(), prev_cnode->UniqueId()));
  501. }
  502. }
  503. continue;
  504. }
  505. PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  506. // Find the operatorInfo if it exists
  507. auto search_cnode = from_cnode_to_info.find(cnode->UniqueIdThroughCopy());
  508. if (search_cnode == from_cnode_to_info.end()) {
  509. size_t loop_index = 0;
  510. bool is_in_loop = GetLoopIndexFromCNode(cnode, &loop_index);
  511. const auto single_loop = CostModelContext::GetInstance()->dp_algo_single_loop();
  512. bool is_op_created = single_loop && is_in_loop && (loop_to_ops[loop_index] < operators_in_forloop.size());
  513. if (is_op_created) {
  514. const auto &current_op_ptr = operators_in_forloop[loop_to_ops[loop_index]];
  515. if (IsFindWrong(current_op_ptr, prim->name())) {
  516. MS_LOG(EXCEPTION) << "The OperatorInfo: " << current_op_ptr->name()
  517. << " does not match the Prim: " << prim->name()
  518. << ". The fullname_with_scope: " << cnode->fullname_with_scope();
  519. }
  520. loop_to_ops[loop_index]++;
  521. cnode->set_user_data<OperatorInfo>(current_op_ptr);
  522. MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
  523. << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
  524. << ", CNode fullname_with_scope: " << cnode->fullname_with_scope()
  525. << " is set OperatorInfo: " << current_op_ptr->name() << ", Primitive: " << prim->name();
  526. (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueIdThroughCopy(), current_op_ptr));
  527. continue;
  528. }
  529. // In this case, the corresponding OperatorInfo has not been created yet, so create a new one.
  530. bool is_last_nodes = IsPrimitiveCNode(cnode, prim::kPrimVirtualOutput);
  531. auto operator_info = CreateTheOperatorInfo(prim, cnode, is_last_nodes, &stra_map);
  532. MS_EXCEPTION_IF_NULL(operator_info);
  533. // Needed by rec_parser
  534. operator_info->set_type(prim->name());
  535. operator_info->set_last_node_flag(is_last_nodes);
  536. std::vector<std::string> inputs_tensor_name = ExtractInputsTensorName(cnode);
  537. entire_costgraph->AddOperator(operator_info);
  538. cnode->set_user_data<OperatorInfo>(operator_info);
  539. MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
  540. << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
  541. << ", CNode fullname_with_scope: " << cnode->fullname_with_scope()
  542. << " is set OperatorInfo: " << operator_info->name() << ", Primitive: " << prim->name();
  543. (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueIdThroughCopy(), operator_info));
  544. if (single_loop && is_in_loop) {
  545. operators_in_forloop.push_back(operator_info);
  546. ops_in_a_loop_.insert(operator_info->name());
  547. loop_to_ops[loop_index]++;
  548. }
  549. // Needed by rec_parser
  550. entire_costgraph->add_inputs_tensor_name(inputs_tensor_name);
  551. } else {
  552. SetOperatorToCNode(search_cnode->second, prim, cnode);
  553. }
  554. }
  555. MS_LOG(INFO) << "Constructing nodes for cost graph ends.";
  556. return SUCCESS;
  557. }
  558. void CreateEdgeBetweenTwoOps(const OperatorInfoPtr &prev_op_info, const OperatorInfoPtr &node_op_info,
  559. const CNodePtr &cnode, const CNodePtr &prev_cnode, const PrimitivePtr &prim,
  560. const PrimitivePtr &prev_prim, size_t output_index, size_t input_index,
  561. size_t *edge_count) {
  562. std::string edge_name = prev_op_info->name() + OPERATOR_TO_OPERATOR_CONNECTOR + node_op_info->name();
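  // 'input_index' counts CNode inputs, where input 0 is the Primitive itself, so the operator's
  // data-input position is 'input_index - 1'.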
  563. // If the edge between these two operators has already been added, it will not be added again.
  564. if (entire_costgraph->IsEdgeInCostGraph(edge_name, output_index, input_index - 1)) {
  565. return;
  566. }
  567. EdgePtr edge_ptr;
  568. MS_LOG(INFO) << "Creating edge: " << edge_name;
  569. if (IsOperatorsInTwoSeparateLoops(prev_cnode, cnode)) {
  570. MS_LOG(INFO) << "prev_cnode_fullname: " << prev_cnode->fullname_with_scope()
  571. << ", cnode_fullname: " << cnode->fullname_with_scope();
  572. MS_LOG(INFO) << "The two operators in two separate for-loops, thus skip the edge.";
  573. return;
  574. }
  575. const auto stra_follow = CostModelContext::GetInstance()->elementwise_stra_follow();
  576. bool follow_strategy = (prim->name() == RESHAPE) || (prev_prim->name() == RESHAPE) ||
  577. (stra_follow && IsElementWiseOperator(prev_prim->name()));
  578. if (follow_strategy) {
  579. // Redistribution is not allowed on the edge.
  580. // Elementwise operators have the same strategy as their previous operators.
  581. edge_ptr =
  582. std::make_shared<Edge>(edge_name, prev_op_info, node_op_info, output_index, input_index - 1, false, true);
  583. } else {
  584. edge_ptr = std::make_shared<Edge>(edge_name, prev_op_info, node_op_info, output_index, input_index - 1, false);
  585. }
  586. // Init costs for this edge
  587. if (edge_ptr->InitEdgeCost() != SUCCESS) {
  588. MS_LOG(EXCEPTION) << "Edge cost initialization failed";
  589. }
  590. node_op_info->AddPrevEdge(edge_ptr);
  591. prev_op_info->AddSuccEdge(edge_ptr);
  592. entire_costgraph->AddEdge(prev_op_info, node_op_info, edge_ptr);
  593. if (ParallelContext::GetInstance()->sharding_propagation() && (prev_prim->name() == CAST) &&
  594. (configured_stra_ops_.find(node_op_info) != configured_stra_ops_.end())) {
  595. const auto next_op_stra = configured_stra_ops_[node_op_info];
  596. const auto cast_stra = edge_ptr->GetPrevOpStrategyByNextOpStrategyWithZeroComm(next_op_stra);
  597. if (cast_stra == nullptr) {
  598. MS_LOG(EXCEPTION) << "No available strategy for: " << prev_op_info->name();
  599. }
  600. prev_op_info->ClearStrategyCost();
  601. if (prev_op_info->SetCostUnderStrategy(cast_stra) != SUCCESS) {
  602. MS_LOG(EXCEPTION) << "Failure: operator " << prev_op_info->name() << " SetCostUnderStrategy failed";
  603. }
  604. if (edge_ptr->InitEdgeCost() != SUCCESS) {
  605. MS_LOG(EXCEPTION) << "Edge cost re-initialization failed.";
  606. }
  607. MS_LOG(INFO) << "Set strategy for: " << prev_op_info->name() << " under the strategy of: " << node_op_info->name();
  608. configured_stra_ops_.insert({prev_op_info, cast_stra});
  609. }
  610. MS_LOG(INFO) << "Successfully added the edge between " << prev_op_info->name() << " and " << node_op_info->name();
  611. (*edge_count)++;
  612. }
  613. void ConstructCostGraphEdges(const std::vector<AnfNodePtr> &all_nodes) {
  614. // Step 2
  615. MS_LOG(INFO) << "Constructing edges for cost graph begins.";
  616. for (auto &node : all_nodes) {
  617. auto cnode = node->cast<CNodePtr>();
  618. if ((cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0))) {
  619. continue;
  620. }
  621. auto &inputs = cnode->inputs();
  622. ValueNodePtr prim_anf_node = inputs[0]->cast<ValueNodePtr>();
  623. if (!IsAutoParallelCareNode(cnode)) {
  624. continue;
  625. }
  626. PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  627. size_t edge_count = 0;
  628. auto node_op_info = cnode->user_data<OperatorInfo>();
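  // Input 0 of a CNode is the Primitive itself; the actual data inputs start at index 1.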
  629. for (size_t i = 1; i < inputs.size(); ++i) {
  630. auto prev_cnode = inputs[i]->cast<CNodePtr>();
  631. bool bool_result_prev_cnode = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
  632. if (bool_result_prev_cnode) {
  633. continue;
  634. }
  635. ValueNodePtr prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
  636. PrimitivePtr prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
  637. size_t output_index = 0;
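  // Walk backward through 'tuple_getitem' and 'depend' wrappers until the real producer operator is
  // found, tracking the producer's output index along the way.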
  638. while ((IsAutoParallelCareNode(prev_cnode)) || (prev_prim->name() == prim::kTupleGetItem) ||
  639. (prev_prim->name() == DEPEND)) {
  640. if (IsAutoParallelCareNode(prev_cnode)) {
  641. auto prev_op_info = prev_cnode->user_data<OperatorInfo>();
  642. CreateEdgeBetweenTwoOps(prev_op_info, node_op_info, cnode, prev_cnode, prim, prev_prim, output_index, i,
  643. &edge_count);
  644. break;
  645. } else if (prev_prim->name() == prim::kTupleGetItem) {
  646. // In this case, 'prev_anf_node' is 'tuple_getitem', and the actual predecessor is the node before
  647. // this 'tuple_getitem'
  648. MS_LOG(INFO) << "Jumping the 'tuple_getitem' operator.";
  649. output_index = LongToSize(GetValue<int64_t>(GetValueNode(prev_cnode->input(2))));
  650. prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
  651. bool bool_result_tuple = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
  652. if (bool_result_tuple) {
  653. break;
  654. }
  655. prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
  656. prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
  657. if (!IsAutoParallelCareNode(prev_cnode)) {
  658. MS_LOG(EXCEPTION) << "Did not create OperatorInfo for : " << prev_prim->name();
  659. }
  660. MS_LOG(INFO) << "Jumped the 'tuple_getitem' operator, "
  661. << "and creating an edge between the Operator before "
  662. << "'tuple_getitem' and the Operator after 'tuple_getitem'.";
  663. } else if (prev_prim->name() == DEPEND) {
  664. // In this case, 'prev_anf_node' is 'depend', and the actual predecessor is the node before
  665. // this 'depend'
  666. MS_LOG(INFO) << "Jumping the 'depend' operator.";
  667. prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
  668. bool bool_result_depend = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
  669. if (bool_result_depend) {
  670. break;
  671. }
  672. prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
  673. prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
  674. MS_LOG(INFO) << "Jumped the 'depend' operator, "
  675. << "and creating an edge between the Operator before "
  676. << "'depend' and the Operator after 'depend'.";
  677. }
  678. }
  679. }
  680. MS_LOG(INFO) << "Successfully created " << edge_count << " edges for: " << node_op_info->name();
  681. }
  682. // If 'approximation' is enabled, the edges need to be checked to ensure they have effective costs.
  683. auto approximation = CostModelContext::GetInstance()->dp_algo_enable_approxi();
  684. if (approximation) {
  685. entire_costgraph->CheckApproximateCostGraphEdges();
  686. }
  687. MS_LOG(INFO) << "Constructing edges for cost graph ends.";
  688. }
  689. void AugmentCostGraph(const std::vector<AnfNodePtr> &all_nodes) {
  690. // Step 3
  691. for (auto &node : all_nodes) {
  692. ParameterUsersInfo parameter_users_info = FindParameterUsers(node, IsAutoParallelCareNode);
  693. auto parameter_name = parameter_users_info.first;
  694. auto target_parameter = parameter_users_info.second.first;
  695. auto target_set = parameter_users_info.second.second;
  696. if (target_set.size() <= 1) {
  697. continue;
  698. }
  699. // Rule out the case in which a Parameter is used by an Operator, but the Operator appears in multiple CNODEs
  700. std::set<std::string> target_without_duplicate;
  701. for (auto &target : target_set) {
  702. auto target_cnode = target.first->cast<CNodePtr>();
  703. auto input_index = target.second;
  704. (void)target_without_duplicate.insert(std::to_string(input_index) +
  705. target_cnode->user_data<OperatorInfo>()->name());
  706. }
  707. if (target_without_duplicate.size() <= 1) {
  708. continue;
  709. }
  710. // Here, it is certain that this Parameter (RefKey) is used by multiple Operators.
  711. OperatorInfoPtr tmp_identity_ptr;
  712. bool new_identity = false;
  713. std::string tmp_identity_name;
  714. auto returned_identity = entire_costgraph->FindTmpIdentityByParameterName(parameter_name);
  715. if (returned_identity != nullptr) {
  716. // In this case, the TmpIdentityInfo instance has already been created
  717. new_identity = false;
  718. tmp_identity_ptr = returned_identity;
  719. tmp_identity_name = tmp_identity_ptr->name();
  720. } else {
  721. // In this case, the TmpIdentityInfo instance has NOT been created. Thus, a new one is created.
  722. new_identity = true;
  723. // 1) extract input shape from this Parameter
  724. MS_EXCEPTION_IF_NULL(target_parameter);
  725. AbstractBasePtr abstract = target_parameter->abstract();
  726. if (abstract == nullptr) {
  727. MS_LOG(EXCEPTION) << "Failure: abstract is nullptr";
  728. }
  729. auto input_shape = dyn_cast<abstract::Shape>(abstract->GetShapeTrack());
  730. if (input_shape == nullptr) {
  731. MS_LOG(EXCEPTION) << "Failure: input_shape is nullptr";
  732. }
  733. Shape shape = input_shape->shape();
  734. Shapes inputs_shape = {shape};
  735. Shapes outputs_shape = {shape};
  736. // 2) init the attr
  737. std::unordered_map<std::string, ValuePtr> attr = {};
  738. // Create the TmpIdentity instance
  739. tmp_identity_ptr = std::make_shared<TmpIdentityInfo>(inputs_shape, outputs_shape, attr);
  740. tmp_identity_ptr->set_name(tmp_identity_ptr->name() + std::to_string(TOTAL_OPS));
  741. TOTAL_OPS++;
  742. tmp_identity_ptr->set_refkey_parameter_name(parameter_name);
  743. // Set the parameter and type lengths for inputs and outputs
  744. std::vector<bool> is_parameter;
  745. auto casted_target_parameter = target_parameter->cast<ParameterPtr>();
  746. MS_EXCEPTION_IF_NULL(casted_target_parameter);
  747. is_parameter.push_back(ParameterRequireGrad(casted_target_parameter));
  748. if (tmp_identity_ptr->set_is_parameter(is_parameter) != SUCCESS) {
  749. MS_LOG(EXCEPTION) << "Setting parameter for TmpIdentityInfo failed";
  750. }
  751. auto node_type = target_parameter->Type();
  752. if (node_type->isa<mindspore::TensorType>()) {
  753. auto input_element_type = node_type->cast<mindspore::TensorTypePtr>()->element();
  754. std::vector<size_t> type_length = {GetLengthOfDataType(input_element_type)};
  755. if (tmp_identity_ptr->SetInputAndOutputTypeLength(type_length, type_length) != SUCCESS) {
  756. MS_LOG(EXCEPTION) << "Setting input and output type length for TmpIdentityInfo failed";
  757. }
  758. } else {
  759. MS_LOG(EXCEPTION) << "Unknown type: " << node_type->type_name();
  760. }
  761. // Generate strategies for this TmpIdentityInfo instance.
  762. if (tmp_identity_ptr->GenerateStrategies(0) != SUCCESS) {
  763. MS_LOG(EXCEPTION) << "Strategy search for Operator failed : " << tmp_identity_ptr->name();
  764. }
  765. }
  766. // A flag recording whether new edges have been created or not
  767. bool add_identity_edge = false;
  768. // Create edges between this TmpIdentityInfo instance and subsequent Operator instances
  769. for (auto &target : target_set) {
  770. auto target_cnode = target.first->cast<CNodePtr>();
  771. auto input_index = target.second;
  772. auto target_op_info = target_cnode->user_data<OperatorInfo>();
  773. std::string edge_name = std::string(IDENTITY_INFO) + OPERATOR_TO_OPERATOR_CONNECTOR + target_op_info->name();
  774. // If the edge between these two operators has already been added, it will not be added again.
  775. if (entire_costgraph->IsEdgeInCostGraph(edge_name, 0, LongToSize(input_index - 1))) {
  776. continue;
  777. }
  778. std::shared_ptr<Edge> edge_ptr =
  779. std::make_shared<Edge>(edge_name, tmp_identity_ptr, target_op_info, 0, input_index - 1, false, true);
  780. // If 'approximation' is enabled, the edges need to be checked to ensure they have effective costs.
  781. auto approximation = CostModelContext::GetInstance()->dp_algo_enable_approxi();
  782. if (approximation) {
  783. target_op_info->ExactStrategiesAndRelatedEdges();
  784. }
  785. if (edge_ptr->InitEdgeCost() != SUCCESS) {
  786. MS_LOG(EXCEPTION) << "Edge cost initialization failed";
  787. }
  788. target_op_info->AddPrevEdge(edge_ptr);
  789. tmp_identity_ptr->AddSuccEdge(edge_ptr);
  790. entire_costgraph->AddEdge(tmp_identity_ptr, target_op_info, edge_ptr);
  791. MS_LOG(INFO) << "Successfully added the edge between " << tmp_identity_ptr->name() << " and "
  792. << target_op_info->name();
  793. add_identity_edge = true;
  794. }
  795. if (new_identity && add_identity_edge) {
  796. // Add the TmpIdentityInfo to the CostGraph only if BOTH conditions are satisfied
  797. entire_costgraph->AddOperator(tmp_identity_ptr);
  798. }
  799. }
  800. }
  801. void ReshapeCostCompute(const std::vector<AnfNodePtr> &all_nodes) {
  802. std::unordered_set<std::string> op_cache;
  803. for (auto node : all_nodes) {
  804. auto cnode = node->cast<CNodePtr>();
  805. if (!FindReshape(cnode, &op_cache)) {
  806. continue;
  807. }
  808. MS_ASSERT(cnode->inputs().size() == 3);
  809. // get previous node's strategy_cost_
  810. auto pre_node = cnode->input(1);
  811. if (IsPrimitiveCNode(pre_node, prim::kPrimLoad)) {
  812. pre_node = pre_node->cast<CNodePtr>()->input(1);
  813. }
  814. int64_t out_index = 0;
  815. OperatorInfoPtr pre_operator_info;
  816. std::vector<std::shared_ptr<StrategyWithCost>> pre_stra_costs;
  817. auto operator_info = cnode->user_data<OperatorInfo>();
  818. if (pre_node->isa<Parameter>()) {
  819. auto reshape_info1 = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
  820. reshape_info1->SetCostForReshapeWithParameter();
  821. pre_operator_info = reshape_info1;
  822. pre_stra_costs = reshape_info1->strategy_cost();
  823. } else {
  824. if (!FindReshapePreNodeStraCosts(pre_node, &pre_operator_info, &out_index, 0)) {
  825. MS_LOG(EXCEPTION) << "FindReshapePreNodeStraCosts for reshape failed";
  826. }
  827. pre_stra_costs = pre_operator_info->strategy_cost();
  828. }
  829. // get next node's strategy_cost_
  830. int64_t in_index = 0;
  831. OperatorInfoPtr next_operator_info;
  832. bool is_next_reshape = false;
  833. std::vector<std::shared_ptr<StrategyWithCost>> next_stra_costs;
  834. bool find_next_node = FindReshapeNextNodeStraCosts(cnode, &next_operator_info, &in_index, &is_next_reshape, 0);
  835. if (!find_next_node) {
  836. MS_LOG(INFO) << "FindReshapeNextNodeStraCosts for reshape failed";
  837. }
  838. // set input_layout and output_layout for reshape.
  839. // init reshape and set cost for each input_layout and output_layout.
  840. auto reshape_info = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
  841. reshape_info->set_pre_operator_name(pre_operator_info->name());
  842. reshape_info->set_pre_operator_index(out_index);
  843. if (find_next_node) {
  844. next_stra_costs = next_operator_info->strategy_cost();
  845. reshape_info->set_next_operator_name(next_operator_info->name());
  846. reshape_info->set_next_operator_index(in_index);
  847. }
  848. bool is_prev_param = pre_node->isa<Parameter>();
  849. if (reshape_info->GenetateStrategyCosts(pre_stra_costs, next_stra_costs, out_index, in_index, is_prev_param,
  850. is_next_reshape) != SUCCESS) {
  851. MS_LOG(EXCEPTION) << "Reshape failed to generate strategy costs!";
  852. }
  853. }
  854. }
  855. Status ParallelStrategySearch(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root) {
  856. // There are 4 meta-steps to determine the parallelization strategy for the ANF graph.
  857. // Step 1: Traverse the ANF graph, and create NODEs for costgraph:
  858. // create the OperatorInfo object for each primitive, and enumerate the parallelization strategies
  859. // for each OperatorInfo;
  860. // Step 1.1: Deal with 'Reshape':
  861. // For 'Reshape', it takes its previous operator's layout as its input layout, and takes its next operator's
  862. // layout as its output layout.
  863. // Step 2: Traverse the ANF graph, and create EDGES for costgraph:
  864. // create the Edge object for each pair of OperatorInfo, and enumerate the parallelization strategies
  865. // for each edge, based on the strategies of two OperatorInfos;
  866. // Step 3: Augment the costgraph:
  867. // handling the case of a single Parameter being used by multiple operators. Create a TmpIdentity
  868. // operator for this Parameter, and add an edge for the use of this Parameter by each
  869. // subsequent operator;
  870. // Step 3.1: Calculate memory usage:
  871. // note that the memory usage calculation differs between the training and inference phases.
  872. // Step 4: Run the strategy searching algorithm:
  873. // If 'sharding_propagation' is configured to be true, then the configured-sharding-strategies will propagate
  874. // to the non-configured operators, with the goal of minimizing redistribution cost.
  875. // Otherwise, the DP algorithm is used to search the strategies of the costgraph. Note that there may be several connected
  876. // components in the costgraph, and the DP algorithm runs on each of them.
  877. //
  878. // OUTPUT: the determined strategy for each operator.
  879. InitCostGraph();
  880. // Step 1
  881. if (CostModelContext::GetInstance()->is_multi_subgraphs()) {
  882. if (ConstructCostGraphNodesByUniqueIdTC(all_nodes, root) == SUCCESS) {
  883. MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
  884. << entire_costgraph->GetOperators().size() << " operators.";
  885. } else {
  886. MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
  887. }
  888. } else {
  889. if (ConstructCostGraphNodesByUniqueId(all_nodes, root) == SUCCESS) {
  890. MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
  891. << entire_costgraph->GetOperators().size() << " operators.";
  892. } else {
  893. MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
  894. }
  895. }
  896. // Step 1.1
  897. ReshapeCostCompute(all_nodes);
  898. // Step 2
  899. ConstructCostGraphEdges(all_nodes);
  900. MS_LOG(INFO) << "Constructing edges for cost graph succeeded. There are " << entire_costgraph->GetOperators().size()
  901. << " operators, and " << entire_costgraph->GetNumEdges() << " edges.";
  902. // Step 3: Augment the costgraph.
  903. AugmentCostGraph(all_nodes);
  904. auto num_ops = entire_costgraph->GetOperators().size();
  905. SetOpsNumToExecutor(num_ops);
  906. auto num_edges = entire_costgraph->GetNumEdges();
  907. MS_LOG(INFO) << "After the augmenting procedure, there are " << num_ops << " operators, and " << num_edges
  908. << " edges.";
  909. // Step 3.1: Calculate the memory usage
  910. if (entire_costgraph->CalculateMemoryCost() != SUCCESS) {
  911. MS_LOG(EXCEPTION) << "Calculating memory cost failed.";
  912. }
  913. // Step 4: run the strategy searching algorithm
  914. if (ParallelContext::GetInstance()->sharding_propagation()) {
  915. entire_costgraph->StrategyPropagate(configured_stra_ops_);
  916. configured_stra_ops_.clear();
  917. } else if (GetStrategy(entire_costgraph) != SUCCESS) {
  918. MS_LOG(ERROR) << "Strategy search for cost-graph failed";
  919. return FAILED;
  920. }
  921. MS_LOG(INFO) << "Searching strategy succeeded.";
  922. if (entire_costgraph->InitSelectedStrategy() == SUCCESS) {
  923. MS_LOG(INFO) << "Init selected strategy succeeded.";
  924. } else {
  925. MS_LOG(EXCEPTION) << "Init selected strategy failed.";
  926. }
  927. // print the selected strategy
  928. for (auto &op : entire_costgraph->GetOperators()) {
  929. StrategyPtr s_strategy = op->selected_strategy();
  930. MS_LOG(INFO) << op->name() << " : The strategy is:";
  931. PrintStrategy(s_strategy);
  932. }
  933. ops_in_a_loop_.clear();
  934. return SUCCESS;
  935. }
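  // Replaces, in the recorded tensor-name lists, each occurrence of a wrapper CNode's id (it->first)
  // with the id of its real producer (it->second), matching the pairs recorded via add_tuple_getitem;
  // returns the updated copy.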
  936. std::vector<std::vector<std::string>> RecInputTensorNames(const std::map<std::string, std::string>::iterator &it,
  937. std::vector<std::vector<std::string>> input_tensor_names) {
  938. for (size_t j = 0; j < input_tensor_names.size(); j++) {
  939. for (size_t k = 0; k < input_tensor_names[j].size(); k++) {
  940. if (it->first == input_tensor_names[j][k]) {
  941. input_tensor_names[j][k] = it->second;
  942. break;
  943. }
  944. }
  945. }
  946. return input_tensor_names;
  947. }
  948. CNodePtr GetInternalOperatorInfo(const CNodePtr &cnode, const ValueNodePtr &prim_anf_node) {
  949. PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  950. if (prim->name() == prim::kTupleGetItem || prim->name() == DEPEND) {
  951. auto prev_cnode = cnode->input(1)->cast<CNodePtr>();
  952. if (prev_cnode == nullptr || !IsValueNode<Primitive>(prev_cnode->input(0))) {
  953. return nullptr;
  954. }
  955. auto prev_prim = prev_cnode->input(0)->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
  956. while (prev_prim->name() == prim::kTupleGetItem || prev_prim->name() == DEPEND) {
  957. prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
  958. if (prev_cnode == nullptr || !IsValueNode<Primitive>(prev_cnode->input(0))) {
  959. return nullptr;
  960. }
  961. prev_prim = prev_cnode->input(0)->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
  962. }
  963. return prev_cnode;
  964. }
  965. return nullptr;
  966. }
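  // When an already-created OperatorInfo is reused for another CNode (needed by rec_parser), rewrite
  // that CNode's unique id in the recorded tensor-name lists to the reused operator's first tensor name.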
  967. void ModifyInputsTensorNameListIfOperatorInfoCreated(const std::string &name, const std::string &uniqueid) {
  968. size_t iter_ops = 0;
  969. for (auto op : entire_costgraph->GetOperators()) {
  970. if (op->name() == name) {
  971. break;
  972. }
  973. iter_ops = iter_ops + 1;
  974. }
  975. std::vector<std::vector<std::string>> input_tensor_names = entire_costgraph->get_inputs_tensor_name_list();
  976. for (size_t i = 0; i < input_tensor_names.size(); i++) {
  977. for (size_t j = 0; j < input_tensor_names[i].size(); j++) {
  978. if (input_tensor_names[i][j] == uniqueid) {
  979. input_tensor_names[i][j] = input_tensor_names[iter_ops][0];
  980. }
  981. }
  982. }
  983. entire_costgraph->set_inputs_tensor_name_list(input_tensor_names);
  984. }
  985. Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root) {
  986. InitCostGraph();
  987. if (CostModelContext::GetInstance()->is_multi_subgraphs()) {
  988. if (ConstructCostGraphNodesByUniqueIdTC(all_nodes, root) == SUCCESS) {
  989. MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
  990. << entire_costgraph->GetOperators().size() << " operators.";
  991. } else {
  992. MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
  993. }
  994. } else {
  995. if (ConstructCostGraphNodesByUniqueId(all_nodes, root) == SUCCESS) {
  996. MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
  997. << entire_costgraph->GetOperators().size() << " operators.";
  998. } else {
  999. MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
  1000. }
  1001. }
  1002. ReshapeCostCompute(all_nodes);
  1003. auto ops = entire_costgraph->GetOperators();
  1004. std::vector<std::vector<std::string>> input_tensor_names = entire_costgraph->get_inputs_tensor_name_list();
  1005. auto tuple_getitem_list = entire_costgraph->get_tuple_getitem_list();
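  // Patch the tensor-name lists so inputs recorded as 'tuple_getitem'/'depend' wrappers point to their
  // real producers (see GetInternalOperatorInfo and RecInputTensorNames).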
  1006. for (auto it = tuple_getitem_list.begin(); it != tuple_getitem_list.end();) {
  1007. input_tensor_names = RecInputTensorNames(it++, input_tensor_names);
  1008. }
  1009. std::shared_ptr<Graph> graph = ParseGraph(ops, input_tensor_names);
  1010. std::shared_ptr<std::vector<std::vector<size_t>>> eli_list(new std::vector<std::vector<size_t>>);
  1011. std::shared_ptr<std::vector<size_t>> index_list(new std::vector<size_t>);
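  // 'eli_list' and 'index_list' are output parameters of EliminateGraph; they appear to record the
  // eliminated operators and the index remapping, and are consumed again by GenerateStrategy below.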
  1012. graph = EliminateGraph(graph, eli_list, index_list);
  1013. size_t num_device = g_device_manager->DeviceNum();
  1014. const auto device_memory = CostModelContext::GetInstance()->device_memory_capacity();
  1015. if (PartitionForAllDevices(num_device, device_memory, graph) == SUCCESS) {
  1016. MS_LOG(INFO) << "Partition Success With " << num_device << " devices.";
  1017. } else {
  1018. MS_LOG(ERROR) << "PartitionForAllDevices failed.";
  1019. return FAILED;
  1020. }
  1021. bool is_training = root->has_flag(TRAINING);
  1025. GenerateStrategy(graph, ops, eli_list, input_tensor_names, index_list, is_training);
  1026. if (entire_costgraph->InitSelectedStrategy() == SUCCESS) {
  1027. MS_LOG(INFO) << "Init selected strategy succeeded.";
  1028. } else {
  1029. MS_LOG(ERROR) << "Init selected strategy failed.";
  1030. return FAILED;
  1031. }
  1032. // print the selected strategy
  1033. for (auto &op : entire_costgraph->GetOperators()) {
  1034. StrategyPtr s_strategy = op->selected_strategy();
  1035. MS_LOG(INFO) << op->name() << " : The strategy is:";
  1036. PrintStrategy(s_strategy);
  1037. }
  1038. return SUCCESS;
  1039. }
  1040. } // namespace parallel
  1041. } // namespace mindspore