
step_auto_parallel.cc

/**
 * Copyright 2019-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "frontend/parallel/step_auto_parallel.h"
#include <cinttypes>
#include <ctime>
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "utils/hash_map.h"
#include "utils/hash_set.h"
#include "base/core_ops.h"
#include "frontend/optimizer/opt.h"
#include "frontend/optimizer/optimizer.h"
#include "frontend/parallel/auto_parallel/dp_algo_costmodel.h"
#include "frontend/parallel/auto_parallel/edge_costmodel.h"
#include "frontend/parallel/auto_parallel/graph_costmodel.h"
#include "frontend/parallel/auto_parallel/rec_core/rec_generate_strategy.h"
#include "frontend/parallel/auto_parallel/rec_core/rec_parse_graph.h"
#include "frontend/parallel/auto_parallel/rec_core/rec_partition.h"
#include "frontend/parallel/context.h"
#include "frontend/parallel/graph_util/node_info.h"
#include "frontend/parallel/graph_util/graph_info.h"
#include "frontend/parallel/ops_info/reshape_info.h"
#include "frontend/parallel/ops_info/tmp_identity_info.h"
#include "frontend/parallel/step_parallel.h"
#include "frontend/parallel/parameter_manager.h"
#include "frontend/parallel/strategy_checkpoint/parallel_strategy_checkpoint.h"
#include "ir/anf.h"
#include "ir/param_info.h"
#include "ir/tensor.h"
#if ((defined ENABLE_CPU) && (!defined _WIN32))
#include "ps/util.h"
#endif
namespace mindspore {
namespace parallel {
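// Entry point of the auto-parallel pass. It searches a sharding strategy for every
// parallel-care operator in 'root' and stores the result on the operators, to be
// applied later by step_parallel. The returned 'changes' flag is always false,
// since this step only annotates the graph.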
bool StepAutoParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &) {
#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
  if (ps::Util::IsRoleOfPServer() || ps::Util::IsRoleOfScheduler()) {
    return false;
  }
#endif
  MS_EXCEPTION_IF_NULL(root);
  MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance());
  std::string parallel_mode = ParallelContext::GetInstance()->parallel_mode();
  // assume no change to the graph
  bool changes = false;
  // control whether to use model_parallel mode
  if (!root->has_flag(AUTO_PARALLEL) || (parallel_mode != AUTO_PARALLEL) ||
      root->has_flag(AUTO_PARALLEL_RUN_ONCE_ONLY)) {
    return changes;
  }
  std::string strategy_search_mode = ParallelContext::GetInstance()->strategy_search_mode();
  MS_LOG(INFO) << "search_mode: " << strategy_search_mode;
  struct timeval start_time {
    0
  }, end_time{0};
  (void)gettimeofday(&start_time, nullptr);
#ifdef ENABLE_DUMP_IR
  if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
    draw::Draw(STEP_AUTO_PARALLEL_BEGIN, root);
  }
#endif
  MS_LOG(INFO) << "Now entering step auto parallel";
  TOTAL_OPS = 0;
  AnfNodePtr ret = root->get_return();
  std::vector<AnfNodePtr> all_nodes = DeepScopedGraphSearch(ret);
  if (ParallelInit() != SUCCESS) {
    MS_LOG(EXCEPTION) << "Parallel init failed";
  }
  // mark the forward cnodes; parallel only cares about these nodes
  MarkForwardCNode(root);
  if (IsInsertVirtualOutput(root)) {
    InsertVirtualOutput(root, all_nodes);
    AnfNodePtr ret_after = root->get_return();
    MS_EXCEPTION_IF_NULL(ret_after);
    all_nodes = DeepScopedGraphSearch(ret_after);
  }
  if (FindCommunicationOp(all_nodes)) {
    MS_LOG(EXCEPTION) << "The graph contains communication ops";
  }
  // search the parallelization strategy
  if ((strategy_search_mode == DYNAMIC_PROGRAMMING) || (strategy_search_mode == SHARDING_PROPAGATION)) {
    if (ParallelStrategySearch(all_nodes, root) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using " << strategy_search_mode
                        << " searching mode";
    }
  } else if (strategy_search_mode == RECURSIVE_PROGRAMMING) {
    if (ParallelStrategyRecSearch(all_nodes, root) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using RP searching mode";
    }
  } else {
    MS_LOG(EXCEPTION) << "Auto-parallel strategy searching mode unexpected: " << strategy_search_mode;
  }
  (void)gettimeofday(&end_time, nullptr);
  uint64_t time = kUSecondInSecond * static_cast<uint64_t>(end_time.tv_sec - start_time.tv_sec);
  time += static_cast<uint64_t>(end_time.tv_usec - start_time.tv_usec);
  MS_LOG(INFO) << "Now leaving step auto parallel, used time: " << time << " us";
  root->set_flag(AUTO_PARALLEL_RUN_ONCE_ONLY, true);
  return changes;
}
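// Elementwise operators produce outputs with the same shape as their inputs.
// When 'elementwise_stra_follow' is enabled, such operators follow the sharding
// strategy of their preceding operator (see CreateEdgeBetweenTwoOps below).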
bool IsElementWiseOperator(const std::string &op_name) {
  // clang-format off
  static const std::set<std::string> elementwise_op = {ACTIVATION, GELU, TANH,
                                                       SOFTMAX, LOG_SOFTMAX, RELU,
                                                       SQRT, CAST, POW,
                                                       EXP, LOG, COS,
                                                       ACOS, LOGICALNOT, NEG,
                                                       SQUARE, SIGMOID, ABS,
                                                       ACOSH, ASIN, ASINH,
                                                       ATAN, ATANH, CEIL,
                                                       COSH, EXPM1, LOG1P,
                                                       SIN, SINH, TAN,
                                                       RSQRT, RECIPROCAL, INV,
                                                       ROUND, FLOOR, SIGN,
                                                       ERF, ERFC, ZEROSLIKE,
                                                       ONESLIKE, BESSELI0E, MOD,
                                                       ASSIGN, ASSIGN_ADD, ATAN2,
                                                       DIVNONAN, LOGICALAND, ELU,
                                                       LOGICALOR, RELU6, SOFTPLUS,
                                                       SOFTSIGN, LESS, LESSEQUAL,
                                                       BESSELI1E, GREATEREQUAL, APPROXIMATEEQUAL,
                                                       REPEAT_ELEMENTS};
  // clang-format on
  auto iter = elementwise_op.find(op_name);
  return (iter != elementwise_op.end());
}
bool IsSplittableOperator(const std::string &op_name) {
  // clang-format off
  static const std::set<std::string> splittable_op =
    {MATMUL, TRANSPOSE, GELU, TANH, SOFTMAX, SUB, MUL, DIV, RESHAPE, GREATER, LOG_SOFTMAX, ACTIVATION, PRELU,
     FLOORDIV, L2_NORMALIZE, ADD, MAXPOOL, AVGPOOL, MAXPOOLV2, VIRTUAL_DATA_SET, RELU, ONEHOT, DROPOUT_DO_MASK,
     REDUCE_MAX, REDUCE_MIN, ARGMAXWITHVALUE, ARGMINWITHVALUE, REDUCE_SUM, CONV2D, FUSE_BATCH_NORM, POOLING,
     MAX_POOL_WITH_ARGMAX, SIMPLE_MEAN, FLATTEN, BATCH_NORM, LAYER_NORM, BIAS_ADD, ASSIGN_SUB, COS, ACOS, EXP, STACK,
     LOG, REDUCE_MEAN, REAL_DIV, SIGMOID, POW, MAXIMUM, MINIMUM, EQUAL, NOT_EQUAL, LOGICALNOT, GATHERV2, SQRT, CONCAT,
     STRIDEDSLICE, GET_NEXT, CAST, NEG, SQUARE, BATCH_MATMUL, EXPAND_DIMS, SQUEEZE, SPARSE_GATHERV2, TILE, DROPOUT,
     SOFTMAX_CROSS_ENTROPY_WITH_LOGITS, SIGMOID_CROSS_ENTROPY_WITH_LOGITS, SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS,
     EMBEDDING_LOOKUP, FUSE_BATCH_NORM_EX, SPLIT, BROADCAST_TO, ABS, ACOSH, ASIN, ASINH, ATAN, ATANH, CEIL, COSH,
     EXPM1, LOG1P, SIN, SINH, TAN, RSQRT, INV, RECIPROCAL, ROUND, FLOOR, SIGN, ERF, ERFC, ZEROSLIKE, ONESLIKE,
     BESSELI0E, BESSELI1E, FLOORMOD, ASSIGN, ASSIGN_ADD, ATAN2, DIVNONAN, LOGICALAND, LOGICALOR, ELU, RELU6, RELUV2,
     SOFTPLUS, SOFTSIGN, GREATEREQUAL, LESSEQUAL, LESS, APPROXIMATEEQUAL, MOD, UNIQUE, UNSORTED_SEGMENT_SUM,
     UNSORTED_SEGMENT_MIN, REPEAT_ELEMENTS, TENSOR_DOT, RANGE, UNIFORM_CANDIDATE_SAMPLER, SLICE, SELECT, GATHERD,
     UNSORTED_SEGMENT_MAX, GATHER_ND, TOPK, SCATTER_UPDATE, VIRTUAL_OUTPUT, CONV2D_BACK_PROP_INPUT, CONV2D_TRANSPOSE,
     MATMUL_DDS, DSD_MATMUL, UNIFORMREAL, RESIZE_BILINEAR, RESIZE_NEAREST_NEIGHBOR};
  // clang-format on
  auto iter = splittable_op.find(op_name);
  return (iter != splittable_op.end());
}
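// A CNode is cared about by auto-parallel only if it holds a splittable Primitive
// (see IsSplittableOperator above). A parallel-care Primitive without an
// OperatorInfo implementation triggers an exception, and CASTs inserted by the
// optimizer are skipped.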
bool IsAutoParallelCareNode(const CNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  ValueNodePtr prim_node = cnode->input(0)->cast<ValueNodePtr>();
  if (prim_node == nullptr) {
    return false;
  }
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_node);
  if (prim == nullptr) {
    return false;
  }
  bool bool_result = IsParallelCareNode(cnode) && !IsSplittableOperator(prim->name());
  if (bool_result && (prim->name() != MAKE_TUPLE) && (prim->name() != MAKE_LIST)) {
    MS_LOG(EXCEPTION) << "Should implement OperatorInfo for: " << prim->name();
  } else if (prim->name() == CAST) {
    if (cnode->fullname_with_scope().find(OPTIMIZER_SUB_STRING) != std::string::npos) {
      // Do not care about CASTs from the optimizer
      return false;
    }
    return true;
  }
  return IsParallelCareNode(cnode) && IsSplittableOperator(prim->name());
}
// Recording the operators appearing in a for-loop.
// Currently, we assume that the operators in different for-loops are identical, and their traversal
// orderings are also identical.
// Therefore, we create OperatorInfo objects for the operators in a loop (say, loop-3), and reuse them in
// the rest of the loops (loop-2, loop-1 and loop-0).
std::set<std::string> ops_in_a_loop_;
// Whether the two operators are in two separate loops; if so, return true.
// If at least one of the two operators is not in a loop, return false.
// If the two operators are in the same loop, return false.
bool IsOperatorsInTwoSeparateLoops(const CNodePtr &a_cnode, const CNodePtr &b_cnode) {
  auto a_op_info = a_cnode->user_data<OperatorInfo>();
  MS_EXCEPTION_IF_NULL(a_op_info);
  auto b_op_info = b_cnode->user_data<OperatorInfo>();
  MS_EXCEPTION_IF_NULL(b_op_info);
  if ((ops_in_a_loop_.find(a_op_info->name()) == ops_in_a_loop_.end()) ||
      (ops_in_a_loop_.find(b_op_info->name()) == ops_in_a_loop_.end())) {
    return false;
  }
  size_t a_loop_index = 0, b_loop_index = 0;
  const auto &a_fullname = a_cnode->fullname_with_scope();
  if (!GetLoopIndexFromCNode(a_cnode, &a_loop_index)) {
    MS_LOG(EXCEPTION) << "The operator with fullname_with_scope: " << a_fullname << " was not included in the set.";
  }
  const auto &b_fullname = b_cnode->fullname_with_scope();
  if (!GetLoopIndexFromCNode(b_cnode, &b_loop_index)) {
    MS_LOG(EXCEPTION) << "The operator with fullname_with_scope: " << b_fullname << " was not included in the set.";
  }
  if (a_loop_index == b_loop_index) {
    return false;
  }
  return true;
}
// 'configured_stra_ops_' includes all operators that have configured sharding strategies.
std::map<OperatorInfoPtr, StrategyPtr> configured_stra_ops_;
void InitCostGraph() {
  if (entire_costgraph == nullptr) {
    entire_costgraph = std::make_shared<CostGraph>();
  }
  MS_EXCEPTION_IF_NULL(CostModelContext::GetInstance());
  CostModelContext::GetInstance()->PrintCostModel();
  entire_costgraph->Init();
  configured_stra_ops_.clear();
}
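// Applies a user-configured (or checkpoint-loaded) strategy to 'operator_info':
// the strategy is validated against 'fully_use_devices', used to set the
// operator's cost, and then recorded in 'configured_stra_ops_' so that sharding
// propagation can start from it.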
void SetStrategyToOperator(const OperatorInfoPtr &operator_info, const PrimitivePtr &prim,
                           mindspore::HashMap<std::string, ValuePtr> attrs, bool, StrategyMap *stra_map,
                           const std::string &strategy_key_name) {
  // In this case, the configured strategy should be extracted to help set the cost
  StrategyPtr strategyPtr;
  if (StrategyFound(attrs)) {
    strategyPtr = parallel::ExtractStrategy(attrs[IN_STRATEGY]);
  } else {
    strategyPtr = (*stra_map)[strategy_key_name];
  }
  if (strategyPtr == nullptr) {
    return;
  }
  if (prim->name() == RESHAPE) {
    MS_LOG(EXCEPTION) << "Setting strategy for Reshape takes no effect!";
    return;
  }
  // Set the cost for this configured strategy
  if (operator_info->SetCostUnderStrategy(strategyPtr) != SUCCESS) {
    MS_LOG(EXCEPTION) << "Failure: operator " << prim->name() << " SetCostUnderStrategy failed";
    return;
  }
  const auto fully_use_devices = CostModelContext::GetInstance()->fully_use_device();
  if (fully_use_devices) {
    // If configured to fully use devices, then check the user-specified strategy
    int64_t used_devices = operator_info->used_devices();
    MS_EXCEPTION_IF_NULL(g_device_manager);
    auto total_device_num = g_device_manager->GetDeviceListByStageId(0).size();
    // 'used_devices == -1' means that 'used_devices_' is not set
    // 'used_devices == 1' means the ALL-1 strategy, which is valid in auto-parallel
    if (used_devices == -1 || (used_devices != 1 && LongToSize(used_devices) != total_device_num)) {
      MS_LOG(EXCEPTION) << "In current configuration 'fully_use_devices' = True, "
                        << "but the specified strategy uses device: " << used_devices
                        << ", total devices: " << total_device_num
                        << ", try to set 'set_algo_parameters(fully_use_devices=False)' "
                           "in package 'mindspore.parallel'.";
    }
  }
  (void)configured_stra_ops_.emplace(operator_info, strategyPtr);
}
void ApplyApproximationForNode(const OperatorInfoPtr &operator_info) {
  auto approximation = CostModelContext::GetInstance()->dp_algo_enable_approxi();
  if (approximation) {
    operator_info->ApproximateStrategies();
    MS_LOG(INFO) << "Approximated StrategyCost for: " << operator_info->name();
  }
}
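// Creates the OperatorInfo for one Primitive CNode: extracts input/output shapes
// and type lengths, then either applies a configured or checkpointed strategy, or
// generates candidate strategies for the search. Returns nullptr on failure.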
OperatorInfoPtr CreateTheOperatorInfo(const PrimitivePtr &prim, const CNodePtr &cnode, bool is_last_nodes,
                                      StrategyMap *stra_map) {
  MS_EXCEPTION_IF_NULL(prim);
  MS_EXCEPTION_IF_NULL(cnode);
  auto attrs = prim->attrs();
  std::vector<Shapes> shape_list = ExtractShape(cnode);
  if (shape_list.empty()) {
    MS_LOG(EXCEPTION) << "Failure: node " << cnode->UniqueId() << " failed to extract shape";
  }
  // Create an OperatorInfo instance
  OperatorInfoPtr operator_info = NewOperatorInstance(prim, attrs, shape_list);
  MS_EXCEPTION_IF_NULL(operator_info);
  // Set the parameter information for this OperatorInfo (whether the inputs are parameters or not)
  std::vector<bool> parameter_info = ExtractInputParameterByNode(cnode);
  if (operator_info->set_is_parameter(parameter_info) != SUCCESS) {
    MS_LOG(ERROR) << "Initializing parameter information failed for operator: " << operator_info->name();
    return nullptr;
  }
  // Set the data types for the inputs and outputs of this OperatorInfo
  auto inputs_type_length = ExtractInputTypeLengthByNode(cnode);
  auto outputs_type = ExtractOutputTypeByNode(cnode);
  std::vector<size_t> outputs_type_length;
  outputs_type_length.reserve(outputs_type.size());
  std::transform(outputs_type.begin(), outputs_type.end(), std::back_inserter(outputs_type_length),
                 GetLengthOfDataType);
  if (operator_info->SetInputAndOutputTypeLength(inputs_type_length, outputs_type_length) != SUCCESS) {
    MS_LOG(ERROR) << "Setting the lengths of inputs and outputs failed for operator: " << operator_info->name();
    return nullptr;
  }
  if (operator_info->set_outputs_type(outputs_type) != SUCCESS) {
    MS_LOG(ERROR) << "Setting the types of outputs failed for operator: " << operator_info->name();
    return nullptr;
  }
  // When the 'inputs' contain numerical values for some operators, these values should be extracted from
  // the ANF graph
  auto &inputs = cnode->inputs();
  std::vector<ValuePtr> input_value;
  for (size_t index = 1; index < inputs.size(); ++index) {
    if (inputs[index]->isa<ValueNode>()) {
      input_value.push_back(GetValueNode(inputs[index]));
    } else {
      input_value.emplace_back(nullptr);
    }
  }
  operator_info->set_input_value(input_value);
  operator_info->set_outputs_dtype(cnode->Type());
  operator_info->set_cnode(cnode);
  // key of the strategy map
  std::string strategy_key_name = "";
  auto param_names = NodeParameterName(cnode, -1, 0);
  if (!param_names.empty()) {
    strategy_key_name = prim->name() + "_" + param_names[0].first;
  }
  bool load_strategy_from_ckpt =
    StrategyCheckpoint::GetInstance().LoadCheckPointOn() && stra_map->find(strategy_key_name) != stra_map->end();
  // If no strategy has been configured for this operator, then candidate strategies are generated for
  // auto-strategy searching; if this primitive is CAST, we ignore the user-specified strategy.
  // If the strategy is set to load from the checkpoint, loading it from the checkpoint is preferred.
  if ((StrategyFound(attrs) && prim->name() != CAST) || load_strategy_from_ckpt) {
    SetStrategyToOperator(operator_info, prim, attrs, is_last_nodes, stra_map, strategy_key_name);
    return operator_info;
  }
  // Compute split_flag_list_, indicating which input has the batch dimension. This is ONLY used in
  // preparation for the BatchParallelInfo operator
  operator_info->ComputeBatchSplitFlagList();
  Status retGenStra;
  if (AttrFound(attrs, STRATEGY_GEN_MODE) && GetValue<std::string>(attrs[STRATEGY_GEN_MODE]) == DATA_PARALLEL) {
    MS_LOG(INFO) << "generating batch parallel strategy...";
    StrategyPtr strategyPtr = parallel::GenerateBatchParallelStrategy(operator_info, prim);
    retGenStra = operator_info->SetCostUnderStrategy(strategyPtr);
  } else {
    MS_LOG(INFO) << "auto-searching strategy...";
    retGenStra = operator_info->GenerateStrategies(0);
  }
  if (retGenStra != SUCCESS) {
    MS_LOG(ERROR) << "Strategy search for Operator " << operator_info->name() << " failed.";
    return nullptr;
  }
  bool use_sp_and_dataset = ((ParallelContext::GetInstance()->strategy_search_mode() == SHARDING_PROPAGATION) ||
                             (ParallelContext::GetInstance()->sharding_propagation())) &&
                            (operator_info->name().find(VIRTUAL_DATA_SET_INFO) != std::string::npos);
  if (use_sp_and_dataset) {
    const auto &swc_vec = operator_info->GetStrategyCost();
    if (swc_vec.empty()) {
      MS_LOG(EXCEPTION) << "No available strategy for: " << operator_info->name();
    }
    MS_EXCEPTION_IF_NULL(swc_vec[0]->strategy_ptr);
    (void)configured_stra_ops_.emplace(operator_info, swc_vec[0]->strategy_ptr);
  }
  // If 'approximation' is enabled, the 'strategy_cost' of each operator is approximated
  ApplyApproximationForNode(operator_info);
  return operator_info;
}
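// Returns true if a cached OperatorInfo does NOT match the given primitive name.
// VirtualDataset and BatchParallel infos are accepted for any primitive; GatherV2
// additionally accepts its 'PInfo' variant.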
bool IsFindWrong(const OperatorInfoPtr current_op_ptr, const std::string &prim_name) {
  bool is_find_wrong = (current_op_ptr->name().find(VIRTUAL_DATA_SET_INFO) == std::string::npos) &&
                       (current_op_ptr->name().find(BATCH_PARALLEL) == std::string::npos) &&
                       (current_op_ptr->name().find(prim_name + "Info") == std::string::npos);
  if (prim_name == GATHERV2) {
    is_find_wrong = is_find_wrong && (current_op_ptr->name().find(prim_name + "PInfo") == std::string::npos);
  }
  return is_find_wrong;
}
// Using CNode's UniqueIds to construct nodes
Status ConstructCostGraphNodesByUniqueId(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &) {
  MS_LOG(INFO) << "Constructing nodes for cost graph begins.";
  // The map from a CNode's UniqueId to its OperatorInfo
  std::map<std::string, OperatorInfoPtr> from_cnode_to_info;
  // The operator_infos in a loop
  std::vector<OperatorInfoPtr> operators_in_forloop;
  // Key: i-th loop; Value: index of 'operators_in_forloop'
  std::map<size_t, size_t> loop_to_ops;
  // extract strategies from the checkpoint for multi-train
  StrategyMap stra_map;
  if (StrategyCheckpoint::GetInstance().LoadCheckPointOn()) {
    if (StrategyCheckpoint::GetInstance().Load(&stra_map) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Load strategy checkpoint failed";
    }
  }
  for (auto &node : all_nodes) {
    // NOTE: we only care about splittable Primitive operators
    auto cnode = node->cast<CNodePtr>();
    bool bool_result = (cnode == nullptr) || (!IsValueNode<Primitive>(cnode->input(0)));
    if (bool_result) {
      continue;
    }
    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    if (!IsAutoParallelCareNode(cnode)) {
      // Needed by rec_parser
      if (ParallelContext::GetInstance()->strategy_search_mode() == RECURSIVE_PROGRAMMING) {
        auto prev_cnode = GetInternalOperatorInfo(cnode, prim_anf_node);
        if (prev_cnode != nullptr) {
          entire_costgraph->add_tuple_getitem(std::make_pair(cnode->UniqueId(), prev_cnode->UniqueId()));
        }
      }
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    MS_EXCEPTION_IF_NULL(prim);
    auto search_cnode = from_cnode_to_info.find(cnode->UniqueId());
    if (search_cnode == from_cnode_to_info.end()) {
      size_t loop_index = 0;
      bool is_in_loop = GetLoopIndexFromCNode(cnode, &loop_index);
      const auto single_loop = CostModelContext::GetInstance()->dp_algo_single_loop();
      if (single_loop && is_in_loop && (loop_to_ops[loop_index] < operators_in_forloop.size())) {
        const auto &current_op_ptr = operators_in_forloop[loop_to_ops[loop_index]];
        if (IsFindWrong(current_op_ptr, prim->name())) {
          MS_LOG(EXCEPTION) << "The OperatorInfo: " << current_op_ptr->name()
                            << " does not match the Prim: " << prim->name()
                            << ". The fullname_with_scope: " << cnode->fullname_with_scope();
        }
        loop_to_ops[loop_index]++;
        cnode->set_user_data<OperatorInfo>(current_op_ptr);
        MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                     << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                     << ", CNode fullname_with_scope: " << cnode->fullname_with_scope()
                     << " is set OperatorInfo: " << current_op_ptr->name() << ", Primitive: " << prim->name();
        (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueId(), current_op_ptr));
        continue;
      }
      bool is_last_nodes = IsPrimitiveCNode(cnode, prim::kPrimVirtualOutput);
      auto operator_info = CreateTheOperatorInfo(prim, cnode, is_last_nodes, &stra_map);
      if (operator_info == nullptr) {
        return FAILED;
      }
      // Needed by rec_parser
      operator_info->set_type(prim->name());
      operator_info->set_last_node_flag(is_last_nodes);
      std::vector<std::string> inputs_tensor_name = ExtractInputsTensorName(cnode);
      entire_costgraph->AddOperator(operator_info);
      cnode->set_user_data<OperatorInfo>(operator_info);
      MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                   << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                   << ", CNode fullname_with_scope: " << cnode->fullname_with_scope()
                   << " is set OperatorInfo: " << operator_info->name() << ", Primitive: " << prim->name();
      (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueId(), operator_info));
      if (single_loop && is_in_loop) {
        operators_in_forloop.push_back(operator_info);
        ops_in_a_loop_.insert(operator_info->name());
        loop_to_ops[loop_index]++;
      }
      // Needed by rec_parser
      entire_costgraph->add_inputs_tensor_name(inputs_tensor_name);
    } else {
      // Two CNodes' UniqueIds should not be equal
      MS_LOG(EXCEPTION) << "The CNode with UniqueId: " << cnode->UniqueId()
                        << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                        << " is set OperatorInfo: " << search_cnode->second->name() << ", Primitive: " << prim->name();
    }
  }
  MS_LOG(INFO) << "Constructing nodes for cost graph ends.";
  return SUCCESS;
}
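// Binds an already-created OperatorInfo to a CNode that shares the same
// UniqueIdThroughCopy, keeping the rec_parser tensor-name list consistent.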
void SetOperatorToCNode(const OperatorInfoPtr &current_op_ptr, const PrimitivePtr &prim, const CNodePtr &cnode) {
  if (current_op_ptr == nullptr) {
    MS_LOG(EXCEPTION) << "Find " << prim->name() << " from CostGraph failed.";
  } else {
    if (IsFindWrong(current_op_ptr, prim->name())) {
      MS_LOG(EXCEPTION) << "The OperatorInfo: " << current_op_ptr->name()
                        << " does not match the Prim: " << prim->name();
    }
    // Needed by rec_parser
    ModifyInputsTensorNameListIfOperatorInfoCreated(current_op_ptr->name(), cnode->UniqueId());
    cnode->set_user_data<OperatorInfo>(current_op_ptr);
    MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                 << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                 << ", CNode fullname_with_scope: " << cnode->fullname_with_scope()
                 << " is set OperatorInfo: " << current_op_ptr->name() << ", Primitive: " << prim->name();
  }
}
// Using CNode's UniqueIdThroughCopys to construct nodes
Status ConstructCostGraphNodesByUniqueIdTC(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &) {
  MS_LOG(INFO) << "Constructing nodes for cost graph begins.";
  // The map from a CNode's UniqueIdThroughCopy to its OperatorInfo
  std::map<std::string, OperatorInfoPtr> from_cnode_to_info;
  // The operator_infos in a loop
  std::vector<OperatorInfoPtr> operators_in_forloop;
  // Key: i-th loop; Value: index of 'operators_in_forloop'
  std::map<size_t, size_t> loop_to_ops;
  // extract strategies from the checkpoint for multi-train
  StrategyMap stra_map;
  if (StrategyCheckpoint::GetInstance().LoadCheckPointOn() &&
      StrategyCheckpoint::GetInstance().Load(&stra_map) != SUCCESS) {
    MS_LOG(WARNING) << "Load strategy checkpoint failed";
    return FAILED;
  }
  for (auto &node : all_nodes) {
    // NOTE: we only care about splittable Primitive operators
    auto cnode = node->cast<CNodePtr>();
    if ((cnode == nullptr) || (!IsValueNode<Primitive>(cnode->input(0)))) {
      continue;
    }
    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    if (!IsAutoParallelCareNode(cnode)) {
      // Needed by rec_parser
      if (ParallelContext::GetInstance()->strategy_search_mode() == RECURSIVE_PROGRAMMING) {
        auto prev_cnode = GetInternalOperatorInfo(cnode, prim_anf_node);
        if (prev_cnode != nullptr) {
          entire_costgraph->add_tuple_getitem(std::make_pair(cnode->UniqueId(), prev_cnode->UniqueId()));
        }
      }
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    // Find the OperatorInfo if it exists
    auto search_cnode = from_cnode_to_info.find(cnode->UniqueIdThroughCopy());
    if (search_cnode == from_cnode_to_info.end()) {
      size_t loop_index = 0;
      bool is_in_loop = GetLoopIndexFromCNode(cnode, &loop_index);
      const auto single_loop = CostModelContext::GetInstance()->dp_algo_single_loop();
      bool is_op_created = single_loop && is_in_loop && (loop_to_ops[loop_index] < operators_in_forloop.size());
      if (is_op_created) {
        const auto &current_op_ptr = operators_in_forloop[loop_to_ops[loop_index]];
        if (IsFindWrong(current_op_ptr, prim->name())) {
          MS_LOG(EXCEPTION) << "The OperatorInfo: " << current_op_ptr->name()
                            << " does not match the Prim: " << prim->name()
                            << ". The fullname_with_scope: " << cnode->fullname_with_scope();
        }
        loop_to_ops[loop_index]++;
        cnode->set_user_data<OperatorInfo>(current_op_ptr);
        MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                     << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                     << ", CNode fullname_with_scope: " << cnode->fullname_with_scope()
                     << " is set OperatorInfo: " << current_op_ptr->name() << ", Primitive: " << prim->name();
        (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueIdThroughCopy(), current_op_ptr));
        continue;
      }
      // In this case, the corresponding OperatorInfo has not been created yet, so create a new one.
      bool is_last_nodes = IsPrimitiveCNode(cnode, prim::kPrimVirtualOutput);
      auto operator_info = CreateTheOperatorInfo(prim, cnode, is_last_nodes, &stra_map);
      MS_EXCEPTION_IF_NULL(operator_info);
      // Needed by rec_parser
      operator_info->set_type(prim->name());
      operator_info->set_last_node_flag(is_last_nodes);
      std::vector<std::string> inputs_tensor_name = ExtractInputsTensorName(cnode);
      entire_costgraph->AddOperator(operator_info);
      cnode->set_user_data<OperatorInfo>(operator_info);
      MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                   << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                   << ", CNode fullname_with_scope: " << cnode->fullname_with_scope()
                   << " is set OperatorInfo: " << operator_info->name() << ", Primitive: " << prim->name();
      (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueIdThroughCopy(), operator_info));
      if (single_loop && is_in_loop) {
        operators_in_forloop.push_back(operator_info);
        ops_in_a_loop_.insert(operator_info->name());
        loop_to_ops[loop_index]++;
      }
      // Needed by rec_parser
      entire_costgraph->add_inputs_tensor_name(inputs_tensor_name);
    } else {
      SetOperatorToCNode(search_cnode->second, prim, cnode);
    }
  }
  MS_LOG(INFO) << "Constructing nodes for cost graph ends.";
  return SUCCESS;
}
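// Creates and cost-initializes the edge between two OperatorInfos. Reshape and
// (optionally) elementwise followers are forced to follow the previous operator's
// strategy; under sharding propagation, a preceding CAST inherits the strategy of
// its configured successor with minimal communication.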
void CreateEdgeBetweenTwoOps(const OperatorInfoPtr &prev_op_info, const OperatorInfoPtr &node_op_info,
                             const CNodePtr &cnode, const CNodePtr &prev_cnode, const PrimitivePtr &prim,
                             const PrimitivePtr &prev_prim, size_t output_index, size_t input_index,
                             size_t *edge_count) {
  std::string edge_name = prev_op_info->name() + OPERATOR_TO_OPERATOR_CONNECTOR + node_op_info->name();
  // If the edge between these two operators has already been added, it will not be added again.
  if (entire_costgraph->IsEdgeInCostGraph(edge_name, output_index, input_index - 1)) {
    return;
  }
  EdgePtr edge_ptr;
  MS_LOG(INFO) << "Creating edge: " << edge_name;
  if (IsOperatorsInTwoSeparateLoops(prev_cnode, cnode)) {
    MS_LOG(INFO) << "prev_cnode_fullname: " << prev_cnode->fullname_with_scope()
                 << ", cnode_fullname: " << cnode->fullname_with_scope();
    MS_LOG(INFO) << "The two operators are in two separate for-loops, thus the edge is skipped.";
    return;
  }
  const auto stra_follow = CostModelContext::GetInstance()->elementwise_stra_follow();
  bool follow_strategy = (prim->name() == RESHAPE) || (prev_prim->name() == RESHAPE) ||
                         (stra_follow && IsElementWiseOperator(prev_prim->name()));
  if (follow_strategy) {
    // Redistribution is not allowed on the edge.
    // Elementwise operators have the same strategy as their previous operators.
    edge_ptr =
      std::make_shared<Edge>(edge_name, prev_op_info, node_op_info, output_index, input_index - 1, false, true);
  } else {
    edge_ptr = std::make_shared<Edge>(edge_name, prev_op_info, node_op_info, output_index, input_index - 1, false);
  }
  // Init costs for this edge
  if (edge_ptr->InitEdgeCost() != SUCCESS) {
    MS_LOG(EXCEPTION) << "Edge cost initialization failed";
  }
  node_op_info->AddPrevEdge(edge_ptr);
  prev_op_info->AddSuccEdge(edge_ptr);
  entire_costgraph->AddEdge(prev_op_info, node_op_info, edge_ptr);
  bool use_sp = (ParallelContext::GetInstance()->strategy_search_mode() == SHARDING_PROPAGATION) ||
                (ParallelContext::GetInstance()->sharding_propagation());
  if (use_sp && (prev_prim->name() == CAST) &&
      (configured_stra_ops_.find(node_op_info) != configured_stra_ops_.end())) {
    const auto next_op_stra = configured_stra_ops_[node_op_info];
    const auto cast_stra = edge_ptr->GetPrevOpStrategyByNextOpStrategyWithMiniComm(next_op_stra);
    if (cast_stra == nullptr) {
      MS_LOG(EXCEPTION) << "No available strategy for: " << prev_op_info->name();
    }
    prev_op_info->ClearStrategyCost();
    if (prev_op_info->SetCostUnderStrategy(cast_stra) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Failure: operator " << prev_op_info->name() << " SetCostUnderStrategy failed";
    }
    if (edge_ptr->InitEdgeCost() != SUCCESS) {
      MS_LOG(EXCEPTION) << "Edge cost re-initialization failed.";
    }
    MS_LOG(INFO) << "Set strategy for: " << prev_op_info->name() << " under the strategy of: " << node_op_info->name();
    (void)configured_stra_ops_.emplace(prev_op_info, cast_stra);
  }
  MS_LOG(INFO) << "Successfully added the edge between " << prev_op_info->name() << " and " << node_op_info->name();
  (*edge_count)++;
}
void ApplyApproximationForGraphs() {
  // If 'approximation' is enabled, the edges need to be checked to ensure they have effective costs.
  auto approximation = CostModelContext::GetInstance()->dp_algo_enable_approxi();
  if (approximation) {
    entire_costgraph->CheckApproximateCostGraphEdges();
  }
}
static void ConstructCNodeCostGraphEdges(const mindspore::CNodePtr &cnode) {
  auto &inputs = cnode->inputs();
  ValueNodePtr prim_anf_node = inputs[0]->cast<ValueNodePtr>();
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  size_t edge_count = 0;
  auto node_op_info = cnode->user_data<OperatorInfo>();
  for (size_t i = 1; i < inputs.size(); ++i) {
    auto prev_cnode = inputs[i]->cast<CNodePtr>();
    bool bool_result_prev_cnode = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
    if (bool_result_prev_cnode) {
      continue;
    }
    ValueNodePtr prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
    PrimitivePtr prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
    size_t output_index = 0;
    while ((IsAutoParallelCareNode(prev_cnode)) || (prev_prim->name() == prim::kTupleGetItem) ||
           (prev_prim->name() == DEPEND)) {
      if (IsAutoParallelCareNode(prev_cnode)) {
        auto prev_op_info = prev_cnode->user_data<OperatorInfo>();
        CreateEdgeBetweenTwoOps(prev_op_info, node_op_info, cnode, prev_cnode, prim, prev_prim, output_index, i,
                                &edge_count);
        break;
      } else if (prev_prim->name() == prim::kTupleGetItem) {
        // In this case, 'prev_anf_node' is 'tuple_getitem'; the actual precursor node is the node before
        // this 'tuple_getitem'
        MS_LOG(INFO) << "Jumping the 'tuple_getitem' operator.";
        output_index = LongToSize(GetValue<int64_t>(GetValueNode(prev_cnode->input(2))));
        prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
        bool bool_result_tuple = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
        if (bool_result_tuple) {
          break;
        }
        prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
        prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
        if (!IsAutoParallelCareNode(prev_cnode)) {
          MS_LOG(EXCEPTION) << "Did not create OperatorInfo for: " << prev_prim->name();
        }
        MS_LOG(INFO) << "Jumped the 'tuple_getitem' operator, "
                     << "and creating an edge between the Operator before "
                     << "'tuple_getitem' and the Operator after 'tuple_getitem'.";
      } else if (prev_prim->name() == DEPEND) {
        // In this case, 'prev_anf_node' is 'depend'; the actual precursor node is the node before
        // this 'depend'
        MS_LOG(INFO) << "Jumping the 'depend' operator.";
        prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
        bool bool_result_depend = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
        if (bool_result_depend) {
          break;
        }
        prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
        prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
        MS_LOG(INFO) << "Jumped the 'depend' operator, "
                     << "and creating an edge between the Operator before "
                     << "'depend' and the Operator after 'depend'.";
      }
    }
  }
  MS_LOG(INFO) << "Successfully created " << edge_count << " edges for: " << node_op_info->name();
}
void ConstructCostGraphEdges(const std::vector<AnfNodePtr> &all_nodes) {
  // Step 2
  MS_LOG(INFO) << "Constructing edges for cost graph begins.";
  for (auto &node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    if ((cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    if (!IsAutoParallelCareNode(cnode)) {
      continue;
    }
    ConstructCNodeCostGraphEdges(cnode);
  }
  ApplyApproximationForGraphs();
  MS_LOG(INFO) << "Constructing edges for cost graph ends.";
}
void ApplyApproximationForParaNode(const OperatorInfoPtr &target_op_info) {
  // If 'approximation' is enabled, the target node's strategies and related edge costs are made exact
  // before the identity edge is initialized.
  auto approximation = CostModelContext::GetInstance()->dp_algo_enable_approxi();
  if (approximation) {
    target_op_info->ExactStrategiesAndRelatedEdges();
  }
}
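// Step 3 of the search: when one Parameter feeds multiple operators, create a
// TmpIdentityInfo operator for it and add an edge to every user, so that the cost
// model accounts for the shared parameter consistently.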
void AugmentCostGraph(const std::vector<AnfNodePtr> &all_nodes) {
  // Step 3
  for (auto &node : all_nodes) {
    ParameterUsersInfo parameter_users_info = FindParameterUsers(node, IsAutoParallelCareNode);
    auto parameter_name = parameter_users_info.first;
    auto target_parameter = parameter_users_info.second.first;
    auto target_set = parameter_users_info.second.second;
    if (target_set.size() <= 1) {
      continue;
    }
    // Rule out the case where a Parameter is used by an Operator, but the Operator appears in multiple CNodes
    std::set<std::string> target_without_duplicate;
    for (auto &target : target_set) {
      auto target_cnode = target.first->cast<CNodePtr>();
      auto input_index = target.second;
      (void)target_without_duplicate.insert(std::to_string(input_index) +
                                            target_cnode->user_data<OperatorInfo>()->name());
    }
    if (target_without_duplicate.size() <= 1) {
      continue;
    }
    // Here, it is certain that this Parameter (RefKey) is used by multiple Operators.
    OperatorInfoPtr tmp_identity_ptr;
    bool new_identity = false;
    std::string tmp_identity_name;
    auto returned_identity = entire_costgraph->FindTmpIdentityByParameterName(parameter_name);
    if (returned_identity != nullptr) {
      // In this case, the TmpIdentityInfo instance has already been created
      new_identity = false;
      tmp_identity_ptr = returned_identity;
      tmp_identity_name = tmp_identity_ptr->name();
    } else {
      // In this case, the TmpIdentityInfo instance has NOT been created. Thus, a new one is created.
      new_identity = true;
      // 1) extract the input shape from this Parameter
      MS_EXCEPTION_IF_NULL(target_parameter);
      AbstractBasePtr abstract = target_parameter->abstract();
      if (abstract == nullptr) {
        MS_LOG(EXCEPTION) << "Failure: abstract is nullptr";
      }
      auto input_shape = dyn_cast<abstract::Shape>(abstract->GetShapeTrack());
      if (input_shape == nullptr) {
        MS_LOG(EXCEPTION) << "Failure: input_shape is nullptr";
      }
      Shape shape = input_shape->shape();
      Shapes inputs_shape = {shape};
      Shapes outputs_shape = {shape};
      // 2) init the attr
      mindspore::HashMap<std::string, ValuePtr> attr = {};
      // Create the TmpIdentity instance
      tmp_identity_ptr = std::make_shared<TmpIdentityInfo>(inputs_shape, outputs_shape, attr);
      tmp_identity_ptr->set_name(tmp_identity_ptr->name() + std::to_string(TOTAL_OPS));
      TOTAL_OPS++;
      tmp_identity_ptr->set_refkey_parameter_name(parameter_name);
      // Set the parameter and type lengths for inputs and outputs
      std::vector<bool> is_parameter;
      auto casted_target_parameter = target_parameter->cast<ParameterPtr>();
      MS_EXCEPTION_IF_NULL(casted_target_parameter);
      is_parameter.push_back(ParameterRequireGrad(casted_target_parameter));
      if (tmp_identity_ptr->set_is_parameter(is_parameter) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Setting parameter for TmpIdentityInfo failed";
      }
      auto node_type = target_parameter->Type();
      if (node_type->isa<mindspore::TensorType>()) {
        auto input_element_type = node_type->cast<mindspore::TensorTypePtr>()->element();
        std::vector<size_t> type_length = {GetLengthOfDataType(input_element_type)};
        if (tmp_identity_ptr->SetInputAndOutputTypeLength(type_length, type_length) != SUCCESS) {
          MS_LOG(EXCEPTION) << "Setting input and output type length for TmpIdentityInfo failed";
        }
      } else {
        MS_LOG(EXCEPTION) << "Unknown type: " << node_type->type_name();
      }
      // Generate strategies for this TmpIdentityInfo instance
      if (tmp_identity_ptr->GenerateStrategies(0) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Strategy search for Operator failed: " << tmp_identity_ptr->name();
      }
    }
    // A flag recording whether new edges have been created or not
    bool add_identity_edge = false;
    // Create edges between this TmpIdentityInfo instance and subsequent Operator instances
    for (auto &target : target_set) {
      auto target_cnode = target.first->cast<CNodePtr>();
      auto input_index = target.second;
      auto target_op_info = target_cnode->user_data<OperatorInfo>();
      std::string edge_name = std::string(IDENTITY_INFO) + OPERATOR_TO_OPERATOR_CONNECTOR + target_op_info->name();
      // If the edge between these two operators has already been added, it will not be added again.
      if (entire_costgraph->IsEdgeInCostGraph(edge_name, 0, LongToSize(input_index - 1))) {
        continue;
      }
      std::shared_ptr<Edge> edge_ptr =
        std::make_shared<Edge>(edge_name, tmp_identity_ptr, target_op_info, 0, input_index - 1, false, true);
      ApplyApproximationForParaNode(target_op_info);
      if (edge_ptr->InitEdgeCost() != SUCCESS) {
        MS_LOG(EXCEPTION) << "Edge cost initialization failed";
      }
      target_op_info->AddPrevEdge(edge_ptr);
      tmp_identity_ptr->AddSuccEdge(edge_ptr);
      entire_costgraph->AddEdge(tmp_identity_ptr, target_op_info, edge_ptr);
      MS_LOG(INFO) << "Successfully added the edge between " << tmp_identity_ptr->name() << " and "
                   << target_op_info->name();
      add_identity_edge = true;
    }
    if (new_identity && add_identity_edge) {
      // Add the TmpIdentityInfo to the CostGraph only if BOTH conditions are satisfied
      entire_costgraph->AddOperator(tmp_identity_ptr);
    }
  }
}
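// Step 1.1: Reshape has no strategies of its own; its cost is derived from the
// strategy costs of its previous and next operators, so it is computed after all
// other OperatorInfos have been created.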
void ReshapeCostCompute(const std::vector<AnfNodePtr> &all_nodes) {
  mindspore::HashSet<std::string> op_cache;
  for (auto node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    if (!FindReshape(cnode, &op_cache)) {
      continue;
    }
    MS_ASSERT(cnode->inputs().size() == 3);
    // get the previous node's strategy_cost_
    auto pre_node = cnode->input(1);
    if (IsPrimitiveCNode(pre_node, prim::kPrimLoad)) {
      pre_node = pre_node->cast<CNodePtr>()->input(1);
    }
    int64_t out_index = 0;
    OperatorInfoPtr pre_operator_info;
    std::vector<std::shared_ptr<StrategyWithCost>> pre_stra_costs;
    auto operator_info = cnode->user_data<OperatorInfo>();
    if (pre_node->isa<Parameter>()) {
      auto reshape_info1 = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
      reshape_info1->SetCostForReshapeWithParameter();
      pre_operator_info = reshape_info1;
      pre_stra_costs = reshape_info1->strategy_cost();
    } else {
      if (!FindReshapePreNodeStraCosts(pre_node, &pre_operator_info, &out_index, 0)) {
        MS_LOG(EXCEPTION) << "FindReshapePreNodeStraCosts for reshape failed";
      }
      pre_stra_costs = pre_operator_info->strategy_cost();
    }
    // get the next node's strategy_cost_
    int64_t in_index = 0;
    OperatorInfoPtr next_operator_info;
    bool is_next_reshape = false;
    std::vector<std::shared_ptr<StrategyWithCost>> next_stra_costs;
    bool find_next_node = FindReshapeNextNodeStraCosts(cnode, &next_operator_info, &in_index, &is_next_reshape, 0);
    if (!find_next_node) {
      MS_LOG(INFO) << "FindReshapeNextNodeStraCosts for reshape failed";
    }
    // set input_layout and output_layout for reshape.
    // init reshape and set the cost for each input_layout and output_layout.
    auto reshape_info = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
    reshape_info->set_pre_operator_name(pre_operator_info->name());
    reshape_info->set_pre_operator_index(out_index);
    if (find_next_node) {
      next_stra_costs = next_operator_info->strategy_cost();
      reshape_info->set_next_operator_name(next_operator_info->name());
      reshape_info->set_next_operator_index(in_index);
    }
    bool is_prev_param = pre_node->isa<Parameter>();
    if (reshape_info->GenerateStrategyCosts(pre_stra_costs, next_stra_costs, out_index, in_index, is_prev_param,
                                            is_next_reshape) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Reshape failed to generate strategy costs!";
    }
  }
}
Status ParallelStrategySearch(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root) {
  // There are 4 meta-steps to determine the parallelization strategy for the ANF graph.
  // Step 1: Traverse the ANF graph, and create NODEs for the costgraph:
  //         create the OperatorInfo object for each primitive, and enumerate the parallelization strategies
  //         for each OperatorInfo;
  // Step 1.1: Deal with 'Reshape':
  //         'Reshape' takes its previous operator's layout as its input layout, and takes its next operator's
  //         layout as its output layout.
  // Step 2: Traverse the ANF graph, and create EDGEs for the costgraph:
  //         create the Edge object for each pair of OperatorInfos, and enumerate the parallelization strategies
  //         for each edge, based on the strategies of the two OperatorInfos;
  // Step 3: Augment the costgraph:
  //         take care of the case of a single Parameter being used by multiple operators. Create a TmpIdentity
  //         operator for this Parameter, and add an edge for the use of this Parameter by each
  //         subsequent operator;
  // Step 3.1: Calculate memory usage:
  //         note that the memory usage calculation differs between the training phase and the inference phase.
  // Step 4: Run the strategy-searching algorithm:
  //         if 'sharding_propagation' is configured to be true, then the configured sharding strategies will
  //         propagate to the non-configured operators, with the goal of minimizing redistribution cost.
  //         Otherwise, the DP algorithm is used to search strategies for the costgraph. Note that there may be
  //         several connected components in the costgraph, and the DP algorithm runs on each of them.
  //
  // OUTPUT: the determined strategy for each operator.
  InitCostGraph();
  // Step 1
  if (CostModelContext::GetInstance()->is_multi_subgraphs()) {
    if (ConstructCostGraphNodesByUniqueIdTC(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  } else {
    if (ConstructCostGraphNodesByUniqueId(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  }
  // Step 1.1
  ReshapeCostCompute(all_nodes);
  // Step 2
  ConstructCostGraphEdges(all_nodes);
  MS_LOG(INFO) << "Constructing edges for cost graph succeeded. There are " << entire_costgraph->GetOperators().size()
               << " operators, and " << entire_costgraph->GetNumEdges() << " edges.";
  // Step 3: Augment the costgraph.
  AugmentCostGraph(all_nodes);
  auto num_ops = entire_costgraph->GetOperators().size();
  SetOpsNumToExecutor(num_ops);
  auto num_edges = entire_costgraph->GetNumEdges();
  MS_LOG(INFO) << "After the augmenting procedure, there are " << num_ops << " operators, and " << num_edges
               << " edges.";
  // Step 3.1: Calculate the memory usage
  if (entire_costgraph->CalculateMemoryCost() != SUCCESS) {
    MS_LOG(EXCEPTION) << "Calculating memory cost failed.";
  }
  // Step 4: run the strategy-searching algorithm
  bool use_sp = (ParallelContext::GetInstance()->strategy_search_mode() == SHARDING_PROPAGATION) ||
                (ParallelContext::GetInstance()->sharding_propagation());
  if (use_sp) {
    entire_costgraph->StrategyPropagate(configured_stra_ops_);
    configured_stra_ops_.clear();
  } else if (GetStrategy(entire_costgraph) != SUCCESS) {
    MS_LOG(ERROR) << "Strategy search for cost graph failed";
    return FAILED;
  }
  MS_LOG(INFO) << "Searching strategy succeeded.";
  if (entire_costgraph->InitSelectedStrategy() == SUCCESS) {
    MS_LOG(INFO) << "Init selected strategy succeeded.";
  } else {
    MS_LOG(EXCEPTION) << "Init selected strategy failed.";
  }
  // print the selected strategies
  for (auto &op : entire_costgraph->GetOperators()) {
    StrategyPtr s_strategy = op->selected_strategy();
    MS_LOG(INFO) << op->name() << " : The strategy is:";
    PrintStrategy(s_strategy);
  }
  ops_in_a_loop_.clear();
  return SUCCESS;
}
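// Replaces every occurrence of the tuple_getitem UniqueId ('it->first') in the
// rec_parser tensor-name list with the UniqueId of the node it forwards
// ('it->second').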
std::vector<std::vector<std::string>> RecInputTensorNames(const std::map<std::string, std::string>::iterator &it,
                                                          std::vector<std::vector<std::string>> input_tensor_names) {
  for (size_t j = 0; j < input_tensor_names.size(); j++) {
    for (size_t k = 0; k < input_tensor_names[j].size(); k++) {
      if (it->first == input_tensor_names[j][k]) {
        input_tensor_names[j][k] = it->second;
        break;
      }
    }
  }
  return input_tensor_names;
}
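// Skips over a chain of 'tuple_getitem'/'Depend' nodes starting at 'cnode' and
// returns the first real Primitive CNode behind them, or nullptr if none exists.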
CNodePtr GetInternalOperatorInfo(const CNodePtr &cnode, const ValueNodePtr &prim_anf_node) {
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  if (prim->name() == prim::kTupleGetItem || prim->name() == DEPEND) {
    auto prev_cnode = cnode->input(1)->cast<CNodePtr>();
    if (prev_cnode == nullptr || !IsValueNode<Primitive>(prev_cnode->input(0))) {
      return nullptr;
    }
    auto prev_prim = prev_cnode->input(0)->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
    while (prev_prim->name() == prim::kTupleGetItem || prev_prim->name() == DEPEND) {
      prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
      if (prev_cnode == nullptr || !IsValueNode<Primitive>(prev_cnode->input(0))) {
        return nullptr;
      }
      prev_prim = prev_cnode->input(0)->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
    }
    return prev_cnode;
  }
  return nullptr;
}
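// If an OperatorInfo named 'name' already exists in the cost graph, rewrite every
// occurrence of 'uniqueid' in the inputs-tensor-name list to that operator's
// first tensor name, so duplicated CNodes share a single entry.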
void ModifyInputsTensorNameListIfOperatorInfoCreated(const std::string &name, const std::string &uniqueid) {
  size_t iter_ops = 0;
  for (auto op : entire_costgraph->GetOperators()) {
    if (op->name() == name) {
      break;
    }
    iter_ops = iter_ops + 1;
  }
  std::vector<std::vector<std::string>> input_tensor_names = entire_costgraph->get_inputs_tensor_name_list();
  for (size_t i = 0; i < input_tensor_names.size(); i++) {
    for (size_t j = 0; j < input_tensor_names[i].size(); j++) {
      if (input_tensor_names[i][j] == uniqueid) {
        input_tensor_names[i][j] = input_tensor_names[iter_ops][0];
      }
    }
  }
  entire_costgraph->set_inputs_tensor_name_list(input_tensor_names);
}
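// Strategy search in RECURSIVE_PROGRAMMING mode: builds the same cost graph as
// the DP path, converts it into the rec_core graph representation, partitions it
// across all devices, and then generates a strategy for every operator.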
Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root) {
  InitCostGraph();
  if (CostModelContext::GetInstance()->is_multi_subgraphs()) {
    if (ConstructCostGraphNodesByUniqueIdTC(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  } else {
    if (ConstructCostGraphNodesByUniqueId(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  }
  ReshapeCostCompute(all_nodes);
  ConstructCostGraphEdges(all_nodes);
  auto ops = entire_costgraph->GetOperators();
  std::vector<std::vector<std::string>> input_tensor_names = entire_costgraph->get_inputs_tensor_name_list();
  auto tuple_getitem_list = entire_costgraph->get_tuple_getitem_list();
  for (auto it = tuple_getitem_list.begin(); it != tuple_getitem_list.end();) {
    input_tensor_names = RecInputTensorNames(it++, input_tensor_names);
  }
  std::shared_ptr<Graph> graph = ParseGraph(ops, input_tensor_names);
  std::shared_ptr<std::vector<std::vector<size_t>>> eli_list = std::make_shared<std::vector<std::vector<size_t>>>();
  std::shared_ptr<std::vector<size_t>> index_list = std::make_shared<std::vector<size_t>>();
  graph = EliminateGraph(graph, eli_list, index_list);
  size_t num_device = g_device_manager->DeviceNum();
  const auto device_memory = CostModelContext::GetInstance()->device_memory_capacity();
  if (PartitionForAllDevices(num_device, device_memory, graph) == SUCCESS) {
    MS_LOG(INFO) << "Partition succeeded with " << num_device << " devices.";
  } else {
    MS_LOG(ERROR) << "PartitionForAllDevices failed.";
    return FAILED;
  }
  bool is_training = true;
  if (!root->has_flag(TRAINING)) {
    is_training = false;
  }
  GenerateStrategy(graph, ops, eli_list, input_tensor_names, index_list, is_training);
  if (entire_costgraph->InitSelectedStrategy() == SUCCESS) {
    MS_LOG(INFO) << "Init selected strategy succeeded.";
  } else {
    MS_LOG(ERROR) << "Init selected strategy failed.";
    return FAILED;
  }
  // print the selected strategies
  for (auto &op : entire_costgraph->GetOperators()) {
    StrategyPtr s_strategy = op->selected_strategy();
    MS_LOG(INFO) << op->name() << " : The strategy is:";
    PrintStrategy(s_strategy);
  }
  return SUCCESS;
}
}  // namespace parallel
}  // namespace mindspore