step_auto_parallel.cc
/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "frontend/parallel/step_auto_parallel.h"
#include <inttypes.h>
#include <sys/time.h>
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "frontend/optimizer/opt.h"
#include "frontend/optimizer/optimizer.h"
#include "frontend/parallel/auto_parallel/dp_algo_costmodel.h"
#include "frontend/parallel/auto_parallel/edge_costmodel.h"
#include "frontend/parallel/auto_parallel/graph_costmodel.h"
#include "frontend/parallel/auto_parallel/rec_core/rec_generate_strategy.h"
#include "frontend/parallel/auto_parallel/rec_core/rec_parse_graph.h"
#include "frontend/parallel/auto_parallel/rec_core/rec_partition.h"
#include "frontend/parallel/context.h"
#include "frontend/parallel/graph_util/node_info.h"
#include "frontend/parallel/ops_info/reshape_info.h"
#include "frontend/parallel/ops_info/tmp_identity_info.h"
#include "frontend/parallel/step_parallel.h"
#include "frontend/parallel/strategy_checkpoint/parallel_strategy_checkpoint.h"
#include "ir/anf.h"
#include "ir/param_info.h"
#include "ir/tensor.h"
namespace mindspore {
namespace parallel {
bool StepAutoParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &) {
  MS_EXCEPTION_IF_NULL(root);
  MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance());
  std::string parallel_mode = ParallelContext::GetInstance()->parallel_mode();
  // assume no change to the graph
  bool changes = false;
  // control whether to use model_parallel mode
  if (!root->has_flag(AUTO_PARALLEL) || (parallel_mode != AUTO_PARALLEL) ||
      root->has_flag(AUTO_PARALLEL_RUN_ONCE_ONLY)) {
    return changes;
  }
  // check whether strategy_search_mode is valid
  std::string strategy_search_mode = ParallelContext::GetInstance()->strategy_search_mode();
  if ((strategy_search_mode != DYNAMIC_PROGRAMMING) && (strategy_search_mode != RECURSIVE_PROGRAMMING)) {
    // Set the searching mode to dynamic programming as the default.
    strategy_search_mode = DYNAMIC_PROGRAMMING;
    MS_LOG(INFO) << "No strategy searching mode indicated, using DP searching mode as the default";
  }
  struct timeval start_time, end_time;
  (void)gettimeofday(&start_time, nullptr);
  if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
    draw::Draw(STEP_AUTO_PARALLEL_BEGIN, root);
  }
  MS_LOG(INFO) << "Now entering step auto parallel";
  TOTAL_OPS = 0;
  AnfNodePtr ret = root->get_return();
  std::vector<AnfNodePtr> all_nodes = DeepScopedGraphSearch(ret);
  if (ParallelInit() != SUCCESS) {
    MS_LOG(EXCEPTION) << "Parallel init failed";
  }
  // mark the forward cnodes; parallelization only cares about these nodes
  MarkForwardCNode(root);
  if (FindCommunicationOp(all_nodes)) {
    MS_LOG(EXCEPTION) << "The graph contains a communication op";
  }
  // search the parallelization strategy
  if (strategy_search_mode == DYNAMIC_PROGRAMMING) {
    if (ParallelStrategySearch(all_nodes, root) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using DP searching mode";
    }
  } else if (strategy_search_mode == RECURSIVE_PROGRAMMING) {
    if (ParallelStrategyRecSearch(all_nodes, root) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using RP searching mode";
    }
  } else {
    MS_LOG(EXCEPTION) << "Unexpected auto-parallel strategy searching mode";
  }
  (void)gettimeofday(&end_time, nullptr);
  uint64_t time = kUSecondInSecond * static_cast<uint64_t>(end_time.tv_sec - start_time.tv_sec);
  time += static_cast<uint64_t>(end_time.tv_usec - start_time.tv_usec);
  MS_LOG(INFO) << "Now leaving step auto parallel, used time: " << time << " us";
  root->set_flag(AUTO_PARALLEL_RUN_ONCE_ONLY, true);
  return changes;
}
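// Note on the elapsed-time arithmetic in StepAutoParallel (illustrative, assuming
// kUSecondInSecond is 1000000, i.e. one second expressed in microseconds): a start of
// {tv_sec: 10, tv_usec: 100000} and an end of {tv_sec: 12, tv_usec: 250000} yield
// 2 * 1000000 + 150000 = 2150000 us; because both terms are accumulated as uint64_t,
// unsigned wraparound keeps the sum correct even when end.tv_usec < start.tv_usec.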
// Given a node, return whether each input is a parameter or the output of an operator.
// The returned boolean vector is in the same order as the inputs, so its implementation
// is kept closely consistent with ExtractShape() in step_parallel.cc
std::vector<bool> ExtractInputParameterByNode(const CNodePtr &node) {
  std::vector<bool> is_parameter;
  std::vector<AnfNodePtr> node_inputs{node->inputs()};
  if ((node_inputs.size() == 2) &&
      (AnfNodeIsPrimitive(node_inputs[1], MAKE_TUPLE) || AnfNodeIsPrimitive(node_inputs[1], MAKE_LIST))) {
    node_inputs = node_inputs[1]->cast<CNodePtr>()->inputs();
  }
  for (size_t i = 1; i < node_inputs.size(); ++i) {
    auto input = node_inputs[i];
    if (input->isa<Parameter>()) {
      auto input_parameter = input->cast<ParameterPtr>();
      is_parameter.push_back(ParameterRequireGrad(input_parameter));
    } else if (input->isa<CNode>() || IsValueNode<tensor::Tensor>(input) || IsValueNode<RefKey>(input)) {
      is_parameter.push_back(false);
    }
  }
  return is_parameter;
}
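// Illustration of ExtractInputParameterByNode: for a MatMul cnode whose first input is
// the output of another CNode and whose second input is a weight Parameter with
// requires_grad set, the function returns {false, true}.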
// Given a type, return the number of bytes needed to represent it
size_t GetLengthOfDataType(const TypePtr &type) {
  switch (type->type_id()) {
    case kNumberTypeBool:
      return sizeof(bool);
    case kNumberTypeInt8:
      return sizeof(int8_t);
    case kNumberTypeInt16:
      return sizeof(int16_t);
    case kNumberTypeInt32:
      return sizeof(int32_t);
    case kNumberTypeInt64:
      return sizeof(int64_t);
    case kNumberTypeUInt8:
      return sizeof(uint8_t);
    case kNumberTypeUInt16:
      return sizeof(uint16_t);
    case kNumberTypeUInt32:
      return sizeof(uint32_t);
    case kNumberTypeUInt64:
      return sizeof(uint64_t);
    case kNumberTypeFloat16:
      return sizeof(float) / 2;
    case kNumberTypeFloat32:
      return sizeof(float);
    case kNumberTypeFloat64:
      return sizeof(double);
    case kNumberTypeInt:
      return sizeof(int);
    case kNumberTypeUInt:
      return sizeof(unsigned int);
    case kNumberTypeFloat:
      return sizeof(float);
    default:
      MS_LOG(EXCEPTION) << "Unexpected type " << type->type_name();
  }
}
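// Examples: GetLengthOfDataType returns 2 for kNumberTypeFloat16 (sizeof(float) / 2),
// 4 for kNumberTypeFloat32, and 8 for kNumberTypeFloat64.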
size_t GetInputsTypeLen(const AnfNodePtr &input) {
  MS_EXCEPTION_IF_NULL(input);
  if (!input->isa<CNode>() && !input->isa<Parameter>() && !IsValueNode<tensor::Tensor>(input)) {
    MS_LOG(EXCEPTION) << "The input node is not a cnode, parameter, or tensor";
  }
  size_t input_type_len = 0;
  auto type = input->Type();
  MS_EXCEPTION_IF_NULL(type);
  if (type->isa<mindspore::TensorType>()) {
    auto input_element_type = type->cast<mindspore::TensorTypePtr>()->element();
    input_type_len = GetLengthOfDataType(input_element_type);
  } else {
    MS_LOG(EXCEPTION) << "Unknown type: " << type->type_name();
  }
  return input_type_len;
}
std::vector<size_t> ExtractInputTypeLengthByNode(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  std::vector<size_t> inputs_type_len;
  std::vector<AnfNodePtr> node_inputs{node->inputs()};
  if ((node_inputs.size() == 2) &&
      (AnfNodeIsPrimitive(node_inputs[1], MAKE_TUPLE) || AnfNodeIsPrimitive(node_inputs[1], MAKE_LIST))) {
    node_inputs = node_inputs[1]->cast<CNodePtr>()->inputs();
  }
  // extract the length of each input element
  for (auto &input : node_inputs) {
    if (IsValueNode<RefKey>(input)) {
      auto func_graph = node->func_graph();
      MS_EXCEPTION_IF_NULL(func_graph);
      std::vector<AnfNodePtr> parameters = FindParameterByRefKeyNode(input, func_graph);
      if (parameters.size() != 1) {
        MS_LOG(EXCEPTION) << "Find parameter by ref key node failed";
      }
      inputs_type_len.push_back(GetInputsTypeLen(parameters[0]));
    } else if (input->isa<CNode>() || input->isa<Parameter>() || IsValueNode<tensor::Tensor>(input)) {
      // extract the input type length from the parameter or apply node
      inputs_type_len.push_back(GetInputsTypeLen(input));
    }
  }
  return inputs_type_len;
}
std::vector<TypePtr> ExtractOutputTypeByNode(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  std::vector<TypePtr> outputs_type;
  // extract the output element type
  auto primary_output_type = node->Type();
  MS_EXCEPTION_IF_NULL(primary_output_type);
  if (primary_output_type->isa<mindspore::Tuple>()) {
    // in this case, the output is a tuple
    auto tuple_output_type = primary_output_type->cast<mindspore::TuplePtr>();
    auto elements = tuple_output_type->elements();
    for (auto &ele : elements) {
      if (ele->isa<mindspore::TensorType>()) {
        auto ele_element_type = ele->cast<mindspore::TensorTypePtr>()->element();
        outputs_type.push_back(ele_element_type);
      } else {
        MS_LOG(EXCEPTION) << "Unknown type: " << primary_output_type->type_name();
      }
    }
  } else {
    // in this case, the output is a single tensor
    if (primary_output_type->isa<mindspore::TensorType>()) {
      auto element_type = primary_output_type->cast<mindspore::TensorTypePtr>()->element();
      outputs_type.push_back(element_type);
    } else {
      MS_LOG(EXCEPTION) << "Unknown type: " << primary_output_type->type_name();
    }
  }
  return outputs_type;
}
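// Illustration of ExtractOutputTypeByNode: a node whose Type() is a Tuple of
// Tensor[float32] and Tensor[float16] yields the two element types in order;
// a node with a single tensor output yields a one-element vector.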
bool IsElementWiseOperator(const std::string &op_name) {
  static const std::set<std::string> elementwise_op = {
    ACTIVATION, GELU, TANH, SOFTMAX, LOG_SOFTMAX, RELU, SQRT, CAST, POW, EXP, LOG, COS, ACOS, LOGICALNOT, NEG,
    SQUARE, SIGMOID, ABS, ACOSH, ASIN, ASINH, ATAN, ATANH, CEIL, COSH, EXPM1, LOG1P, SIN, SINH, TAN, RSQRT,
    RECIPROCAL, INV, ROUND, FLOOR, SIGN, ERF, ERFC, ZEROSLIKE, ONESLIKE, BESSELI0E, MOD, ASSIGN, ASSIGN_ADD,
    ATAN2, DIVNONAN, LOGICALAND, ELU, LOGICALOR, RELU6, SOFTPLUS, SOFTSIGN, LESS, LESSEQUAL, BESSELI1E,
    GREATEREQUAL, APPROXIMATEEQUAL};
  auto iter = elementwise_op.find(op_name);
  return (iter != elementwise_op.end());
}
bool IsSplittableOperator(const std::string &op_name) {
  // clang-format off
  static const std::set<std::string> splittable_op =
    {MATMUL, TRANSPOSE, GELU, TANH, SOFTMAX, SUB, MUL, DIV, RESHAPE, GREATER, LOG_SOFTMAX, ACTIVATION, PRELU,
     FLOORDIV, L2_NORMALIZE, TENSOR_ADD, MAXPOOL, MAXPOOLV2, VIRTUAL_DATA_SET, RELU, ONEHOT, DROPOUT_DO_MASK,
     REDUCE_MAX, REDUCE_MIN, ARGMAXWITHVALUE, ARGMINWITHVALUE, REDUCE_SUM, CONV2D, FUSE_BATCH_NORM, POOLING,
     MAX_POOL_WITH_ARGMAX, SIMPLE_MEAN, FLATTEN, BATCH_NORM, LAYER_NORM, BIAS_ADD, ASSIGN_SUB, COS, ACOS, EXP, PACK,
     LOG, REDUCE_MEAN, REAL_DIV, SIGMOID, POW, MAXIMUM, MINIMUM, EQUAL, NOT_EQUAL, LOGICALNOT, GATHERV2, SQRT, CONCAT,
     STRIDEDSLICE, GET_NEXT, CAST, NEG, SQUARE, BATCH_MATMUL, EXPAND_DIMS, SQUEEZE, SPARSE_GATHERV2, TILE, DROPOUT,
     SOFTMAX_CROSS_ENTROPY_WITH_LOGITS, SIGMOID_CROSS_ENTROPY_WITH_LOGITS, SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS,
     EMBEDDING_LOOKUP, FUSE_BATCH_NORM_EX, SPLIT, BROADCAST_TO, ABS, ACOSH, ASIN, ASINH, ATAN, ATANH, CEIL, COSH,
     EXPM1, LOG1P, SIN, SINH, TAN, RSQRT, INV, RECIPROCAL, ROUND, FLOOR, SIGN, ERF, ERFC, ZEROSLIKE, ONESLIKE,
     BESSELI0E, BESSELI1E, FLOORMOD, ASSIGN, ASSIGN_ADD, ATAN2, DIVNONAN, LOGICALAND, LOGICALOR, ELU, RELU6, RELUV2,
     SOFTPLUS, SOFTSIGN, GREATEREQUAL, LESSEQUAL, LESS, APPROXIMATEEQUAL, MOD};
  // clang-format on
  auto iter = splittable_op.find(op_name);
  return (iter != splittable_op.end());
}
bool IsAutoParallelCareNode(const CNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  ValueNodePtr prim_node = cnode->input(0)->cast<ValueNodePtr>();
  if (prim_node == nullptr) {
    return false;
  }
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_node);
  if (prim == nullptr) {
    return false;
  }
  bool bool_result = IsParallelCareNode(cnode) && !IsSplittableOperator(prim->name());
  if (bool_result && (prim->name() != MAKE_TUPLE) && (prim->name() != MAKE_LIST)) {
    MS_LOG(EXCEPTION) << "OperatorInfo should be implemented for: " << prim->name();
  } else if (prim->name() == CAST) {
    if (cnode->fullname_with_scope().find(OPTIMIZER_SUB_STRING) != std::string::npos) {
      // Do not care about CASTs from the optimizer
      return false;
    }
    return true;
  }
  return IsParallelCareNode(cnode) && IsSplittableOperator(prim->name());
}
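// In short: IsAutoParallelCareNode accepts a parallel-care primitive only if it is
// splittable; a parallel-care primitive without a splittable OperatorInfo raises an
// exception (MAKE_TUPLE and MAKE_LIST excepted), and CAST nodes whose scoped name
// contains OPTIMIZER_SUB_STRING (i.e. CASTs inserted by the optimizer) are skipped.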
OperatorInfoPtr CreateTheOperatorInfo(const PrimitivePtr &prim, const CNodePtr &cnode, StrategyMap *stra_map) {
  MS_EXCEPTION_IF_NULL(prim);
  MS_EXCEPTION_IF_NULL(cnode);
  auto attrs = prim->attrs();
  std::vector<Shapes> shape_list = ExtractShape(cnode);
  if (shape_list.empty()) {
    MS_LOG(EXCEPTION) << "Failure: node " << cnode->UniqueId() << " failed to extract shape";
  }
  // Create an OperatorInfo instance
  OperatorInfoPtr operator_info = NewOperatorInstance(prim, attrs, shape_list);
  MS_EXCEPTION_IF_NULL(operator_info);
  // Set the parameter information for this OperatorInfo (whether the inputs are parameters or not)
  std::vector<bool> parameter_info = ExtractInputParameterByNode(cnode);
  if (operator_info->set_is_parameter(parameter_info) != SUCCESS) {
    MS_LOG(ERROR) << "Initializing parameter information failed for operator: " << operator_info->name();
    return nullptr;
  }
  // Set the data type for inputs and outputs of this OperatorInfo
  auto inputs_type_length = ExtractInputTypeLengthByNode(cnode);
  auto outputs_type = ExtractOutputTypeByNode(cnode);
  std::vector<size_t> outputs_type_length;
  outputs_type_length.reserve(outputs_type.size());
  std::transform(outputs_type.begin(), outputs_type.end(), std::back_inserter(outputs_type_length),
                 GetLengthOfDataType);
  if (operator_info->SetInputAndOutputTypeLength(inputs_type_length, outputs_type_length) != SUCCESS) {
    MS_LOG(ERROR) << "Setting the lengths of inputs and outputs failed for operator: " << operator_info->name();
    return nullptr;
  }
  if (operator_info->set_outputs_type(outputs_type) != SUCCESS) {
    MS_LOG(ERROR) << "Setting the types of outputs failed for operator: " << operator_info->name();
    return nullptr;
  }
  // When the 'inputs' contains numerical values for some operators, these values should be extracted from
  // the ANF graph
  auto &inputs = cnode->inputs();
  std::vector<ValuePtr> input_value;
  for (size_t index = 1; index < inputs.size(); ++index) {
    if (inputs[index]->isa<ValueNode>()) {
      input_value.push_back(GetValueNode(inputs[index]));
    } else {
      input_value.emplace_back(nullptr);
    }
  }
  operator_info->set_input_value(input_value);
  operator_info->set_outputs_dtype(cnode->Type());
  operator_info->set_cnode(cnode);
  // key of the strategy map
  std::string strategy_key_name = "";
  auto param_names = NodeParameterName(cnode);
  if (!param_names.empty()) {
    strategy_key_name = prim->name() + "_" + param_names[0].first;
  }
  bool load_strategy_from_ckpt =
    StrategyCheckpoint::GetInstance().LoadCheckPointOn() && stra_map->find(strategy_key_name) != stra_map->end();
  // If no strategy has been configured for this operator, candidate strategies are generated for
  // auto-strategy searching; if this primitive is CAST, we ignore the user-specified strategy.
  // If the strategy is configured to load from checkpoint, loading the strategy from checkpoint is preferred.
  if ((!StrategyFound(attrs) || prim->name() == CAST) && !load_strategy_from_ckpt) {
    // Compute split_flag_list_, indicating which input has the batch dimension. This is ONLY used in
    // preparation for the BatchParallelInfo operator
    operator_info->ComputeBatchSplitFlagList();
    if (operator_info->GenerateStrategies(0) != SUCCESS) {
      MS_LOG(ERROR) << "Strategy search for Operator " << operator_info->name() << " failed.";
      return nullptr;
    }
  } else {
    // In this case, the configured strategy should be extracted to help set the cost
    StrategyPtr strategyPtr;
    if (load_strategy_from_ckpt) {
      strategyPtr = (*stra_map)[strategy_key_name];
    } else {
      strategyPtr = parallel::ExtractStrategy(attrs);
    }
    if (strategyPtr != nullptr) {
      if (prim->name() == RESHAPE) {
        MS_LOG(EXCEPTION) << "Setting a strategy for Reshape has no effect!";
      }
      // Set the cost for this configured strategy
      if (operator_info->SetCostUnderStrategy(strategyPtr) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Failure: operator " << prim->name() << " SetCostUnderStrategy failed";
      } else if (FULLY_USE_DEVICES) {
        // If configured to fully use devices, then check the user-specified strategy
        int32_t used_devices = operator_info->used_devices();
        MS_EXCEPTION_IF_NULL(g_device_manager);
        auto total_device_num = g_device_manager->GetDeviceListByStageId(0).size();
        // 'used_devices == 1' means the ALL-1 strategy, which is valid in auto-parallel
        if (used_devices == 1) {
          return operator_info;
        }
        // 'used_devices == -1' means that 'used_devices_' is not set
        if ((used_devices == -1) || IntToSize(used_devices) != total_device_num) {
          MS_LOG(EXCEPTION) << "In configuration 'FULLY_USE_DEVICES' = True, "
                            << "but the specified strategy uses device: " << used_devices
                            << ", total devices: " << total_device_num;
        }
      }
    }
  }
  return operator_info;
}
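// Illustration of the strategy-map key in CreateTheOperatorInfo: for a MatMul primitive
// whose first associated parameter is named "w1" (a hypothetical name), strategy_key_name
// becomes "MatMul_w1". If checkpoint loading is enabled and that key is present in
// stra_map, the checkpointed strategy takes precedence over one configured in attrs.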
// Use CNodes' UniqueIds to construct nodes
Status ConstructCostGraphNodesByUniqueId(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &) {
  MS_LOG(INFO) << "Constructing nodes for cost graph begins.";
  entire_costgraph = std::make_shared<CostGraph>();
  entire_costgraph->SetDeviceMemoryAndCostParameter();
  // The map from a CNode's UniqueId to its OperatorInfo
  std::map<std::string, OperatorInfoPtr> from_cnode_to_info;
  // extract the strategy from the checkpoint for multi-train
  StrategyMap stra_map;
  if (StrategyCheckpoint::GetInstance().LoadCheckPointOn()) {
    if (StrategyCheckpoint::GetInstance().Load(&stra_map) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Load strategy checkpoint failed";
    }
  }
  // Step 1
  for (auto &node : all_nodes) {
    // NOTE: we only care about splittable Primitive operators
    auto cnode = node->cast<CNodePtr>();
    bool bool_result = (cnode == nullptr) || (!IsValueNode<Primitive>(cnode->input(0)));
    if (bool_result) {
      continue;
    }
    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    if (!IsAutoParallelCareNode(cnode)) {
      // Needed by rec_parser
      if (ParallelContext::GetInstance()->strategy_search_mode() == RECURSIVE_PROGRAMMING) {
        auto prev_cnode = GetInternalOperatorInfo(cnode, prim_anf_node);
        if (prev_cnode != nullptr) {
          entire_costgraph->add_tuple_getitem(std::make_pair(cnode->UniqueId(), prev_cnode->UniqueId()));
        }
      }
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    MS_EXCEPTION_IF_NULL(prim);
    auto search_cnode = from_cnode_to_info.find(cnode->UniqueId());
    if (search_cnode == from_cnode_to_info.end()) {
      auto operator_info = CreateTheOperatorInfo(prim, cnode, &stra_map);
      if (operator_info == nullptr) {
        return FAILED;
      }
      // Needed by rec_parser
      operator_info->set_type(prim->name());
      std::vector<std::string> inputs_tensor_name = ExtractInputsTensorName(cnode);
      entire_costgraph->AddOperator(operator_info);
      cnode->set_user_data<OperatorInfo>(operator_info);
      MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                   << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                   << " is set OperatorInfo: " << operator_info->name() << ", Primitive: " << prim->name();
      // The map is keyed by UniqueId here, matching the lookup above
      (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueId(), operator_info));
      // Needed by rec_parser
      entire_costgraph->add_inputs_tensor_name(inputs_tensor_name);
    } else {
      // Two CNodes' UniqueIds should not be equal
      MS_LOG(EXCEPTION) << "The CNode with UniqueId: " << cnode->UniqueId()
                        << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                        << " is set OperatorInfo: " << search_cnode->second->name() << ", Primitive: " << prim->name();
    }
  }
  MS_LOG(INFO) << "Constructing nodes for cost graph ends.";
  return SUCCESS;
}
// Use CNodes' UniqueIdThroughCopys to construct nodes
Status ConstructCostGraphNodesByUniqueIdTC(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &) {
  MS_LOG(INFO) << "Constructing nodes for cost graph begins.";
  entire_costgraph = std::make_shared<CostGraph>();
  entire_costgraph->SetDeviceMemoryAndCostParameter();
  // The map from a CNode's UniqueIdThroughCopy to its OperatorInfo
  std::map<std::string, OperatorInfoPtr> from_cnode_to_info;
  // extract the strategy from the checkpoint for multi-train
  StrategyMap stra_map;
  if (StrategyCheckpoint::GetInstance().LoadCheckPointOn()) {
    if (StrategyCheckpoint::GetInstance().Load(&stra_map) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Load strategy checkpoint failed";
    }
  }
  for (auto &node : all_nodes) {
    // NOTE: we only care about splittable Primitive operators
    auto cnode = node->cast<CNodePtr>();
    bool bool_result = (cnode == nullptr) || (!IsValueNode<Primitive>(cnode->input(0)));
    if (bool_result) {
      continue;
    }
    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    if (!IsAutoParallelCareNode(cnode)) {
      // Needed by rec_parser
      if (ParallelContext::GetInstance()->strategy_search_mode() == RECURSIVE_PROGRAMMING) {
        auto prev_cnode = GetInternalOperatorInfo(cnode, prim_anf_node);
        if (prev_cnode != nullptr) {
          entire_costgraph->add_tuple_getitem(std::make_pair(cnode->UniqueId(), prev_cnode->UniqueId()));
        }
      }
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    // Find the OperatorInfo if it exists
    auto search_cnode = from_cnode_to_info.find(cnode->UniqueIdThroughCopy());
    if (search_cnode == from_cnode_to_info.end()) {
      // In this case, the corresponding OperatorInfo has not been created; create a new one.
      auto operator_info = CreateTheOperatorInfo(prim, cnode, &stra_map);
      if (operator_info == nullptr) {
        return FAILED;
      }
      // Needed by rec_parser
      operator_info->set_type(prim->name());
      std::vector<std::string> inputs_tensor_name = ExtractInputsTensorName(cnode);
      entire_costgraph->AddOperator(operator_info);
      cnode->set_user_data<OperatorInfo>(operator_info);
      MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                   << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                   << " is set OperatorInfo: " << operator_info->name() << ", Primitive: " << prim->name();
      (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueIdThroughCopy(), operator_info));
      // Needed by rec_parser
      entire_costgraph->add_inputs_tensor_name(inputs_tensor_name);
    } else {
      auto current_op_ptr = search_cnode->second;
      if (current_op_ptr == nullptr) {
        MS_LOG(EXCEPTION) << "Find " << prim->name() << " from CostGraph failed.";
      } else {
        bool is_find_wrong = (current_op_ptr->name().find(VIRTUAL_DATA_SET_INFO) == std::string::npos) &&
                             (current_op_ptr->name().find(BATCH_PARALLEL) == std::string::npos) &&
                             (current_op_ptr->name().find(prim->name()) == std::string::npos);
        if (is_find_wrong) {
          MS_LOG(EXCEPTION) << "The OperatorInfo: " << current_op_ptr->name()
                            << " does not match the Prim: " << prim->name();
        }
        // Needed by rec_parser
        ModifyInputsTensorNameListIfOperatorInfoCreated(current_op_ptr->name(), cnode->UniqueId());
        cnode->set_user_data<OperatorInfo>(current_op_ptr);
        MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                     << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                     << " is set OperatorInfo: " << current_op_ptr->name() << ", Primitive: " << prim->name();
      }
    }
  }
  MS_LOG(INFO) << "Constructing nodes for cost graph ends.";
  return SUCCESS;
}
void ConstructCostGraphEdges(const std::vector<AnfNodePtr> &all_nodes) {
  // Step 2
  MS_LOG(INFO) << "Constructing edges for cost graph begins.";
  for (auto &node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    bool bool_result_cnode = (cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0));
    if (bool_result_cnode) {
      continue;
    }
    auto &inputs = cnode->inputs();
    ValueNodePtr prim_anf_node = inputs[0]->cast<ValueNodePtr>();
    if (!IsAutoParallelCareNode(cnode)) {
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    size_t edge_count = 0;
    auto node_op_info = cnode->user_data<OperatorInfo>();
    for (size_t i = 1; i < inputs.size(); ++i) {
      auto prev_cnode = inputs[i]->cast<CNodePtr>();
      bool bool_result_prev_cnode = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
      if (bool_result_prev_cnode) {
        continue;
      }
      ValueNodePtr prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
      PrimitivePtr prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
      size_t output_index = 0;
      bool bool_result =
        (IsAutoParallelCareNode(prev_cnode)) || (prev_prim->name() == TUPLE_GETITEM) || (prev_prim->name() == DEPEND);
      while (bool_result) {
        if (IsAutoParallelCareNode(prev_cnode)) {
          auto prev_op_info = prev_cnode->user_data<OperatorInfo>();
          std::string edge_name = prev_op_info->name() + OPERATOR_TO_OPERATOR_CONNECTOR + node_op_info->name();
          // If the edge between these two operators has already been added, it will not be added again.
          if (entire_costgraph->IsEdgeInCostGraph(edge_name, output_index, i - 1)) {
            break;
          }
          EdgePtr edge_ptr;
          MS_LOG(INFO) << "Creating edge: " << edge_name;
          bool follow_strategy = (prim->name() == RESHAPE) || (prev_prim->name() == RESHAPE) ||
                                 (ELEMENTWISE_OP_STRA_FOLLOW && IsElementWiseOperator(prev_prim->name()));
          if (follow_strategy) {
            // Redistribution is not allowed on the edge.
            // Elementwise operators have the same strategy as their previous operators.
            edge_ptr = std::make_shared<Edge>(edge_name, prev_op_info, node_op_info, output_index, i - 1, false, true);
          } else {
            edge_ptr = std::make_shared<Edge>(edge_name, prev_op_info, node_op_info, output_index, i - 1, false);
          }
          // Init the costs for this edge
          if (edge_ptr->InitEdgeCost() != SUCCESS) {
            MS_LOG(EXCEPTION) << "Edge cost initialization failed";
          }
          node_op_info->AddPrevEdge(edge_ptr);
          prev_op_info->AddSuccEdge(edge_ptr);
          entire_costgraph->AddEdge(prev_op_info, node_op_info, edge_ptr);
          MS_LOG(INFO) << "Successfully added the edge between " << prev_op_info->name() << " and "
                       << node_op_info->name();
          edge_count++;
          break;
        } else if (prev_prim->name() == TUPLE_GETITEM) {
          // In this case, 'prev_anf_node' is 'tuple_getitem'; the actual precursor node is the node
          // before this 'tuple_getitem'
          MS_LOG(INFO) << "Jumping the 'tuple_getitem' operator.";
          output_index = IntToSize(GetValue<int>(GetValueNode(prev_cnode->input(2))));
          prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
          bool bool_result_tuple = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
          if (bool_result_tuple) {
            break;
          }
          prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
          prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
          if (!IsAutoParallelCareNode(prev_cnode)) {
            MS_LOG(EXCEPTION) << "Did not create OperatorInfo for: " << prev_prim->name();
          }
          MS_LOG(INFO) << "Jumped the 'tuple_getitem' operator, "
                       << "and creating an edge between the Operator before "
                       << "'tuple_getitem' and the Operator after 'tuple_getitem'.";
        } else if (prev_prim->name() == DEPEND) {
          // In this case, 'prev_anf_node' is 'depend'; the actual precursor node is the node
          // before this 'depend'
          MS_LOG(INFO) << "Jumping the 'depend' operator.";
          prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
          bool bool_result_depend = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
          if (bool_result_depend) {
            break;
          }
          prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
          prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
          MS_LOG(INFO) << "Jumped the 'depend' operator, "
                       << "and creating an edge between the Operator before "
                       << "'depend' and the Operator after 'depend'.";
        }
        bool_result =
          (IsAutoParallelCareNode(prev_cnode)) || (prev_prim->name() == TUPLE_GETITEM) || (prev_prim->name() == DEPEND);
      }
    }
    MS_LOG(INFO) << "Successfully created " << edge_count << " edges for: " << node_op_info->name();
  }
  MS_LOG(INFO) << "Constructing edges for cost graph ends.";
}
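// Illustration of the edge-construction loop: for out = MatMul(x, tuple_getitem(y, 1)),
// the while loop jumps over the tuple_getitem, sets output_index to 1, and then connects
// y's operator to MatMul's operator with an edge named
// prev_op_info->name() + OPERATOR_TO_OPERATOR_CONNECTOR + node_op_info->name().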
void AugmentCostGraph(const std::vector<AnfNodePtr> &all_nodes) {
  // Step 3
  for (auto &node : all_nodes) {
    ParameterUsersInfo parameter_users_info = FindParameterUsers(node, IsAutoParallelCareNode);
    auto parameter_name = parameter_users_info.first;
    auto target_parameter = parameter_users_info.second.first;
    auto target_set = parameter_users_info.second.second;
    if (target_set.size() <= 1) {
      continue;
    }
    // Rule out the case in which a Parameter is used by an Operator, but the Operator appears in multiple CNodes
    std::set<std::string> target_without_duplicate;
    for (auto &target : target_set) {
      auto target_cnode = target.first->cast<CNodePtr>();
      auto input_index = target.second;
      (void)target_without_duplicate.insert(std::to_string(input_index) +
                                            target_cnode->user_data<OperatorInfo>()->name());
    }
    if (target_without_duplicate.size() <= 1) {
      continue;
    }
    // Here, it is certain that this Parameter (RefKey) is used by multiple Operators.
    OperatorInfoPtr tmp_identity_ptr;
    bool new_identity = false;
    std::string tmp_identity_name;
    auto returned_identity = entire_costgraph->FindTmpIdentityByParameterName(parameter_name);
    if (returned_identity != nullptr) {
      // In this case, the TmpIdentityInfo instance has already been created
      new_identity = false;
      tmp_identity_ptr = returned_identity;
      tmp_identity_name = tmp_identity_ptr->name();
    } else {
      // In this case, the TmpIdentityInfo instance has NOT been created, so a new one is created.
      new_identity = true;
      // 1) extract the input shape from this Parameter
      MS_EXCEPTION_IF_NULL(target_parameter);
      AbstractBasePtr abstract = target_parameter->abstract();
      if (abstract == nullptr) {
        MS_LOG(EXCEPTION) << "Failure: abstract is nullptr";
      }
      auto input_shape = dyn_cast<abstract::Shape>(abstract->GetShapeTrack());
      if (input_shape == nullptr) {
        MS_LOG(EXCEPTION) << "Failure: input_shape is nullptr";
      }
      std::vector<int> shape_int = input_shape->shape();
      Shape shape;
      (void)std::transform(shape_int.begin(), shape_int.end(), std::back_inserter(shape),
                           [](int sub_shape) { return static_cast<int64_t>(sub_shape); });
      Shapes inputs_shape = {shape};
      Shapes outputs_shape = {shape};
      // 2) init the attr
      std::unordered_map<std::string, ValuePtr> attr = {};
      // Create the TmpIdentity instance
      tmp_identity_ptr = std::make_shared<TmpIdentityInfo>(inputs_shape, outputs_shape, attr);
      tmp_identity_ptr->set_name(tmp_identity_ptr->name() + std::to_string(TOTAL_OPS));
      TOTAL_OPS++;
      tmp_identity_ptr->set_refkey_parameter_name(parameter_name);
      // Set the parameter and type lengths for inputs and outputs
      std::vector<bool> is_parameter;
      auto casted_target_parameter = target_parameter->cast<ParameterPtr>();
      MS_EXCEPTION_IF_NULL(casted_target_parameter);
      is_parameter.push_back(ParameterRequireGrad(casted_target_parameter));
      if (tmp_identity_ptr->set_is_parameter(is_parameter) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Setting parameter for TmpIdentityInfo failed";
      }
      auto node_type = target_parameter->Type();
      if (node_type->isa<mindspore::TensorType>()) {
        auto input_element_type = node_type->cast<mindspore::TensorTypePtr>()->element();
        std::vector<size_t> type_length = {GetLengthOfDataType(input_element_type)};
        if (tmp_identity_ptr->SetInputAndOutputTypeLength(type_length, type_length) != SUCCESS) {
          MS_LOG(EXCEPTION) << "Setting input and output type length for TmpIdentityInfo failed";
        }
      } else {
        MS_LOG(EXCEPTION) << "Unknown type: " << node_type->type_name();
      }
      // Generate strategies for this TmpIdentityInfo instance.
      if (tmp_identity_ptr->GenerateStrategies(0) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Strategy search for Operator failed: " << tmp_identity_ptr->name();
      }
    }
    // A flag recording whether new edges have been created
    bool add_identity_edge = false;
    // Create edges between this TmpIdentityInfo instance and the subsequent Operator instances
    for (auto &target : target_set) {
      auto target_cnode = target.first->cast<CNodePtr>();
      auto prim = GetValueNode<PrimitivePtr>(target_cnode->input(0));
      auto input_index = target.second;
      auto target_op_info = target_cnode->user_data<OperatorInfo>();
      std::string edge_name = std::string(IDENTITY_INFO) + OPERATOR_TO_OPERATOR_CONNECTOR + target_op_info->name();
      // If the edge between these two operators has already been added, it will not be added again.
      if (entire_costgraph->IsEdgeInCostGraph(edge_name, 0, IntToSize(input_index - 1))) {
        continue;
      }
      std::shared_ptr<Edge> edge_ptr =
        std::make_shared<Edge>(edge_name, tmp_identity_ptr, target_op_info, 0, input_index - 1, false, true);
      if (edge_ptr->InitEdgeCost() != SUCCESS) {
        MS_LOG(EXCEPTION) << "Edge cost initialization failed";
      }
      target_op_info->AddPrevEdge(edge_ptr);
      tmp_identity_ptr->AddSuccEdge(edge_ptr);
      entire_costgraph->AddEdge(tmp_identity_ptr, target_op_info, edge_ptr);
      MS_LOG(INFO) << "Successfully added the edge between " << tmp_identity_ptr->name() << " and "
                   << target_op_info->name();
      add_identity_edge = true;
    }
    if (new_identity && add_identity_edge) {
      // Add the TmpIdentityInfo to the CostGraph only if BOTH conditions are satisfied
      entire_costgraph->AddOperator(tmp_identity_ptr);
    }
  }
}
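// Illustration of AugmentCostGraph: a weight Parameter shared by two MatMul cnodes
// produces one TmpIdentityInfo operator plus one edge per distinct use; the identity
// operator itself joins the cost graph only when it is newly created and at least
// one such edge was actually added.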
bool FindReshape(const CNodePtr &cnode, std::unordered_set<std::string> *op_cache) {
  if ((cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0))) {
    return false;
  }
  if (!IsParallelCareNode(cnode) || !cnode->has_user_data<OperatorInfo>()) {
    return false;
  }
  ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  MS_EXCEPTION_IF_NULL(prim);
  if (prim->name() == RESHAPE) {
    auto operator_info = cnode->user_data<OperatorInfo>();
    std::string op_info_name = operator_info->name();
    if (op_cache->find(op_info_name) != op_cache->end()) {
      return false;
    }
    op_cache->insert(op_info_name);
    return true;
  }
  return false;
}
// find the previous node, then obtain its strategy_cost_ vector to get its layout vector.
bool FindPreNodeStraCosts(const AnfNodePtr &node, OperatorInfoPtr *pre_operator_info, int32_t *out_index) {
  // if the previous node is a parameter, handle it outside.
  if (node->isa<Parameter>()) {
    return false;
  }
  if (!node->isa<CNode>()) {
    return false;
  }
  CNodePtr cnode = node->cast<CNodePtr>();
  if (!IsValueNode<Primitive>(cnode->input(0))) {
    return false;
  }
  auto node_op_info = cnode->user_data<OperatorInfo>();
  if (IsParallelCareNode(cnode) && (node_op_info != nullptr)) {
    *pre_operator_info = node_op_info;
    *out_index = 0;
    return true;
  }
  ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  PrimitivePtr prim = prim_anf_node->value()->cast<PrimitivePtr>();
  if (prim->name() == TUPLE_GETITEM) {
    *out_index = GetTupleGetItemIndex(cnode);
    // find tuple_getitem's previous node
    auto pre_node = cnode->input(1);
    if (!pre_node->isa<CNode>()) {
      MS_LOG(EXCEPTION) << "tuple_getitem's second input is not a cnode";
    }
    CNodePtr pre_cnode = pre_node->cast<CNodePtr>();
    auto pre_op_info = pre_cnode->user_data<OperatorInfo>();
    if (IsParallelCareNode(pre_cnode) && (pre_op_info != nullptr)) {
      *pre_operator_info = pre_op_info;
      return true;
    }
    return false;
  }
  for (size_t index = 0; index < cnode->inputs().size(); ++index) {
    if (prim->name() == DEPEND && index != 1) {
      continue;
    }
    if (!FindPreNodeStraCosts(cnode->inputs()[index], pre_operator_info, out_index)) {
      continue;
    }
    return true;
  }
  MS_LOG(WARNING) << "FindPreNodeStraCosts failed; if reshape is not the first primitive, there must be some error";
  return false;
}
// find the next node, then obtain its strategy_cost_ vector to get its layout vector.
// if reshape's output connects to several primitives, return the first layout found
bool FindNextNodeStraCosts(const CNodePtr &cnode, OperatorInfoPtr *next_operator_info, int32_t *in_index) {
  MS_EXCEPTION_IF_NULL(cnode);
  MS_EXCEPTION_IF_NULL(cnode->func_graph());
  FuncGraphManagerPtr manager = cnode->func_graph()->manager();
  MS_EXCEPTION_IF_NULL(manager);
  AnfNodeIndexSet node_set = manager->node_users()[cnode];
  for (auto &node_pair : node_set) {
    CNodePtr use_apply = node_pair.first->cast<CNodePtr>();
    if (use_apply == nullptr || !IsValueNode<Primitive>(use_apply->input(0))) {
      continue;
    }
    ValueNodePtr prim_anf_node = use_apply->input(0)->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(prim_anf_node);
    PrimitivePtr node_prim = prim_anf_node->value()->cast<PrimitivePtr>();
    MS_EXCEPTION_IF_NULL(node_prim);
    MS_LOG(INFO) << "FindNextLayout prim " << node_prim->name();
    if (node_prim->name() == DEPEND && node_pair.second != 1) {
      continue;
    }
    auto op_info = use_apply->user_data<OperatorInfo>();
    if (IsParallelCareNode(use_apply) && (op_info != nullptr)) {
      MS_LOG(INFO) << "FindNextNodeStraCosts success prim " << node_prim->name();
      *next_operator_info = op_info;
      *in_index = node_pair.second - 1;
      return true;
    }
    MS_LOG(DEBUG) << "FindNextNodeStraCosts failed prim " << node_prim->name() << " " << IsParallelCareNode(use_apply)
                  << " " << (op_info != nullptr);
    if (FindNextNodeStraCosts(use_apply, next_operator_info, in_index)) {
      return true;
    }
  }
  return false;
}
void ReshapeCostCompute(const std::vector<AnfNodePtr> &all_nodes) {
  std::unordered_set<std::string> op_cache;
  for (auto node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    if (!FindReshape(cnode, &op_cache)) {
      continue;
    }
    MS_ASSERT(cnode->inputs().size() == 3);
    // get the previous node's strategy_cost_
    auto pre_node = cnode->input(1);
    int32_t out_index = 0;
    OperatorInfoPtr pre_operator_info;
    std::vector<std::shared_ptr<StrategyWithCost>> pre_stra_costs;
    auto operator_info = cnode->user_data<OperatorInfo>();
    if (pre_node->isa<Parameter>()) {
      auto reshape_info = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
      reshape_info->SetCostForReshapeWithParameter();
      pre_operator_info = reshape_info;
      pre_stra_costs = reshape_info->strategy_cost();
    } else {
      if (!FindPreNodeStraCosts(pre_node, &pre_operator_info, &out_index)) {
        MS_LOG(EXCEPTION) << "FindPreNodeStraCosts for reshape failed";
      }
      pre_stra_costs = pre_operator_info->strategy_cost();
    }
    // get the next node's strategy_cost_
    int32_t in_index = 0;
    OperatorInfoPtr next_operator_info;
    std::vector<std::shared_ptr<StrategyWithCost>> next_stra_costs;
    bool find_next_node = FindNextNodeStraCosts(cnode, &next_operator_info, &in_index);
    if (!find_next_node) {
      MS_LOG(INFO) << "FindNextNodeStraCosts for reshape failed";
    }
    // set input_layout and output_layout for reshape.
    // init reshape and set the cost for each input_layout and output_layout.
    auto reshape_info = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
    reshape_info->set_pre_operator_name(pre_operator_info->name());
    reshape_info->set_pre_operator_index(out_index);
    if (find_next_node) {
      next_stra_costs = next_operator_info->strategy_cost();
      reshape_info->set_next_operator_name(next_operator_info->name());
      reshape_info->set_next_operator_index(in_index);
    }
    bool is_prev_param = pre_node->isa<Parameter>();
    if (reshape_info->GenetateStrategyCosts(pre_stra_costs, next_stra_costs, out_index, in_index, is_prev_param) !=
        SUCCESS) {
      MS_LOG(EXCEPTION) << "reshape generate strategy_costs failed!";
    }
  }
}
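// Illustration of ReshapeCostCompute: for a pre_op -> Reshape -> next_op chain, Reshape
// takes its candidate input layouts from pre_op's strategy_cost_ and its candidate output
// layouts from next_op's; when the input comes directly from a Parameter,
// SetCostForReshapeWithParameter supplies the costs instead.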
Status ParallelStrategySearch(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root) {
  // There are 4 meta-steps to determine the parallelization strategy for the ANF graph.
  // Step 1: Traverse the ANF graph and create NODEs for the costgraph:
  //      create the OperatorInfo object for each primitive, and enumerate the parallelization
  //      strategies for each OperatorInfo;
  // Step 1.1: Deal with 'Reshape':
  //      'Reshape' takes its previous operator's layout as its input layout, and its next operator's
  //      layout as its output layout.
  // Step 2: Traverse the ANF graph and create EDGES for the costgraph:
  //      create the Edge object for each pair of OperatorInfos, and enumerate the parallelization
  //      strategies for each edge, based on the strategies of the two OperatorInfos;
  // Step 3: Augment the costgraph:
  //      take care of the case of a single Parameter being used by multiple operators: create a
  //      TmpIdentity operator for this Parameter, and add an edge for each use of this Parameter by a
  //      subsequent operator;
  // Step 3.1: Calculate memory usage:
  //      note that the memory usage calculation differs between the training and inference phases.
  // Step 4: Run the Dynamic Programming algorithm:
  //      in this process, the cost is calculated based not only on the operators, but also on the edges.
  //      Here, the edge cost is incurred by redistributing one operator's output tensor layout into the
  //      next operator's input tensor layout. Note that there may be several connected components in the
  //      costgraph, and the DP algorithm runs on each of them.
  //
  // OUTPUT: the determined strategy for each operator.
  // Step 1
  if (CostModelContext::GetInstance()->is_multi_subgraphs()) {
    if (ConstructCostGraphNodesByUniqueIdTC(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  } else {
    if (ConstructCostGraphNodesByUniqueId(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  }
  // Step 1.1
  ReshapeCostCompute(all_nodes);
  // Step 2
  ConstructCostGraphEdges(all_nodes);
  MS_LOG(INFO) << "Constructing edges for cost graph succeeded. There are " << entire_costgraph->GetOperators().size()
               << " operators, and " << entire_costgraph->GetNumEdges() << " edges.";
  // Step 3: Augment the costgraph.
  AugmentCostGraph(all_nodes);
  MS_LOG(INFO) << "After the augmenting procedure, there are " << entire_costgraph->GetOperators().size()
               << " operators, and " << entire_costgraph->GetNumEdges() << " edges.";
  // Step 3.1: Calculate the memory usage
  if (entire_costgraph->CalculateMemoryCost() != SUCCESS) {
    MS_LOG(EXCEPTION) << "Calculating memory cost failed.";
  }
  // Step 4: run the DP algorithm on the costgraph.
  if (GetStrategy(entire_costgraph) != SUCCESS) {
    MS_LOG(ERROR) << "Strategy search for cost graph failed";
    return FAILED;
  }
  MS_LOG(INFO) << "Searching strategy succeeded.";
  if (entire_costgraph->InitSelectedStrategy() == SUCCESS) {
    MS_LOG(INFO) << "Init selected strategy succeeded.";
  } else {
    MS_LOG(EXCEPTION) << "Init selected strategy failed.";
  }
  // print the selected strategy
  for (auto &op : entire_costgraph->GetOperators()) {
    StrategyPtr s_strategy = op->selected_strategy();
    MS_LOG(INFO) << op->name() << " : The strategy is:";
    PrintStrategy(s_strategy);
  }
  return SUCCESS;
}
std::vector<std::vector<std::string>> RecInputTensorNames(const std::map<std::string, std::string>::iterator &it,
                                                          std::vector<std::vector<std::string>> input_tensor_names) {
  for (size_t j = 0; j < input_tensor_names.size(); j++) {
    for (size_t k = 0; k < input_tensor_names[j].size(); k++) {
      if (it->first == input_tensor_names[j][k]) {
        input_tensor_names[j][k] = it->second;
        break;
      }
    }
  }
  return input_tensor_names;
}
CNodePtr GetInternalOperatorInfo(const CNodePtr &cnode, const ValueNodePtr &prim_anf_node) {
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  if (prim->name() == TUPLE_GETITEM || prim->name() == DEPEND) {
    auto prev_cnode = cnode->input(1)->cast<CNodePtr>();
    if (prev_cnode == nullptr || !IsValueNode<Primitive>(prev_cnode->input(0))) {
      return nullptr;
    }
    auto prev_prim = prev_cnode->input(0)->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
    while (prev_prim->name() == TUPLE_GETITEM || prev_prim->name() == DEPEND) {
      prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
      if (prev_cnode == nullptr || !IsValueNode<Primitive>(prev_cnode->input(0))) {
        return nullptr;
      }
      prev_prim = prev_cnode->input(0)->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
    }
    return prev_cnode;
  }
  return nullptr;
}
void ModifyInputsTensorNameListIfOperatorInfoCreated(const std::string &name, const std::string &uniqueid) {
  size_t iter_ops = 0;
  for (auto op : entire_costgraph->GetOperators()) {
    if (op->name() == name) {
      break;
    }
    iter_ops = iter_ops + 1;
  }
  std::vector<std::vector<std::string>> input_tensor_names = entire_costgraph->get_inputs_tensor_name_list();
  for (size_t i = 0; i < input_tensor_names.size(); i++) {
    for (size_t j = 0; j < input_tensor_names[i].size(); j++) {
      if (input_tensor_names[i][j] == uniqueid) {
        input_tensor_names[i][j] = input_tensor_names[iter_ops][0];
      }
    }
  }
  entire_costgraph->set_inputs_tensor_name_list(input_tensor_names);
}
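// Illustration of ModifyInputsTensorNameListIfOperatorInfoCreated: if the already-created
// operator named 'name' sits at index k in GetOperators(), every occurrence of 'uniqueid'
// in the recorded input-tensor-name lists is replaced by input_tensor_names[k][0], i.e.
// the name already registered for the original operator.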
Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root) {
  if (CostModelContext::GetInstance()->is_multi_subgraphs()) {
    if (ConstructCostGraphNodesByUniqueIdTC(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  } else {
    if (ConstructCostGraphNodesByUniqueId(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  }
  ReshapeCostCompute(all_nodes);
  auto ops = entire_costgraph->GetOperators();
  std::vector<std::vector<std::string>> input_tensor_names = entire_costgraph->get_inputs_tensor_name_list();
  auto tuple_getitem_list = entire_costgraph->get_tuple_getitem_list();
  for (auto it = tuple_getitem_list.begin(); it != tuple_getitem_list.end();) {
    input_tensor_names = RecInputTensorNames(it++, input_tensor_names);
  }
  std::shared_ptr<Graph> graph = ParseGraph(ops, input_tensor_names);
  std::shared_ptr<std::vector<std::vector<size_t>>> eli_list(new std::vector<std::vector<size_t>>);
  std::shared_ptr<std::vector<size_t>> index_list(new std::vector<size_t>);
  graph = EliminateGraph(graph, eli_list, index_list);
  size_t num_device = g_device_manager->DeviceNum();
  double device_memory = entire_costgraph->GetDeviceMemory();
  if (PartitionForAllDevices(num_device, device_memory, graph) == SUCCESS) {
    MS_LOG(INFO) << "Partition succeeded with " << num_device << " devices.";
  } else {
    MS_LOG(ERROR) << "PartitionForAllDevices failed.";
    return FAILED;
  }
  GenerateStrategy(graph, ops, eli_list, input_tensor_names, index_list);
  if (entire_costgraph->InitSelectedStrategy() == SUCCESS) {
    MS_LOG(INFO) << "Init selected strategy succeeded.";
  } else {
    MS_LOG(ERROR) << "Init selected strategy failed.";
    return FAILED;
  }
  // print the selected strategy
  for (auto &op : entire_costgraph->GetOperators()) {
    StrategyPtr s_strategy = op->selected_strategy();
    MS_LOG(INFO) << op->name() << " : The strategy is:";
    PrintStrategy(s_strategy);
  }
  return SUCCESS;
}
}  // namespace parallel
}  // namespace mindspore