
step_auto_parallel.cc

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "parallel/step_auto_parallel.h"

#include <inttypes.h>
#include <sys/time.h>
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "ir/anf.h"
#include "ir/param_value_py.h"
#include "ir/tensor.h"
#include "optimizer/opt.h"
#include "optimizer/optimizer.h"
#include "parallel/auto_parallel/dp_algo_costmodel.h"
#include "parallel/auto_parallel/edge_costmodel.h"
#include "parallel/auto_parallel/graph_costmodel.h"
#include "parallel/auto_parallel/rec_core/rec_generate_strategy.h"
#include "parallel/auto_parallel/rec_core/rec_parse_graph.h"
#include "parallel/auto_parallel/rec_core/rec_partition.h"
#include "parallel/context.h"
#include "parallel/ops_info/tmp_identity_info.h"
#include "parallel/ops_info/reshape_info.h"
#include "parallel/step_parallel.h"
#include "parallel/strategy_checkpoint/parallel_strategy_checkpoint.h"
#include "pipeline/parse/python_adapter.h"
#include "pipeline/pipeline.h"

namespace mindspore {
namespace parallel {
bool StepAutoParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &) {
  MS_EXCEPTION_IF_NULL(root);
  MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance());
  std::string parallel_mode = ParallelContext::GetInstance()->parallel_mode();
  // Assume no change to the graph.
  bool changes = false;
  // Control whether the model_parallel mode is used.
  if (!root->has_flag(AUTO_PARALLEL) || (parallel_mode != AUTO_PARALLEL) ||
      root->has_flag(AUTO_PARALLEL_RUN_ONCE_ONLY)) {
    return changes;
  }
  // Check whether strategy_search_mode is valid.
  std::string strategy_search_mode = ParallelContext::GetInstance()->strategy_search_mode();
  if ((strategy_search_mode != DYNAMIC_PROGRAMMING) && (strategy_search_mode != RECURSIVE_PROGRAMMING)) {
    // Fall back to the default searching mode: dynamic programming.
    strategy_search_mode = DYNAMIC_PROGRAMMING;
    MS_LOG(INFO) << "Strategy searching mode not indicated, using DP searching mode as default";
  }
  struct timeval start_time, end_time;
  (void)gettimeofday(&start_time, nullptr);
  if (MsContext::GetInstance()->save_graphs_flag()) {
    draw::Draw(STEP_AUTO_PARALLEL_BEGIN, root);
  }
  MS_LOG(INFO) << "Now entering step auto parallel";
  TOTAL_OPS = 0;
  AnfNodePtr ret = root->get_return();
  std::vector<AnfNodePtr> all_nodes = DeepScopedGraphSearch(ret);
  if (ParallelInit() != SUCCESS) {
    MS_LOG(EXCEPTION) << "Parallel init failed";
  }
  // Mark the forward CNodes; parallelization only cares about these nodes.
  MarkForwardCNode(root);
  if (FindCommunicationOp(all_nodes)) {
    MS_LOG(EXCEPTION) << "The graph contains a communication op";
  }
  // Search the parallelization strategy.
  if (strategy_search_mode == DYNAMIC_PROGRAMMING) {
    if (ParallelStrategySearch(all_nodes, root) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using DP searching mode";
    }
  } else if (strategy_search_mode == RECURSIVE_PROGRAMMING) {
    if (ParallelStrategyRecSearch(all_nodes, root) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using RP searching mode";
    }
  } else {
    MS_LOG(EXCEPTION) << "Auto-parallel strategy searching mode unexpected";
  }
  (void)gettimeofday(&end_time, nullptr);
  // Elapsed time in microseconds: the seconds difference scaled by kUSecondInSecond,
  // plus the microseconds difference.
  uint64_t time = kUSecondInSecond * static_cast<uint64_t>(end_time.tv_sec - start_time.tv_sec);
  time += static_cast<uint64_t>(end_time.tv_usec - start_time.tv_usec);
  MS_LOG(INFO) << "Now leaving step auto parallel, used time: " << time << " us";
  root->set_flag(AUTO_PARALLEL_RUN_ONCE_ONLY, true);
  return changes;
}

// Given a node, return whether each input is a parameter or an output of an operator.
// The returned boolean vector is in the same order as the inputs, so its implementation
// is closely consistent with ExtractShape() in step_parallel.cc.
std::vector<bool> ExtractInputParameterByNode(const CNodePtr &node) {
  std::vector<bool> is_parameter;
  std::vector<AnfNodePtr> node_inputs{node->inputs()};
  for (size_t i = 1; i < node_inputs.size(); ++i) {
    auto input = node_inputs[i];
    if (input->isa<Parameter>()) {
      auto input_parameter = input->cast<ParameterPtr>();
      if (input_parameter->has_default()) {
        auto param_value = std::dynamic_pointer_cast<ParamValuePy>(input_parameter->default_param());
        bool require_grad = py::cast<bool>(parse::python_adapter::GetPyObjAttr(param_value->value(), "requires_grad"));
        is_parameter.push_back(require_grad);
      } else {
        is_parameter.push_back(false);
      }
    } else if (input->isa<CNode>() || IsValueNode<tensor::Tensor>(input) || IsValueNode<RefKey>(input)) {
      is_parameter.push_back(false);
    }
  }
  return is_parameter;
}
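
// Example (illustrative, not from the original source): for a CNode such as MatMul(x, w), where x
// is produced by another operator and w is a trainable Parameter (requires_grad == true), the
// returned vector is {false, true}. A Parameter without a default value, or with
// requires_grad == false, contributes false.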

// Given the type, return the number of bytes needed to represent this type.
size_t GetLengthOfDataType(const TypePtr &type) {
  switch (type->type_id()) {
    case kNumberTypeBool:
      return sizeof(bool);
    case kNumberTypeInt8:
      return sizeof(int8_t);
    case kNumberTypeInt16:
      return sizeof(int16_t);
    case kNumberTypeInt32:
      return sizeof(int32_t);
    case kNumberTypeInt64:
      return sizeof(int64_t);
    case kNumberTypeUInt8:
      return sizeof(uint8_t);
    case kNumberTypeUInt16:
      return sizeof(uint16_t);
    case kNumberTypeUInt32:
      return sizeof(uint32_t);
    case kNumberTypeUInt64:
      return sizeof(uint64_t);
    case kNumberTypeFloat16:
      return sizeof(float) / 2;
    case kNumberTypeFloat32:
      return sizeof(float);
    case kNumberTypeFloat64:
      return sizeof(double);
    case kNumberTypeInt:
      return sizeof(int);
    case kNumberTypeUInt:
      return sizeof(unsigned int);
    case kNumberTypeFloat:
      return sizeof(float);
    default:
      MS_LOG(EXCEPTION) << "Unexpected type " << type->type_name();
  }
}
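
// Worked example (illustrative): a float16 element takes sizeof(float) / 2 == 2 bytes, so a
// [32, 64] float16 tensor occupies 32 * 64 * 2 = 4096 bytes, while the same shape in float32
// takes 8192 bytes. These element lengths feed the per-operator size estimates used by the
// cost model below.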

size_t GetInputsTypeLen(const AnfNodePtr &input) {
  MS_EXCEPTION_IF_NULL(input);
  if (!input->isa<CNode>() && !input->isa<Parameter>() && !IsValueNode<tensor::Tensor>(input)) {
    MS_LOG(EXCEPTION) << "The input node is not a CNode, Parameter, or Tensor";
  }
  size_t input_type_len = 0;
  auto type = input->Type();
  MS_EXCEPTION_IF_NULL(type);
  if (type->isa<mindspore::TensorType>()) {
    auto input_element_type = type->cast<mindspore::TensorTypePtr>()->element();
    input_type_len = GetLengthOfDataType(input_element_type);
  } else {
    MS_LOG(EXCEPTION) << "Unknown type: " << type->type_name();
  }
  return input_type_len;
}

std::vector<size_t> ExtractInputTypeLengthByNode(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  std::vector<size_t> inputs_type_len;
  std::vector<AnfNodePtr> node_inputs{node->inputs()};
  // Extract the element length of each input.
  for (auto &input : node_inputs) {
    if (IsValueNode<RefKey>(input)) {
      auto func_graph = node->func_graph();
      MS_EXCEPTION_IF_NULL(func_graph);
      std::vector<AnfNodePtr> parameters = FindParameterByRefKeyNode(input, func_graph);
      if (parameters.size() != 1) {
        MS_LOG(EXCEPTION) << "Find parameter by ref key node failed";
      }
      inputs_type_len.push_back(GetInputsTypeLen(parameters[0]));
    } else if (input->isa<CNode>() || input->isa<Parameter>() || IsValueNode<tensor::Tensor>(input)) {
      // Extract the input type length from the parameter or apply node.
      inputs_type_len.push_back(GetInputsTypeLen(input));
    }
  }
  return inputs_type_len;
}

std::vector<TypePtr> ExtractOutputTypeByNode(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  std::vector<TypePtr> outputs_type;
  // Extract the output element type(s).
  auto primary_output_type = node->Type();
  MS_EXCEPTION_IF_NULL(primary_output_type);
  if (primary_output_type->isa<mindspore::Tuple>()) {
    // In this case, the output is a tuple.
    auto tuple_output_type = primary_output_type->cast<mindspore::TuplePtr>();
    auto elements = tuple_output_type->elements();
    for (auto &ele : elements) {
      if (ele->isa<mindspore::TensorType>()) {
        auto ele_element_type = ele->cast<mindspore::TensorTypePtr>()->element();
        outputs_type.push_back(ele_element_type);
      } else {
        MS_LOG(EXCEPTION) << "Unknown type: " << primary_output_type->type_name();
      }
    }
  } else {
    // In this case, the output is a single tensor.
    if (primary_output_type->isa<mindspore::TensorType>()) {
      auto element_type = primary_output_type->cast<mindspore::TensorTypePtr>()->element();
      outputs_type.push_back(element_type);
    } else {
      MS_LOG(EXCEPTION) << "Unknown type: " << primary_output_type->type_name();
    }
  }
  return outputs_type;
}

bool IsElementWiseOperator(const std::string &op_name) {
  static const std::set<std::string> elementwise_op = {ACTIVATION, GELU, TANH, SOFTMAX, LOG_SOFTMAX, RELU,
                                                       SQRT, CAST, POW, EXP, LOG, COS,
                                                       ACOS, LOGICALNOT, NEG, SQUARE, SIGMOID};
  auto iter = elementwise_op.find(op_name);
  return (iter != elementwise_op.end());
}
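
// Note (illustrative): element-wise operators matter here because their output layout can simply
// follow their input layout. For instance, IsElementWiseOperator(RELU) returns true, so (with
// ELEMENTWISE_OP_STRA_FOLLOW enabled) the edge from a RELU to its consumer is later created as
// strategy-following, with no tensor redistribution allowed on it.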

bool IsSplittableOperator(const std::string &op_name) {
  // clang-format off
  static const std::set<std::string> splittable_op =
    {MATMUL, TRANSPOSE, GELU, TANH, SOFTMAX, SUB, MUL, DIV, RESHAPE, GREATER, LOG_SOFTMAX, ACTIVATION, PRELU,
     FLOORDIV, L2_NORMALIZE, TENSOR_ADD, MAXPOOL, MAXPOOLV2, VIRTUAL_DATA_SET, RELU, ONEHOT, DROPOUT_DO_MASK,
     REDUCE_MAX, REDUCE_MIN, ARGMAXWITHVALUE, ARGMINWITHVALUE, REDUCE_SUM, CONV2D, FUSE_BATCH_NORM, POOLING,
     MAX_POOL_WITH_ARGMAX, SIMPLE_MEAN, FLATTEN, BATCH_NORM, LAYER_NORM, BIAS_ADD, ASSIGN_SUB, COS, ACOS, EXP,
     LOG, REDUCE_MEAN, REAL_DIV, SIGMOID, POW, MAXIMUM, MINIMUM, EQUAL, NOT_EQUAL, LOGICALNOT, GATHERV2, SQRT,
     STRIDEDSLICE, GET_NEXT, CAST, NEG, SQUARE, BATCH_MATMUL, EXPAND_DIMS, SQUEEZE, SPARSE_GATHERV2,
     SOFTMAX_CROSS_ENTROPY_WITH_LOGITS, SIGMOID_CROSS_ENTROPY_WITH_LOGITS, SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS};
  // clang-format on
  auto iter = splittable_op.find(op_name);
  return (iter != splittable_op.end());
}

bool IsAutoParallelCareNode(const CNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  ValueNodePtr prim_node = cnode->input(0)->cast<ValueNodePtr>();
  if (prim_node == nullptr) {
    return false;
  }
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_node);
  if (prim == nullptr) {
    return false;
  }
  bool bool_result = IsParallelCareNode(cnode) && !IsSplittableOperator(prim->name());
  if (bool_result) {
    MS_LOG(EXCEPTION) << "Should implement OperatorInfo for: " << prim->name();
  } else if (prim->name() == CAST) {
    if (cnode->fullname_with_scope().find(OPTIMIZER_SUB_STRING) != std::string::npos) {
      // Do not care about CASTs from the optimizer.
      return false;
    }
    return true;
  }
  return IsParallelCareNode(cnode) && IsSplittableOperator(prim->name());
}
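
// Summary (illustrative): a CNode is "cared about" by auto-parallel only when it wraps a Primitive
// that is both parallel-care and splittable. A parallel-care but non-splittable primitive raises an
// exception, since its OperatorInfo is missing, and CASTs inserted by the optimizer are skipped.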

OperatorInfoPtr CreateTheOperatorInfo(const PrimitivePtr &prim, const CNodePtr &cnode, StrategyMap *stra_map) {
  MS_EXCEPTION_IF_NULL(prim);
  MS_EXCEPTION_IF_NULL(cnode);
  auto attrs = prim->attrs();
  std::vector<Shapes> shape_list = ExtractShape(cnode);
  if (shape_list.empty()) {
    MS_LOG(EXCEPTION) << "Failure: node " << cnode->UniqueId() << " failed to extract shape";
  }
  // Create an OperatorInfo instance.
  OperatorInfoPtr operator_info = NewOperatorInstance(prim, attrs, shape_list);
  MS_EXCEPTION_IF_NULL(operator_info);
  // Set the parameter information for this OperatorInfo (whether the inputs are parameters or not).
  std::vector<bool> parameter_info = ExtractInputParameterByNode(cnode);
  if (operator_info->set_is_parameter(parameter_info) != SUCCESS) {
    MS_LOG(ERROR) << "Initializing parameter information failed for operator: " << operator_info->name();
    return nullptr;
  }
  // Set the data types for the inputs and outputs of this OperatorInfo.
  auto inputs_type_length = ExtractInputTypeLengthByNode(cnode);
  auto outputs_type = ExtractOutputTypeByNode(cnode);
  std::vector<size_t> outputs_type_length;
  outputs_type_length.reserve(outputs_type.size());
  std::transform(outputs_type.begin(), outputs_type.end(), std::back_inserter(outputs_type_length),
                 GetLengthOfDataType);
  if (operator_info->SetInputAndOutputTypeLength(inputs_type_length, outputs_type_length) != SUCCESS) {
    MS_LOG(ERROR) << "Setting the lengths of inputs and outputs failed for operator: " << operator_info->name();
    return nullptr;
  }
  if (operator_info->set_outputs_type(outputs_type) != SUCCESS) {
    MS_LOG(ERROR) << "Setting the types of outputs failed for operator: " << operator_info->name();
    return nullptr;
  }
  // When the 'inputs' contain numerical values for some operators, these values should be extracted
  // from the ANF graph.
  auto &inputs = cnode->inputs();
  std::vector<ValuePtr> input_value;
  for (size_t index = 1; index < inputs.size(); ++index) {
    if (inputs[index]->isa<ValueNode>()) {
      input_value.push_back(GetValueNode(inputs[index]));
    } else {
      input_value.emplace_back(nullptr);
    }
  }
  operator_info->set_input_value(input_value);
  operator_info->set_outputs_dtype(cnode->Type());
  operator_info->set_cnode(cnode);
  // Key of the strategy map.
  std::string strategy_key_name = NodeParameterName(cnode);
  bool load_strategy_from_ckpt =
    StrategyCheckpoint::GetInstance().LoadCheckPointOn() && stra_map->find(strategy_key_name) != stra_map->end();
  // If no strategy has been configured for this operator, candidate strategies are generated for
  // auto-strategy searching; if this primitive is CAST, the user-specified strategy is ignored.
  // If the strategy is set to be loaded from the checkpoint, loading it from the checkpoint is preferred.
  if ((!StrategyFound(attrs) || prim->name() == CAST) && !load_strategy_from_ckpt) {
    // Compute split_flag_list_, indicating which input has the batch dimension. This is ONLY used in
    // preparation for the BatchParallelInfo operator.
    operator_info->ComputeBatchSplitFlagList();
    if (operator_info->GenerateStrategies(0) != SUCCESS) {
      MS_LOG(ERROR) << "Strategy search for Operator " << operator_info->name() << " failed.";
      return nullptr;
    }
  } else {
    // In this case, the configured strategy should be extracted to help set the cost.
    StrategyPtr strategyPtr;
    if (load_strategy_from_ckpt) {
      strategyPtr = (*stra_map)[strategy_key_name];
    } else {
      strategyPtr = parallel::ExtractStrategy(attrs);
    }
    if (strategyPtr != nullptr) {
      if (prim->name() == RESHAPE) {
        MS_LOG(EXCEPTION) << "Setting strategy for Reshape has no effect!";
      }
      // Set the cost for this configured strategy.
      if (operator_info->SetCostUnderStrategy(strategyPtr) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Failure: operator " << prim->name() << " SetCostUnderStrategy failed";
      } else if (FULLY_USE_DEVICES) {
        // If configured to fully use devices, check the user-specified strategy.
        int32_t used_devices = operator_info->used_devices();
        MS_EXCEPTION_IF_NULL(g_device_manager);
        auto total_device_num = g_device_manager->GetDeviceListByStageId(0).size();
        // 'used_devices == 1' means the ALL-1 strategy, which is valid in auto-parallel.
        if (used_devices == 1) {
          return operator_info;
        }
        // 'used_devices == -1' means that 'used_devices_' is not set.
        if ((used_devices == -1) || IntToSize(used_devices) != total_device_num) {
          MS_LOG(EXCEPTION) << "In configuration 'FULLY_USE_DEVICES' = True, "
                            << "but the specified strategy uses device: " << used_devices
                            << ", total devices: " << total_device_num;
        }
      }
    }
  }
  return operator_info;
}
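
// Strategy resolution order (illustrative summary of the branches above): 1) a strategy loaded from
// the checkpoint, when checkpoint loading is on and the node's key is present in the map; 2)
// otherwise the user-specified strategy from the primitive's attrs (ignored for CAST, and rejected
// for Reshape); 3) otherwise candidate strategies generated by GenerateStrategies(0) for the
// search to choose from.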

// Use CNodes' UniqueIds to construct the cost-graph nodes.
Status ConstructCostGraphNodesByUniqueId(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &) {
  MS_LOG(INFO) << "Constructing nodes for cost graph begins.";
  entire_costgraph = std::make_shared<CostGraph>();
  entire_costgraph->SetDeviceMemoryAndCostParameter();
  // The map from a CNode's UniqueId to its OperatorInfo.
  std::map<std::string, OperatorInfoPtr> from_cnode_to_info;
  // Extract strategies from the checkpoint for multi-train.
  StrategyMap stra_map;
  if (StrategyCheckpoint::GetInstance().LoadCheckPointOn()) {
    if (StrategyCheckpoint::GetInstance().Load(&stra_map) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Load strategy checkpoint failed";
    }
  }
  // Step 1
  for (auto &node : all_nodes) {
    // NOTE: we only care about splittable Primitive operators.
    auto cnode = node->cast<CNodePtr>();
    bool bool_result = (cnode == nullptr) || (!IsValueNode<Primitive>(cnode->input(0)));
    if (bool_result) {
      continue;
    }
    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    if (!IsAutoParallelCareNode(cnode)) {
      // Needed by rec_parser.
      if (ParallelContext::GetInstance()->strategy_search_mode() == RECURSIVE_PROGRAMMING) {
        auto prev_cnode = GetInternalOperatorInfo(cnode, prim_anf_node);
        if (prev_cnode != nullptr) {
          entire_costgraph->add_tuple_getitem(std::make_pair(cnode->UniqueId(), prev_cnode->UniqueId()));
        }
      }
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    MS_EXCEPTION_IF_NULL(prim);
    auto search_cnode = from_cnode_to_info.find(cnode->UniqueId());
    if (search_cnode == from_cnode_to_info.end()) {
      auto operator_info = CreateTheOperatorInfo(prim, cnode, &stra_map);
      if (operator_info == nullptr) {
        return FAILED;
      }
      // Needed by rec_parser.
      operator_info->set_type(prim->name());
      std::vector<std::string> inputs_tensor_name = ExtractInputsTensorName(cnode);
      entire_costgraph->AddOperator(operator_info);
      (void)cnode->set_operator_info(operator_info);
      MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                   << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                   << " is set OperatorInfo: " << operator_info->name() << ", Primitive: " << prim->name();
      (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueIdThroughCopy(), operator_info));
      // Needed by rec_parser.
      entire_costgraph->add_inputs_tensor_name(inputs_tensor_name);
    } else {
      // Two CNodes' UniqueIds should not be equal.
      MS_LOG(EXCEPTION) << "The CNode with UniqueId: " << cnode->UniqueId()
                        << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                        << " is set OperatorInfo: " << search_cnode->second->name() << ", Primitive: " << prim->name();
    }
  }
  MS_LOG(INFO) << "Constructing nodes for cost graph ends.";
  return SUCCESS;
}
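
// Note (illustrative): this variant looks CNodes up by UniqueId, so distinct CNodes get their own
// OperatorInfo, whereas ConstructCostGraphNodesByUniqueIdTC below looks them up by
// UniqueIdThroughCopy, letting CNodes that are copies of one another share a single OperatorInfo.
// ParallelStrategySearch picks the TC variant when CostModelContext reports is_multi_subgraphs().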

// Use CNodes' UniqueIdThroughCopy values to construct the cost-graph nodes.
Status ConstructCostGraphNodesByUniqueIdTC(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &) {
  MS_LOG(INFO) << "Constructing nodes for cost graph begins.";
  entire_costgraph = std::make_shared<CostGraph>();
  entire_costgraph->SetDeviceMemoryAndCostParameter();
  // The map from a CNode's UniqueIdThroughCopy to its OperatorInfo.
  std::map<std::string, OperatorInfoPtr> from_cnode_to_info;
  // Extract strategies from the checkpoint for multi-train.
  StrategyMap stra_map;
  if (StrategyCheckpoint::GetInstance().LoadCheckPointOn()) {
    if (StrategyCheckpoint::GetInstance().Load(&stra_map) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Load strategy checkpoint failed";
    }
  }
  for (auto &node : all_nodes) {
    // NOTE: we only care about splittable Primitive operators.
    auto cnode = node->cast<CNodePtr>();
    bool bool_result = (cnode == nullptr) || (!IsValueNode<Primitive>(cnode->input(0)));
    if (bool_result) {
      continue;
    }
    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    if (!IsAutoParallelCareNode(cnode)) {
      // Needed by rec_parser.
      if (ParallelContext::GetInstance()->strategy_search_mode() == RECURSIVE_PROGRAMMING) {
        auto prev_cnode = GetInternalOperatorInfo(cnode, prim_anf_node);
        if (prev_cnode != nullptr) {
          entire_costgraph->add_tuple_getitem(std::make_pair(cnode->UniqueId(), prev_cnode->UniqueId()));
        }
      }
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    // Find the OperatorInfo if it exists.
    auto search_cnode = from_cnode_to_info.find(cnode->UniqueIdThroughCopy());
    if (search_cnode == from_cnode_to_info.end()) {
      // In this case, the corresponding OperatorInfo has not been created; create a new one.
      auto operator_info = CreateTheOperatorInfo(prim, cnode, &stra_map);
      if (operator_info == nullptr) {
        return FAILED;
      }
      // Needed by rec_parser.
      operator_info->set_type(prim->name());
      std::vector<std::string> inputs_tensor_name = ExtractInputsTensorName(cnode);
      entire_costgraph->AddOperator(operator_info);
      (void)cnode->set_operator_info(operator_info);
      MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                   << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                   << " is set OperatorInfo: " << operator_info->name() << ", Primitive: " << prim->name();
      (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueIdThroughCopy(), operator_info));
      // Needed by rec_parser.
      entire_costgraph->add_inputs_tensor_name(inputs_tensor_name);
    } else {
      auto current_op_ptr = search_cnode->second;
      if (current_op_ptr == nullptr) {
        MS_LOG(EXCEPTION) << "Find " << prim->name() << " from CostGraph failed.";
      } else {
        bool is_find_wrong = (current_op_ptr->name().find(VIRTUAL_DATA_SET_INFO) == std::string::npos) &&
                             (current_op_ptr->name().find(BATCH_PARALLEL) == std::string::npos) &&
                             (current_op_ptr->name().find(prim->name()) == std::string::npos);
        if (is_find_wrong) {
          MS_LOG(EXCEPTION) << "The OperatorInfo: " << current_op_ptr->name()
                            << " does not match the Prim: " << prim->name();
        }
        (void)cnode->set_operator_info(current_op_ptr);
        MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                     << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                     << " is set OperatorInfo: " << current_op_ptr->name() << ", Primitive: " << prim->name();
      }
    }
  }
  MS_LOG(INFO) << "Constructing nodes for cost graph ends.";
  return SUCCESS;
}

void ConstructCostGraphEdges(const std::vector<AnfNodePtr> &all_nodes) {
  // Step 2
  MS_LOG(INFO) << "Constructing edges for cost graph begins.";
  for (auto &node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    bool bool_result_cnode = (cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0));
    if (bool_result_cnode) {
      continue;
    }
    auto &inputs = cnode->inputs();
    ValueNodePtr prim_anf_node = inputs[0]->cast<ValueNodePtr>();
    if (!IsAutoParallelCareNode(cnode)) {
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    size_t edge_count = 0;
    for (size_t i = 1; i < inputs.size(); ++i) {
      auto prev_cnode = inputs[i]->cast<CNodePtr>();
      bool bool_result_prev_cnode = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
      if (bool_result_prev_cnode) {
        continue;
      }
      ValueNodePtr prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
      PrimitivePtr prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
      size_t output_index = 0;
      bool bool_result =
        (IsAutoParallelCareNode(prev_cnode)) || (prev_prim->name() == TUPLE_GETITEM) || (prev_prim->name() == DEPEND);
      while (bool_result) {
        if (IsAutoParallelCareNode(prev_cnode)) {
          std::string edge_name =
            prev_cnode->operator_info()->name() + OPERATOR_TO_OPERATOR_CONNECTOR + cnode->operator_info()->name();
          // If the edge between these two operators has already been added, it will not be added again.
          if (entire_costgraph->IsEdgeInCostGraph(edge_name, output_index, i - 1)) {
            break;
          }
          EdgePtr edge_ptr;
          MS_LOG(INFO) << "Creating edge: " << edge_name;
          bool follow_strategy = (prim->name() == RESHAPE) || (prev_prim->name() == RESHAPE) ||
                                 (ELEMENTWISE_OP_STRA_FOLLOW && IsElementWiseOperator(prev_prim->name()));
          if (follow_strategy) {
            // Redistribution is not allowed on this edge:
            // element-wise operators have the same strategy as their previous operators.
            edge_ptr = std::make_shared<Edge>(edge_name, prev_cnode->operator_info(), cnode->operator_info(),
                                              output_index, i - 1, false, true);
          } else {
            edge_ptr = std::make_shared<Edge>(edge_name, prev_cnode->operator_info(), cnode->operator_info(),
                                              output_index, i - 1, false);
          }
          // Init the costs for this edge.
          if (edge_ptr->InitEdgeCost() != SUCCESS) {
            MS_LOG(EXCEPTION) << "Edge cost initialization failed";
          }
          cnode->operator_info()->AddPrevEdge(edge_ptr);
          prev_cnode->operator_info()->AddSuccEdge(edge_ptr);
          entire_costgraph->AddEdge(prev_cnode->operator_info(), cnode->operator_info(), edge_ptr);
          MS_LOG(INFO) << "Successfully added the edge between " << prev_cnode->operator_info()->name() << " and "
                       << cnode->operator_info()->name();
          edge_count++;
          break;
        } else if (prev_prim->name() == TUPLE_GETITEM) {
          // In this case, 'prev_anf_node' is 'tuple_getitem'; the actual precursor node is the node
          // before this 'tuple_getitem'.
          MS_LOG(INFO) << "Jumping the 'tuple_getitem' operator.";
          output_index = IntToSize(GetValue<int>(GetValueNode(prev_cnode->input(2))));
          prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
          bool bool_result_tuple = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
          if (bool_result_tuple) {
            break;
          }
          prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
          prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
          if (!IsAutoParallelCareNode(prev_cnode)) {
            MS_LOG(EXCEPTION) << "Did not create OperatorInfo for: " << prev_prim->name();
          }
          MS_LOG(INFO) << "Jumped the 'tuple_getitem' operator, "
                       << "and creating an edge between the Operator before "
                       << "'tuple_getitem' and the Operator after 'tuple_getitem'.";
        } else if (prev_prim->name() == DEPEND) {
          // In this case, 'prev_anf_node' is 'depend'; the actual precursor node is the node before
          // this 'depend'.
          MS_LOG(INFO) << "Jumping the 'depend' operator.";
          prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
          bool bool_result_depend = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
          if (bool_result_depend) {
            break;
          }
          prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
          prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
          MS_LOG(INFO) << "Jumped the 'depend' operator, "
                       << "and creating an edge between the Operator before "
                       << "'depend' and the Operator after 'depend'.";
        }
        bool_result =
          (IsAutoParallelCareNode(prev_cnode)) || (prev_prim->name() == TUPLE_GETITEM) || (prev_prim->name() == DEPEND);
      }
    }
    MS_LOG(INFO) << "Successfully created " << edge_count << " edges for: " << cnode->operator_info()->name();
  }
  MS_LOG(INFO) << "Constructing edges for cost graph ends.";
}
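
// Example (illustrative): given a chain BatchNorm -> tuple_getitem(out, 0) -> ReLU, the
// 'tuple_getitem' is jumped over, output_index becomes 0 (the tuple element taken), and a single
// edge is created directly between the BatchNorm and ReLU OperatorInfos; 'depend' is skipped the
// same way but leaves output_index unchanged.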

std::pair<AnfNodePtr, std::vector<AnfNodePtr>> CNodeWithRefKeys(const AnfNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  std::vector<AnfNodePtr> refkeys;
  if (cnode->isa<CNode>()) {
    auto cnode_ptr = cnode->cast<CNodePtr>();
    auto inputs = cnode_ptr->inputs();
    for (auto &one_input : inputs) {
      if (IsValueNode<RefKey>(one_input)) {
        refkeys.push_back(one_input);
      }
    }
    if (refkeys.size() >= 1) {
      return std::make_pair(cnode, refkeys);
    }
  }
  return {nullptr, refkeys};
}

void AugmentCostGraph(const std::vector<AnfNodePtr> &all_nodes) {
  // Step 3
  for (auto &node : all_nodes) {
    auto cnode_with_refkeys = CNodeWithRefKeys(node);
    if ((!node->isa<Parameter>()) && (cnode_with_refkeys.first == nullptr)) {
      continue;
    }
    std::string parameter_name;
    AnfNodePtr target_parameter = nullptr;
    AnfNodeIndexSet target_set;
    if (cnode_with_refkeys.first != nullptr) {
      // Deal with the RefKey case.
      auto refkeys = cnode_with_refkeys.second;
      auto cnode = cnode_with_refkeys.first;
      auto cnode_ptr = cnode->cast<CNodePtr>();
      if (cnode_ptr == nullptr || !IsValueNode<Primitive>(cnode_ptr->input(0))) {
        continue;
      }
      if (!IsAutoParallelCareNode(cnode_ptr)) {
        continue;
      }
      if (refkeys.size() > 1) {
        MS_LOG(EXCEPTION) << "CNode: " << cnode->fullname_with_scope() << "'s inputs have more than 1 RefKey.";
      }
      MS_EXCEPTION_IF_NULL(cnode->func_graph());
      auto cnode_func_graph = cnode->func_graph();
      MS_EXCEPTION_IF_NULL(cnode->func_graph()->manager());
      // Find the RefKey being used.
      auto candidate_set_by_refkey = cnode_func_graph->manager()->node_users()[refkeys[0]];
      for (auto &candidate : candidate_set_by_refkey) {
        auto candidate_node = candidate.first;
        auto c = candidate_node->cast<CNodePtr>();
        if (c == nullptr || !IsValueNode<Primitive>(c->input(0))) {
          continue;
        }
        if (!IsAutoParallelCareNode(c)) {
          continue;
        }
        target_set.add(candidate);
      }
      // Find the corresponding Parameter being used.
      std::vector<AnfNodePtr> parameters = FindParameterByRefKeyNode(refkeys[0], cnode_func_graph);
      if (parameters.size() != 1) {
        MS_LOG(EXCEPTION) << "Find parameter by ref key node failed";
      }
      parameter_name = parameters[0]->cast<ParameterPtr>()->name();
      target_parameter = parameters[0];
      auto candidate_set_by_para = cnode_func_graph->manager()->node_users()[parameters[0]];
      for (auto &candidate : candidate_set_by_para) {
        auto candidate_node = candidate.first;
        auto c = candidate_node->cast<CNodePtr>();
        if (c == nullptr || !IsValueNode<Primitive>(c->input(0))) {
          continue;
        }
        if (!IsAutoParallelCareNode(c)) {
          continue;
        }
        (void)target_set.insert(candidate);
      }
    } else if (node->isa<Parameter>()) {
      // Deal with the Parameter case.
      MS_EXCEPTION_IF_NULL(node->func_graph());
      MS_EXCEPTION_IF_NULL(node->func_graph()->manager());
      auto candidate_set = node->func_graph()->manager()->node_users()[node];
      for (auto &candidate : candidate_set) {
        auto candidate_node = candidate.first;
        auto c = candidate_node->cast<CNodePtr>();
        if (c == nullptr || !IsValueNode<Primitive>(c->input(0))) {
          continue;
        }
        if (!IsAutoParallelCareNode(c)) {
          continue;
        }
        (void)target_set.insert(candidate);
      }
      // In this case, node is a Parameter.
      parameter_name = node->cast<ParameterPtr>()->name();
      target_parameter = node;
    }
    if (target_set.size() <= 1) {
      continue;
    }
    // Rule out the case in which a Parameter is used by one Operator, but that Operator appears in
    // multiple CNodes.
    std::set<std::string> target_without_duplicate;
    for (auto &target : target_set) {
      auto target_cnode = target.first->cast<CNodePtr>();
      auto input_index = target.second;
      (void)target_without_duplicate.insert(std::to_string(input_index) + target_cnode->operator_info()->name());
    }
    if (target_without_duplicate.size() <= 1) {
      continue;
    }
    // Here, it is certain that this Parameter (RefKey) is used by multiple Operators.
    OperatorInfoPtr tmp_identity_ptr;
    bool new_identity = false;
    std::string tmp_identity_name;
    auto returned_identity = entire_costgraph->FindTmpIdentityByParameterName(parameter_name);
    if (returned_identity != nullptr) {
      // In this case, the TmpIdentityInfo instance has already been created.
      new_identity = false;
      tmp_identity_ptr = returned_identity;
      tmp_identity_name = tmp_identity_ptr->name();
    } else {
      // In this case, the TmpIdentityInfo instance has NOT been created, so create a new one.
      new_identity = true;
      // 1) extract the input shape from this Parameter
      MS_EXCEPTION_IF_NULL(target_parameter);
      AbstractBasePtr abstract = target_parameter->abstract();
      if (abstract == nullptr) {
        MS_LOG(EXCEPTION) << "Failure: abstract is nullptr";
      }
      auto input_shape = dyn_cast<abstract::Shape>(abstract->GetShapeTrack());
      if (input_shape == nullptr) {
        MS_LOG(EXCEPTION) << "Failure: input_shape is nullptr";
      }
      std::vector<int> shape_int = input_shape->shape();
      Shape shape;
      (void)std::transform(shape_int.begin(), shape_int.end(), std::back_inserter(shape),
                           [](int sub_shape) { return static_cast<int32_t>(sub_shape); });
      Shapes inputs_shape = {shape};
      Shapes outputs_shape = {shape};
      // 2) init the attr
      std::unordered_map<std::string, ValuePtr> attr = {};
      // Create the TmpIdentity instance.
      tmp_identity_ptr = std::make_shared<TmpIdentityInfo>(inputs_shape, outputs_shape, attr);
      tmp_identity_ptr->set_name(tmp_identity_ptr->name() + std::to_string(TOTAL_OPS));
      TOTAL_OPS++;
      tmp_identity_ptr->set_refkey_parameter_name(parameter_name);
      // Set the parameter and type lengths for inputs and outputs.
      std::vector<bool> is_parameter;
      auto casted_target_parameter = target_parameter->cast<ParameterPtr>();
      MS_EXCEPTION_IF_NULL(casted_target_parameter);
      if (casted_target_parameter->has_default()) {
        auto param_value = std::dynamic_pointer_cast<ParamValuePy>(casted_target_parameter->default_param());
        bool require_grad = py::cast<bool>(parse::python_adapter::GetPyObjAttr(param_value->value(), "requires_grad"));
        is_parameter.push_back(require_grad);
      } else {
        is_parameter.push_back(false);
      }
      if (tmp_identity_ptr->set_is_parameter(is_parameter) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Setting parameter for TmpIdentityInfo failed";
      }
      auto node_type = target_parameter->Type();
      if (node_type->isa<mindspore::TensorType>()) {
        auto input_element_type = node_type->cast<mindspore::TensorTypePtr>()->element();
        std::vector<size_t> type_length = {GetLengthOfDataType(input_element_type)};
        if (tmp_identity_ptr->SetInputAndOutputTypeLength(type_length, type_length) != SUCCESS) {
          MS_LOG(EXCEPTION) << "Setting input and output type length for TmpIdentityInfo failed";
        }
      } else {
        MS_LOG(EXCEPTION) << "Unknown type: " << node_type->type_name();
      }
      // Generate strategies for this TmpIdentityInfo instance.
      if (tmp_identity_ptr->GenerateStrategies(0) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Strategy search for Operator failed: " << tmp_identity_ptr->name();
      }
    }
    // A flag recording whether new edges have been created or not.
    bool add_identity_edge = false;
    // Create edges between this TmpIdentityInfo instance and the subsequent Operator instances.
    for (auto &target : target_set) {
      auto target_cnode = target.first->cast<CNodePtr>();
      auto prim = GetValueNode<PrimitivePtr>(target_cnode->input(0));
      auto input_index = target.second;
      std::string edge_name =
        std::string(IDENTITY_INFO) + OPERATOR_TO_OPERATOR_CONNECTOR + target_cnode->operator_info()->name();
      // If the edge between these two operators has already been added, it will not be added again.
      if (entire_costgraph->IsEdgeInCostGraph(edge_name, 0, IntToSize(input_index - 1))) {
        continue;
      }
      std::shared_ptr<Edge> edge_ptr = std::make_shared<Edge>(
        edge_name, tmp_identity_ptr, target_cnode->operator_info(), 0, input_index - 1, false, true);
      if (edge_ptr->InitEdgeCost() != SUCCESS) {
        MS_LOG(EXCEPTION) << "Edge cost initialization failed";
      }
      target_cnode->operator_info()->AddPrevEdge(edge_ptr);
      tmp_identity_ptr->AddSuccEdge(edge_ptr);
      entire_costgraph->AddEdge(tmp_identity_ptr, target_cnode->operator_info(), edge_ptr);
      MS_LOG(INFO) << "Successfully added the edge between " << tmp_identity_ptr->name() << " and "
                   << target_cnode->operator_info()->name();
      add_identity_edge = true;
    }
    if (new_identity && add_identity_edge) {
      // Add the TmpIdentityInfo to the CostGraph only if BOTH conditions are satisfied.
      entire_costgraph->AddOperator(tmp_identity_ptr);
    }
  }
}
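
// Example (illustrative): if a shared Parameter 'w' feeds both a MatMul and a GatherV2, target_set
// contains two distinct users, so a TmpIdentity operator is created for 'w' and one
// strategy-following edge is added from the TmpIdentity to each user, letting the search treat the
// shared tensor as the output of a single (cost-free) operator rather than two unrelated inputs.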

bool FindReshape(const CNodePtr &cnode) {
  if ((cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0))) {
    return false;
  }
  ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  if (!IsParallelCareNode(cnode) || (cnode->operator_info() == nullptr)) {
    return false;
  }
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  MS_EXCEPTION_IF_NULL(prim);
  OperatorInfoPtr operator_info = cnode->operator_info();
  if (operator_info == nullptr) {
    MS_LOG(EXCEPTION) << "Failure: Primitive " << prim->ToString() << " OperatorInstance is nullptr";
  }
  if (prim->name() != RESHAPE) {
    return false;
  }
  return true;
}

// Find the previous node, then obtain its strategy_cost_ vector to get its layout vector.
bool FindPreNodeStraCosts(const AnfNodePtr &node, OperatorInfoPtr *pre_operator_info, int32_t *out_index) {
  // If the previous node is a Parameter, handle it outside.
  if (node->isa<Parameter>()) {
    return false;
  }
  if (!node->isa<CNode>()) {
    return false;
  }
  CNodePtr cnode = node->cast<CNodePtr>();
  if (!IsValueNode<Primitive>(cnode->input(0))) {
    return false;
  }
  if (IsParallelCareNode(cnode) && (cnode->operator_info() != nullptr)) {
    *pre_operator_info = cnode->operator_info();
    *out_index = 0;
    return true;
  }
  ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  PrimitivePtr prim = prim_anf_node->value()->cast<PrimitivePtr>();
  if (prim->name() == TUPLE_GETITEM) {
    *out_index = GetTupleGetItemIndex(cnode);
    // Find tuple_getitem's previous node.
    auto pre_node = cnode->input(1);
    if (!pre_node->isa<CNode>()) {
      MS_LOG(EXCEPTION) << "tuple_getitem's second input is not a CNode";
    }
    CNodePtr pre_cnode = pre_node->cast<CNodePtr>();
    if (IsParallelCareNode(pre_cnode) && (pre_cnode->operator_info() != nullptr)) {
      *pre_operator_info = pre_cnode->operator_info();
      return true;
    }
    return false;
  }
  for (size_t index = 0; index < cnode->inputs().size(); ++index) {
    if (prim->name() == DEPEND && index != 1) {
      continue;
    }
    if (!FindPreNodeStraCosts(cnode->inputs()[index], pre_operator_info, out_index)) {
      continue;
    }
    return true;
  }
  MS_LOG(WARNING) << "FindPreNodeStraCosts failed; if Reshape is not the first primitive, there must be some error";
  return false;
}
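
// Note (illustrative): the search walks through "transparent" nodes. For Op -> tuple_getitem(t, k)
// -> Reshape, out_index is set to k and Op's OperatorInfo is returned; for 'depend', only input 1
// (the real data input) is followed, since its other inputs only express ordering dependencies.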

// Find the next node, then obtain its strategy_cost_ vector to get its layout vector.
// If Reshape's output connects to several primitives, return the first layout found.
bool FindNextNodeStraCosts(const CNodePtr &cnode, OperatorInfoPtr *next_operator_info, int32_t *in_index) {
  MS_EXCEPTION_IF_NULL(cnode);
  MS_EXCEPTION_IF_NULL(cnode->func_graph());
  FuncGraphManagerPtr manager = cnode->func_graph()->manager();
  MS_EXCEPTION_IF_NULL(manager);
  AnfNodeIndexSet node_set = manager->node_users()[cnode];
  for (auto &node_pair : node_set) {
    CNodePtr use_apply = node_pair.first->cast<CNodePtr>();
    if (use_apply == nullptr || !IsValueNode<Primitive>(use_apply->input(0))) {
      continue;
    }
    ValueNodePtr prim_anf_node = use_apply->input(0)->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(prim_anf_node);
    PrimitivePtr node_prim = prim_anf_node->value()->cast<PrimitivePtr>();
    MS_EXCEPTION_IF_NULL(node_prim);
    MS_LOG(INFO) << "FindNextNodeStraCosts prim " << node_prim->name();
    if (node_prim->name() == DEPEND && node_pair.second != 1) {
      continue;
    }
    if (IsParallelCareNode(use_apply) && (use_apply->operator_info() != nullptr)) {
      MS_LOG(INFO) << "FindNextNodeStraCosts success prim " << node_prim->name();
      *next_operator_info = use_apply->operator_info();
      *in_index = node_pair.second - 1;
      return true;
    }
    MS_LOG(DEBUG) << "FindNextNodeStraCosts failed prim " << node_prim->name() << " " << IsParallelCareNode(use_apply)
                  << " " << (use_apply->operator_info() != nullptr);
    if (FindNextNodeStraCosts(use_apply, next_operator_info, in_index)) {
      return true;
    }
  }
  return false;
}

void ReshapeCostCompute(const std::vector<AnfNodePtr> &all_nodes) {
  for (auto node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    if (!FindReshape(cnode)) {
      continue;
    }
    MS_ASSERT(cnode->inputs().size() == 3);
    // Get the previous node's strategy_cost_.
    auto pre_node = cnode->input(1);
    int32_t out_index = 0;
    OperatorInfoPtr pre_operator_info;
    std::vector<std::shared_ptr<StrategyWithCost>> pre_stra_costs;
    if (pre_node->isa<Parameter>()) {
      OperatorInfoPtr operator_info = cnode->operator_info();
      auto reshape_info = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
      reshape_info->SetCostForReshapeWithParameter();
      pre_operator_info = reshape_info;
      pre_stra_costs = reshape_info->strategy_cost();
    } else {
      if (!FindPreNodeStraCosts(pre_node, &pre_operator_info, &out_index)) {
        MS_LOG(EXCEPTION) << "FindPreNodeStraCosts for reshape failed";
      }
      pre_stra_costs = pre_operator_info->strategy_cost();
    }
    // Get the next node's strategy_cost_.
    int32_t in_index = 0;
    OperatorInfoPtr next_operator_info;
    std::vector<std::shared_ptr<StrategyWithCost>> next_stra_costs;
    bool find_next_node = FindNextNodeStraCosts(cnode, &next_operator_info, &in_index);
    if (!find_next_node) {
      MS_LOG(INFO) << "FindNextNodeStraCosts for reshape failed";
    }
    // Set input_layout and output_layout for Reshape.
    // Init Reshape and set the cost for each input_layout and output_layout.
    OperatorInfoPtr operator_info = cnode->operator_info();
    auto reshape_info = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
    reshape_info->set_pre_operator_name(pre_operator_info->name());
    reshape_info->set_pre_operator_index(out_index);
    if (find_next_node) {
      next_stra_costs = next_operator_info->strategy_cost();
      reshape_info->set_next_operator_name(next_operator_info->name());
      reshape_info->set_next_operator_index(in_index);
    }
    bool is_prev_param = pre_node->isa<Parameter>();
    if (reshape_info->GenetateStrategyCosts(pre_stra_costs, next_stra_costs, out_index, in_index, is_prev_param) !=
        SUCCESS) {
      MS_LOG(EXCEPTION) << "Reshape generate strategy_costs failed!";
    }
  }
}
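
// Note (illustrative): Reshape gets no searched strategy of its own. Its candidate costs are
// derived from the layouts already enumerated by its neighbours: the previous operator's output
// layout (at out_index) serves as Reshape's input layout, and the next operator's input layout (at
// in_index) serves as its output layout; when the input comes straight from a Parameter,
// SetCostForReshapeWithParameter() supplies the previous-side costs instead.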

Status ParallelStrategySearch(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root) {
  // There are 4 meta-steps to determine the parallelization strategy for the ANF graph.
  // Step 1: Traverse the ANF graph and create NODEs for the costgraph:
  //   create the OperatorInfo object for each primitive, and enumerate the parallelization
  //   strategies for each OperatorInfo.
  // Step 1.1: Deal with 'Reshape':
  //   'Reshape' takes its previous operator's layout as its input layout, and its next operator's
  //   layout as its output layout.
  // Step 2: Traverse the ANF graph and create EDGES for the costgraph:
  //   create the Edge object for each pair of OperatorInfos, and enumerate the parallelization
  //   strategies for each edge, based on the strategies of the two OperatorInfos.
  // Step 3: Augment the costgraph:
  //   take care of the case of a single Parameter being used by multiple operators. Create a
  //   TmpIdentity operator for this Parameter, and add an edge for the use of this Parameter by
  //   each subsequent operator.
  // Step 3.1: Calculate memory usage:
  //   note that the memory usage calculation differs between the training phase and the inference
  //   phase.
  // Step 4: Run the Dynamic Programming algorithm:
  //   in this process, the cost is calculated based not only on the operators, but also on the
  //   edges. Here, the edge cost is caused by the redistribution of an operator's output tensor
  //   layout to the next operator's input tensor layout. Note that there may be several connected
  //   components in the costgraph, and the DP algorithm runs on each of them.
  //
  // OUTPUT: the determined strategy for each operator.

  // Step 1
  if (CostModelContext::GetInstance()->is_multi_subgraphs()) {
    if (ConstructCostGraphNodesByUniqueIdTC(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  } else {
    if (ConstructCostGraphNodesByUniqueId(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  }
  // Step 1.1
  ReshapeCostCompute(all_nodes);
  // Step 2
  ConstructCostGraphEdges(all_nodes);
  MS_LOG(INFO) << "Constructing edges for cost graph succeeded. There are " << entire_costgraph->GetOperators().size()
               << " operators, and " << entire_costgraph->GetNumEdges() << " edges.";
  // Step 3: Augment the costgraph.
  AugmentCostGraph(all_nodes);
  MS_LOG(INFO) << "After the augmenting procedure, there are " << entire_costgraph->GetOperators().size()
               << " operators, and " << entire_costgraph->GetNumEdges() << " edges.";
  // Step 3.1: Calculate the memory usage.
  if (entire_costgraph->CalculateMemoryCost() != SUCCESS) {
    MS_LOG(EXCEPTION) << "Calculating memory cost failed.";
  }
  // Step 4: run the DP algorithm on the costgraph.
  if (GetStrategy(entire_costgraph) != SUCCESS) {
    MS_LOG(ERROR) << "Strategy search for cost-graph fails";
    return FAILED;
  }
  MS_LOG(INFO) << "Searching strategy succeeded.";
  if (entire_costgraph->InitSelectedStrategy() == SUCCESS) {
    MS_LOG(INFO) << "Init selected strategy succeeded.";
  } else {
    MS_LOG(EXCEPTION) << "Init selected strategy failed.";
  }
  // Print the selected strategies.
  for (auto &op : entire_costgraph->GetOperators()) {
    StrategyPtr s_strategy = op->selected_strategy();
    MS_LOG(INFO) << op->name() << " : The strategy is:";
    PrintStrategy(s_strategy);
  }
  return SUCCESS;
}

std::vector<std::vector<std::string>> RecInputTensorNames(const std::map<std::string, std::string>::iterator &it,
                                                          std::vector<std::vector<std::string>> input_tensor_names) {
  for (size_t j = 0; j < input_tensor_names.size(); j++) {
    for (size_t k = 0; k < input_tensor_names[j].size(); k++) {
      if (it->first == input_tensor_names[j][k]) {
        input_tensor_names[j][k] = it->second;
        break;
      }
    }
  }
  return input_tensor_names;
}
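
// Example (illustrative): if the tuple_getitem list maps UniqueId "5" (a tuple_getitem CNode) to
// UniqueId "3" (its producer), every occurrence of "5" in input_tensor_names is rewritten to "3",
// so the recursive-programming parser sees edges that point at real operators rather than at
// tuple_getitem wrappers.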

CNodePtr GetInternalOperatorInfo(const CNodePtr &cnode, const ValueNodePtr &prim_anf_node) {
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  if (prim->name() == TUPLE_GETITEM || prim->name() == DEPEND) {
    auto prev_cnode = cnode->input(1)->cast<CNodePtr>();
    if (prev_cnode == nullptr || !IsValueNode<Primitive>(prev_cnode->input(0))) {
      return nullptr;
    }
    auto prev_prim = prev_cnode->input(0)->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
    while (prev_prim->name() == TUPLE_GETITEM || prev_prim->name() == DEPEND) {
      prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
      if (prev_cnode == nullptr || !IsValueNode<Primitive>(prev_cnode->input(0))) {
        return nullptr;
      }
      prev_prim = prev_cnode->input(0)->cast<ValueNodePtr>()->value()->cast<PrimitivePtr>();
    }
    return prev_cnode;
  }
  return nullptr;
}
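
// Note (illustrative): for a chain Op -> depend -> tuple_getitem -> <current node>, this walks back
// through any run of tuple_getitem/depend wrappers and returns the first CNode that is neither,
// i.e. the operator that actually produces the value; it returns nullptr when the current node is
// not such a wrapper.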

Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root) {
  if (CostModelContext::GetInstance()->is_multi_subgraphs()) {
    if (ConstructCostGraphNodesByUniqueIdTC(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  } else {
    if (ConstructCostGraphNodesByUniqueId(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  }
  ReshapeCostCompute(all_nodes);
  auto ops = entire_costgraph->GetOperators();
  std::vector<std::vector<std::string>> input_tensor_names = entire_costgraph->get_inputs_tensor_name_list();
  auto tuple_getitem_list = entire_costgraph->get_tuple_getitem_list();
  for (auto it = tuple_getitem_list.begin(); it != tuple_getitem_list.end();) {
    input_tensor_names = RecInputTensorNames(it++, input_tensor_names);
  }
  std::shared_ptr<Graph> graph = ParseGraph(ops, input_tensor_names);
  std::shared_ptr<std::vector<std::vector<size_t>>> eli_list(new std::vector<std::vector<size_t>>);
  std::shared_ptr<std::vector<size_t>> index_list(new std::vector<size_t>);
  graph = EliminateGraph(graph, eli_list, index_list);
  size_t num_device = g_device_manager->DeviceNum();
  double device_memory = entire_costgraph->GetDeviceMemory();
  if (PartitionForAllDevices(num_device, device_memory, graph) == SUCCESS) {
    MS_LOG(INFO) << "Partition succeeded with " << num_device << " devices.";
  } else {
    MS_LOG(ERROR) << "PartitionForAllDevices failed.";
    return FAILED;
  }
  GenerateStrategy(graph, ops, eli_list, input_tensor_names, index_list);
  if (entire_costgraph->InitSelectedStrategy() == SUCCESS) {
    MS_LOG(INFO) << "Init selected strategy succeeded.";
  } else {
    MS_LOG(ERROR) << "Init selected strategy failed.";
    return FAILED;
  }
  // Print the selected strategies.
  for (auto &op : entire_costgraph->GetOperators()) {
    StrategyPtr s_strategy = op->selected_strategy();
    MS_LOG(INFO) << op->name() << " : The strategy is:";
    PrintStrategy(s_strategy);
  }
  return SUCCESS;
}
}  // namespace parallel
}  // namespace mindspore