
step_auto_parallel.cc

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "parallel/step_auto_parallel.h"

#include <inttypes.h>
#include <sys/time.h>
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "ir/anf.h"
#include "ir/param_value_py.h"
#include "ir/tensor.h"
#include "optimizer/opt.h"
#include "optimizer/optimizer.h"
#include "parallel/auto_parallel/dp_algo_costmodel.h"
#include "parallel/auto_parallel/edge_costmodel.h"
#include "parallel/auto_parallel/graph_costmodel.h"
#include "parallel/auto_parallel/rec_core/rec_generate_strategy.h"
#include "parallel/auto_parallel/rec_core/rec_parse_graph.h"
#include "parallel/auto_parallel/rec_core/rec_partition.h"
#include "parallel/context.h"
#include "parallel/ops_info/reshape_info.h"
#include "parallel/ops_info/tmp_identity_info.h"
#include "parallel/step_parallel.h"
#include "parallel/strategy_checkpoint/parallel_strategy_checkpoint.h"
#include "pipeline/parse/python_adapter.h"
#include "pipeline/pipeline.h"

namespace mindspore {
namespace parallel {
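// Entry point of the auto-parallel pass. If the graph is flagged for auto-parallel and has not
// been processed yet, search a parallelization strategy for every care-about operator using the
// configured searching mode (dynamic programming or recursive programming).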
bool StepAutoParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &) {
  MS_EXCEPTION_IF_NULL(root);
  MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance());
  std::string parallel_mode = ParallelContext::GetInstance()->parallel_mode();
  // assume no change to the graph
  bool changes = false;
  // control whether to use the model_parallel mode
  if (!root->has_flag(AUTO_PARALLEL) || (parallel_mode != AUTO_PARALLEL) ||
      root->has_flag(AUTO_PARALLEL_RUN_ONCE_ONLY)) {
    return changes;
  }
  // check whether strategy_search_mode is valid
  std::string strategy_search_mode = ParallelContext::GetInstance()->strategy_search_mode();
  if ((strategy_search_mode != DYNAMIC_PROGRAMMING) && (strategy_search_mode != RECURSIVE_PROGRAMMING)) {
    // fall back to the default searching mode: dynamic programming
    strategy_search_mode = DYNAMIC_PROGRAMMING;
    MS_LOG(INFO) << "Strategy searching mode not indicated, using DP searching mode as default";
  }
  struct timeval start_time, end_time;
  (void)gettimeofday(&start_time, nullptr);
  if (MsContext::GetInstance()->save_graphs_flag()) {
    draw::Draw(STEP_AUTO_PARALLEL_BEGIN, root);
  }
  MS_LOG(INFO) << "Now entering step auto parallel";
  TOTAL_OPS = 0;
  AnfNodePtr ret = root->get_return();
  std::vector<AnfNodePtr> all_nodes = DeepScopedGraphSearch(ret);
  if (ParallelInit() != SUCCESS) {
    MS_LOG(EXCEPTION) << "Parallel init failed";
  }
  // mark the forward cnodes; parallelization only cares about these nodes
  MarkForwardCNode(root);
  if (FindCommunicationOp(all_nodes)) {
    MS_LOG(EXCEPTION) << "The graph contains communication ops";
  }
  // search the parallelization strategy
  if (strategy_search_mode == DYNAMIC_PROGRAMMING) {
    if (ParallelStrategySearch(all_nodes, root) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using DP searching mode";
    }
  } else if (strategy_search_mode == RECURSIVE_PROGRAMMING) {
    if (ParallelStrategyRecSearch(all_nodes, root) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using RP searching mode";
    }
  } else {
    MS_LOG(EXCEPTION) << "Auto-parallel strategy searching mode unexpected";
  }
  (void)gettimeofday(&end_time, nullptr);
  uint64_t time = kUSecondInSecond * static_cast<uint64_t>(end_time.tv_sec - start_time.tv_sec);
  time += static_cast<uint64_t>(end_time.tv_usec - start_time.tv_usec);
  MS_LOG(INFO) << "Now leaving step auto parallel, used time: " << time << " us";
  root->flags()[AUTO_PARALLEL_RUN_ONCE_ONLY] = true;
  return changes;
}
// Given a node, return whether each input is a parameter or an output of an operator.
// The returned boolean vector is in the same order as the inputs, thus its implementation
// is closely consistent with ExtractShape() in step_parallel.cc
std::vector<bool> ExtractInputParameterByNode(const CNodePtr &node) {
  std::vector<bool> is_parameter;
  std::vector<AnfNodePtr> node_inputs{node->inputs()};
  for (size_t i = 1; i < node_inputs.size(); ++i) {
    auto input = node_inputs[i];
    if (input->isa<Parameter>()) {
      auto input_parameter = input->cast<ParameterPtr>();
      if (input_parameter->has_default()) {
        auto param_value = std::dynamic_pointer_cast<ParamValuePy>(input_parameter->default_param());
        bool require_grad = py::cast<bool>(parse::python_adapter::GetPyObjAttr(param_value->value(), "requires_grad"));
        is_parameter.push_back(require_grad);
      } else {
        is_parameter.push_back(false);
      }
    } else if (input->isa<CNode>() || IsValueNode<tensor::Tensor>(input) || IsValueNode<RefKey>(input)) {
      is_parameter.push_back(false);
    }
  }
  return is_parameter;
}
// Given the type, return the number of bytes to represent this type
size_t GetLengthOfDataType(const TypePtr &type) {
  switch (type->type_id()) {
    case kNumberTypeBool:
      return sizeof(bool);
    case kNumberTypeInt8:
      return sizeof(int8_t);
    case kNumberTypeInt16:
      return sizeof(int16_t);
    case kNumberTypeInt32:
      return sizeof(int32_t);
    case kNumberTypeInt64:
      return sizeof(int64_t);
    case kNumberTypeUInt8:
      return sizeof(uint8_t);
    case kNumberTypeUInt16:
      return sizeof(uint16_t);
    case kNumberTypeUInt32:
      return sizeof(uint32_t);
    case kNumberTypeUInt64:
      return sizeof(uint64_t);
    case kNumberTypeFloat16:
      return sizeof(float) / 2;
    case kNumberTypeFloat32:
      return sizeof(float);
    case kNumberTypeFloat64:
      return sizeof(double);
    case kNumberTypeInt:
      return sizeof(int);
    case kNumberTypeUInt:
      return sizeof(unsigned int);
    case kNumberTypeFloat:
      return sizeof(float);
    default:
      MS_LOG(EXCEPTION) << "Unexpected type " << type->type_name();
  }
}
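// Return the byte length of the element type of 'input'; the node must carry a TensorType.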
size_t GetInputsTypeLen(const AnfNodePtr &input) {
  MS_EXCEPTION_IF_NULL(input);
  if (!input->isa<CNode>() && !input->isa<Parameter>() && !IsValueNode<tensor::Tensor>(input)) {
    MS_LOG(EXCEPTION) << "The input node is not a CNode, Parameter or Tensor";
  }
  size_t input_type_len = 0;
  auto type = input->Type();
  MS_EXCEPTION_IF_NULL(type);
  if (type->isa<mindspore::TensorType>()) {
    auto input_element_type = type->cast<mindspore::TensorTypePtr>()->element();
    input_type_len = GetLengthOfDataType(input_element_type);
  } else {
    MS_LOG(EXCEPTION) << "Unknown type: " << type->type_name();
  }
  return input_type_len;
}
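// Collect the element byte length of every input of 'node'; a RefKey input is resolved to the
// Parameter it refers to first.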
std::vector<size_t> ExtractInputTypeLengthByNode(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  std::vector<size_t> inputs_type_len;
  std::vector<AnfNodePtr> node_inputs{node->inputs()};
  // extract the length of each input element
  for (auto &input : node_inputs) {
    if (IsValueNode<RefKey>(input)) {
      auto func_graph = node->func_graph();
      MS_EXCEPTION_IF_NULL(func_graph);
      std::vector<AnfNodePtr> parameters = FindParameterByRefKeyNode(input, func_graph);
      if (parameters.size() != 1) {
        MS_LOG(EXCEPTION) << "Find parameter by ref key node failed";
      }
      inputs_type_len.push_back(GetInputsTypeLen(parameters[0]));
    } else if (input->isa<CNode>() || input->isa<Parameter>() || IsValueNode<tensor::Tensor>(input)) {
      // extract the input type length from the parameter or apply node
      inputs_type_len.push_back(GetInputsTypeLen(input));
    }
  }
  return inputs_type_len;
}
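// Collect the element type of every output of 'node'; a tuple output is flattened into its
// tensor elements.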
std::vector<TypePtr> ExtractOutputTypeByNode(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  std::vector<TypePtr> outputs_type;
  // extract the output element type
  auto primary_output_type = node->Type();
  MS_EXCEPTION_IF_NULL(primary_output_type);
  if (primary_output_type->isa<mindspore::Tuple>()) {
    // in this case, the output is a tuple
    auto tuple_output_type = primary_output_type->cast<mindspore::TuplePtr>();
    auto elements = tuple_output_type->elements();
    for (auto &ele : elements) {
      if (ele->isa<mindspore::TensorType>()) {
        auto ele_element_type = ele->cast<mindspore::TensorTypePtr>()->element();
        outputs_type.push_back(ele_element_type);
      } else {
        MS_LOG(EXCEPTION) << "Unknown type: " << primary_output_type->type_name();
      }
    }
  } else {
    // in this case, the output is a single tensor
    if (primary_output_type->isa<mindspore::TensorType>()) {
      auto element_type = primary_output_type->cast<mindspore::TensorTypePtr>()->element();
      outputs_type.push_back(element_type);
    } else {
      MS_LOG(EXCEPTION) << "Unknown type: " << primary_output_type->type_name();
    }
  }
  return outputs_type;
}
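// Whether 'op_name' denotes an elementwise operator; when ELEMENTWISE_OP_STRA_FOLLOW is enabled,
// such an operator simply follows the strategy of its preceding operator (see
// ConstructCostGraphEdges below).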
bool IsElementWiseOperator(const std::string &op_name) {
  static const std::set<std::string> elementwise_op = {ACTIVATION, GELU, TANH, SOFTMAX, LOG_SOFTMAX, RELU,
                                                       SQRT, CAST, POW, EXP, LOG, COS,
                                                       ACOS, LOGICALNOT, NEG, SQUARE, SIGMOID};
  auto iter = elementwise_op.find(op_name);
  return (iter != elementwise_op.end());
}

bool IsSplittableOperator(const std::string &op_name) {
  // clang-format off
  static const std::set<std::string> splittable_op =
    {MATMUL, TRANSPOSE, GELU, TANH, SOFTMAX, SUB, MUL, DIV, RESHAPE, GREATER, LOG_SOFTMAX, ACTIVATION, PRELU,
     FLOORDIV, L2_NORMALIZE, TENSOR_ADD, MAXPOOL, MAXPOOLV2, VIRTUAL_DATA_SET, RELU, ONEHOT, DROPOUT_DO_MASK,
     REDUCE_MAX, REDUCE_MIN, ARGMAXWITHVALUE, ARGMINWITHVALUE, REDUCE_SUM, CONV2D, FUSE_BATCH_NORM, POOLING,
     MAX_POOL_WITH_ARGMAX, SIMPLE_MEAN, FLATTEN, BATCH_NORM, LAYER_NORM, BIAS_ADD, ASSIGN_SUB, COS, ACOS, EXP,
     LOG, REDUCE_MEAN, REAL_DIV, SIGMOID, POW, MAXIMUM, MINIMUM, EQUAL, NOT_EQUAL, LOGICALNOT, GATHERV2, SQRT,
     STRIDEDSLICE, GET_NEXT, CAST, NEG, SQUARE, BATCH_MATMUL, EXPAND_DIMS, SQUEEZE,
     SOFTMAX_CROSS_ENTROPY_WITH_LOGITS, SIGMOID_CROSS_ENTROPY_WITH_LOGITS, SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS};
  // clang-format on
  auto iter = splittable_op.find(op_name);
  return (iter != splittable_op.end());
}
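// Whether auto-parallel must create an OperatorInfo for 'cnode'. A parallel-care node whose
// primitive is not splittable is a hard error, since its OperatorInfo implementation is missing.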
bool IsAutoParallelCareNode(const CNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  ValueNodePtr prim_node = cnode->input(0)->cast<ValueNodePtr>();
  if (prim_node == nullptr) {
    return false;
  }
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_node);
  if (prim == nullptr) {
    return false;
  }
  bool bool_result = IsParallelCareNode(cnode) && !IsSplittableOperator(prim->name());
  if (bool_result) {
    MS_LOG(EXCEPTION) << "Should implement OperatorInfo for: " << prim->name();
  } else if (prim->name() == CAST) {
    return true;
  }
  return IsParallelCareNode(cnode) && IsSplittableOperator(prim->name());
}
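// Create and initialize the OperatorInfo for 'cnode': extract its shapes, parameter flags, type
// lengths and constant input values, then either generate candidate strategies for searching, or
// adopt the strategy configured by the user or loaded from a checkpoint.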
OperatorInfoPtr CreateTheOperatorInfo(const PrimitivePtr &prim, const CNodePtr &cnode, StrategyMap *stra_map) {
  MS_EXCEPTION_IF_NULL(prim);
  MS_EXCEPTION_IF_NULL(cnode);
  auto attrs = prim->attrs();
  std::vector<Shapes> shape_list = ExtractShape(cnode);
  if (shape_list.empty()) {
    MS_LOG(EXCEPTION) << "Failure: node " << cnode->UniqueId() << " failed to extract shape";
  }
  // Create an OperatorInfo instance
  OperatorInfoPtr operator_info = NewOperatorInstance(prim, attrs, shape_list);
  MS_EXCEPTION_IF_NULL(operator_info);
  // Set the parameter information for this OperatorInfo (whether the inputs are parameters or not)
  std::vector<bool> parameter_info = ExtractInputParameterByNode(cnode);
  if (operator_info->set_is_parameter(parameter_info) != SUCCESS) {
    MS_LOG(ERROR) << "Initializing parameter information failed for operator: " << operator_info->name();
    return nullptr;
  }
  // Set the data type for inputs and outputs of this OperatorInfo
  auto inputs_type_length = ExtractInputTypeLengthByNode(cnode);
  auto outputs_type = ExtractOutputTypeByNode(cnode);
  std::vector<size_t> outputs_type_length;
  outputs_type_length.reserve(outputs_type.size());
  std::transform(outputs_type.begin(), outputs_type.end(), std::back_inserter(outputs_type_length),
                 GetLengthOfDataType);
  if (operator_info->SetInputAndOutputTypeLength(inputs_type_length, outputs_type_length) != SUCCESS) {
    MS_LOG(ERROR) << "Setting the lengths of inputs and outputs failed for operator: " << operator_info->name();
    return nullptr;
  }
  if (operator_info->set_outputs_type(outputs_type) != SUCCESS) {
    MS_LOG(ERROR) << "Setting the types of outputs failed for operator: " << operator_info->name();
    return nullptr;
  }
  // When the 'inputs' contain numerical values for some operators, these values should be extracted
  // from the ANF graph
  auto &inputs = cnode->inputs();
  std::vector<ValuePtr> input_value;
  for (size_t index = 1; index < inputs.size(); ++index) {
    if (inputs[index]->isa<ValueNode>()) {
      input_value.push_back(GetValueNode(inputs[index]));
    } else {
      input_value.emplace_back(nullptr);
    }
  }
  operator_info->set_input_value(input_value);
  operator_info->set_outputs_dtype(cnode->Type());
  operator_info->set_cnode(cnode);
  // key of the strategy map
  std::string strategy_key_name = NodeParameterName(cnode);
  bool load_strategy_from_ckpt =
    StrategyCheckpoint::GetInstance().LoadCheckPointOn() && stra_map->find(strategy_key_name) != stra_map->end();
  // If no strategy has been configured for this operator, candidate strategies are generated for
  // auto-strategy searching; if this primitive is CAST, the user-specified strategy is ignored.
  // If the strategy is set to load from a checkpoint, loading it from the checkpoint is preferred.
  if ((!StrategyFound(attrs) || prim->name() == CAST) && !load_strategy_from_ckpt) {
    // Compute split_flag_list_, indicating which input has the batch dimension. This is ONLY used
    // in preparation for the BatchParallelInfo operator
    operator_info->ComputeBatchSplitFlagList();
    if (operator_info->GenerateStrategies(0) != SUCCESS) {
      MS_LOG(ERROR) << "Strategy search for Operator " << operator_info->name() << " failed.";
      return nullptr;
    }
  } else {
    // In this case, the configured strategy should be extracted to help set the cost
    StrategyPtr strategyPtr;
    if (load_strategy_from_ckpt) {
      strategyPtr = (*stra_map)[strategy_key_name];
    } else {
      strategyPtr = parallel::ExtractStrategy(attrs);
    }
    if (strategyPtr != nullptr) {
      if (prim->name() == RESHAPE) {
        MS_LOG(EXCEPTION) << "Setting strategy for Reshape has no effect!";
      }
      // Set the cost for this configured strategy
      if (operator_info->SetCostUnderStrategy(strategyPtr) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Failure: operator " << prim->name() << " SetCostUnderStrategy failed";
      } else if (FULLY_USE_DEVICES) {
        // If configured to fully use devices, then check the user-specified strategy
        int32_t used_devices = operator_info->used_devices();
        MS_EXCEPTION_IF_NULL(g_device_manager);
        auto total_device_num = g_device_manager->GetDeviceListByStageId(0).size();
        // 'used_devices == 1' means the ALL-1 strategy, which is valid in auto-parallel
        if (used_devices == 1) {
          return operator_info;
        }
        // 'used_devices == -1' means that 'used_devices_' is not set
        if ((used_devices == -1) || IntToSize(used_devices) != total_device_num) {
          MS_LOG(EXCEPTION) << "In configuration 'FULLY_USE_DEVICES' = True, "
                            << "but the specified strategy uses device: " << used_devices
                            << ", total devices: " << total_device_num;
        }
      }
    }
  }
  return operator_info;
}
// Use CNodes' UniqueIds to construct the cost-graph nodes
Status ConstructCostGraphNodesByUniqueId(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &) {
  MS_LOG(INFO) << "Constructing nodes for cost graph begins.";
  entire_costgraph = std::make_shared<CostGraph>();
  entire_costgraph->SetDeviceMemoryAndCostParameter();
  // The map from a CNode's UniqueId to its OperatorInfo
  std::map<std::string, OperatorInfoPtr> from_cnode_to_info;
  // extract the strategy from the checkpoint for multi-train
  StrategyMap stra_map;
  if (StrategyCheckpoint::GetInstance().LoadCheckPointOn()) {
    if (StrategyCheckpoint::GetInstance().Load(&stra_map) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Load strategy checkpoint failed";
    }
  }
  // Step 1
  for (auto &node : all_nodes) {
    // NOTE: we only care about splittable Primitive operators
    auto cnode = node->cast<CNodePtr>();
    bool bool_result = (cnode == nullptr) || (!IsValueNode<Primitive>(cnode->input(0)));
    if (bool_result) {
      continue;
    }
    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    if (!IsAutoParallelCareNode(cnode)) {
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    MS_EXCEPTION_IF_NULL(prim);
    auto search_cnode = from_cnode_to_info.find(cnode->UniqueId());
    if (search_cnode == from_cnode_to_info.end()) {
      auto operator_info = CreateTheOperatorInfo(prim, cnode, &stra_map);
      if (operator_info == nullptr) {
        return FAILED;
      }
      // Needed by the rec_parser
      operator_info->set_type(prim->name());
      std::vector<std::string> inputs_tensor_name = ExtractInputsTensorName(cnode);
      entire_costgraph->AddOperator(operator_info);
      (void)cnode->set_operator_info(operator_info);
      MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                   << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                   << " is set OperatorInfo: " << operator_info->name() << ", Primitive: " << prim->name();
      (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueId(), operator_info));
      // Needed by the rec_parser
      entire_costgraph->add_inputs_tensor_name(inputs_tensor_name);
    } else {
      // Two CNodes' UniqueIds should not be equal
      MS_LOG(EXCEPTION) << "The CNode with UniqueId: " << cnode->UniqueId()
                        << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                        << " is already set OperatorInfo: " << search_cnode->second->name()
                        << ", Primitive: " << prim->name();
    }
  }
  MS_LOG(INFO) << "Constructing nodes for cost graph ends.";
  return SUCCESS;
}
// Use CNodes' UniqueIdThroughCopy to construct the cost-graph nodes
Status ConstructCostGraphNodesByUniqueIdTC(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &) {
  MS_LOG(INFO) << "Constructing nodes for cost graph begins.";
  entire_costgraph = std::make_shared<CostGraph>();
  entire_costgraph->SetDeviceMemoryAndCostParameter();
  // The map from a CNode's UniqueIdThroughCopy to its OperatorInfo
  std::map<std::string, OperatorInfoPtr> from_cnode_to_info;
  // extract the strategy from the checkpoint for multi-train
  StrategyMap stra_map;
  if (StrategyCheckpoint::GetInstance().LoadCheckPointOn()) {
    if (StrategyCheckpoint::GetInstance().Load(&stra_map) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Load strategy checkpoint failed";
    }
  }
  for (auto &node : all_nodes) {
    // NOTE: we only care about splittable Primitive operators
    auto cnode = node->cast<CNodePtr>();
    bool bool_result = (cnode == nullptr) || (!IsValueNode<Primitive>(cnode->input(0)));
    if (bool_result) {
      continue;
    }
    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    if (!IsAutoParallelCareNode(cnode)) {
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    // Find the OperatorInfo if it already exists
    auto search_cnode = from_cnode_to_info.find(cnode->UniqueIdThroughCopy());
    if (search_cnode == from_cnode_to_info.end()) {
      // In this case, the corresponding OperatorInfo is not created; create a new one.
      auto operator_info = CreateTheOperatorInfo(prim, cnode, &stra_map);
      if (operator_info == nullptr) {
        return FAILED;
      }
      // Needed by the rec_parser
      operator_info->set_type(prim->name());
      std::vector<std::string> inputs_tensor_name = ExtractInputsTensorName(cnode);
      entire_costgraph->AddOperator(operator_info);
      (void)cnode->set_operator_info(operator_info);
      MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                   << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                   << " is set OperatorInfo: " << operator_info->name() << ", Primitive: " << prim->name();
      (void)from_cnode_to_info.emplace(std::make_pair(cnode->UniqueIdThroughCopy(), operator_info));
      // Needed by the rec_parser
      entire_costgraph->add_inputs_tensor_name(inputs_tensor_name);
    } else {
      auto current_op_ptr = search_cnode->second;
      if (current_op_ptr == nullptr) {
        MS_LOG(EXCEPTION) << "Find " << prim->name() << " from CostGraph failed.";
      } else {
        bool is_find_wrong = (current_op_ptr->name().find(VIRTUAL_DATA_SET_INFO) == std::string::npos) &&
                             (current_op_ptr->name().find(BATCH_PARALLEL) == std::string::npos) &&
                             (current_op_ptr->name().find(prim->name()) == std::string::npos);
        if (is_find_wrong) {
          MS_LOG(EXCEPTION) << "The OperatorInfo: " << current_op_ptr->name()
                            << " does not match the Prim: " << prim->name();
        }
        (void)cnode->set_operator_info(current_op_ptr);
        MS_LOG(INFO) << "The CNode with UniqueId: " << cnode->UniqueId()
                     << " and UniqueIdThroughCopy: " << cnode->UniqueIdThroughCopy()
                     << " is set OperatorInfo: " << current_op_ptr->name() << ", Primitive: " << prim->name();
      }
    }
  }
  MS_LOG(INFO) << "Constructing nodes for cost graph ends.";
  return SUCCESS;
}
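// Create an Edge between the OperatorInfos of every pair of connected operators. 'tuple_getitem'
// and 'depend' nodes are transparent: the edge is created across them to the actual precursor.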
void ConstructCostGraphEdges(const std::vector<AnfNodePtr> &all_nodes) {
  // Step 2
  MS_LOG(INFO) << "Constructing edges for cost graph begins.";
  for (auto &node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    bool bool_result_cnode = (cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0));
    if (bool_result_cnode) {
      continue;
    }
    auto &inputs = cnode->inputs();
    ValueNodePtr prim_anf_node = inputs[0]->cast<ValueNodePtr>();
    if (!IsAutoParallelCareNode(cnode)) {
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    size_t edge_count = 0;
    for (size_t i = 1; i < inputs.size(); ++i) {
      auto prev_cnode = inputs[i]->cast<CNodePtr>();
      bool bool_result_prev_cnode = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
      if (bool_result_prev_cnode) {
        continue;
      }
      ValueNodePtr prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
      PrimitivePtr prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
      size_t output_index = 0;
      bool bool_result =
        (IsAutoParallelCareNode(prev_cnode)) || (prev_prim->name() == TUPLE_GETITEM) || (prev_prim->name() == DEPEND);
      while (bool_result) {
        if (IsAutoParallelCareNode(prev_cnode)) {
          std::string edge_name =
            prev_cnode->operator_info()->name() + OPERATOR_TO_OPERATOR_CONNECTOR + cnode->operator_info()->name();
          // If the edge between these two operators has already been added, it is not added again.
          if (entire_costgraph->IsEdgeInCostGraph(edge_name, output_index, i - 1)) {
            break;
          }
          EdgePtr edge_ptr;
          MS_LOG(INFO) << "Creating edge: " << edge_name;
          bool follow_strategy = (prim->name() == RESHAPE) || (prev_prim->name() == RESHAPE) ||
                                 (ELEMENTWISE_OP_STRA_FOLLOW && IsElementWiseOperator(prev_prim->name()));
          if (follow_strategy) {
            // Redistribution is not allowed on the edge.
            // Elementwise operators have the same strategy as their previous operators.
            edge_ptr = std::make_shared<Edge>(edge_name, prev_cnode->operator_info(), cnode->operator_info(),
                                              output_index, i - 1, false, true);
          } else {
            edge_ptr = std::make_shared<Edge>(edge_name, prev_cnode->operator_info(), cnode->operator_info(),
                                              output_index, i - 1, false);
          }
          // Init the costs for this edge
          if (edge_ptr->InitEdgeCost() != SUCCESS) {
            MS_LOG(EXCEPTION) << "Edge cost initialization failed";
          }
          cnode->operator_info()->AddPrevEdge(edge_ptr);
          prev_cnode->operator_info()->AddSuccEdge(edge_ptr);
          entire_costgraph->AddEdge(prev_cnode->operator_info(), cnode->operator_info(), edge_ptr);
          MS_LOG(INFO) << "Successfully added the edge between " << prev_cnode->operator_info()->name() << " and "
                       << cnode->operator_info()->name();
          edge_count++;
          break;
        } else if (prev_prim->name() == TUPLE_GETITEM) {
          // In this case, 'prev_cnode' is 'tuple_getitem'; the actual precursor node is the node
          // before this 'tuple_getitem'
          MS_LOG(INFO) << "Jumping the 'tuple_getitem' operator.";
          output_index = IntToSize(GetValue<int>(GetValueNode(prev_cnode->input(2))));
          prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
          bool bool_result_tuple = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
          if (bool_result_tuple) {
            break;
          }
          prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
          prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
          if (!IsAutoParallelCareNode(prev_cnode)) {
            MS_LOG(EXCEPTION) << "Did not create OperatorInfo for: " << prev_prim->name();
          }
          MS_LOG(INFO) << "Jumped the 'tuple_getitem' operator, "
                       << "and creating an edge between the Operator before "
                       << "'tuple_getitem' and the Operator after 'tuple_getitem'.";
        } else if (prev_prim->name() == DEPEND) {
          // In this case, 'prev_cnode' is 'depend'; the actual precursor node is the node before
          // this 'depend'
          MS_LOG(INFO) << "Jumping the 'depend' operator.";
          prev_cnode = prev_cnode->input(1)->cast<CNodePtr>();
          bool bool_result_depend = (prev_cnode == nullptr) || (!IsValueNode<Primitive>(prev_cnode->input(0)));
          if (bool_result_depend) {
            break;
          }
          prev_prim_anf_node = prev_cnode->input(0)->cast<ValueNodePtr>();
          prev_prim = prev_prim_anf_node->value()->cast<PrimitivePtr>();
          MS_LOG(INFO) << "Jumped the 'depend' operator, "
                       << "and creating an edge between the Operator before "
                       << "'depend' and the Operator after 'depend'.";
        }
        bool_result = (IsAutoParallelCareNode(prev_cnode)) || (prev_prim->name() == TUPLE_GETITEM) ||
                      (prev_prim->name() == DEPEND);
      }
    }
    MS_LOG(INFO) << "Successfully created " << edge_count << " edges for: " << cnode->operator_info()->name();
  }
  MS_LOG(INFO) << "Constructing edges for cost graph ends.";
}
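// If 'cnode' has RefKey inputs, return the cnode together with those RefKey inputs;
// otherwise return {nullptr, {}}.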
std::pair<AnfNodePtr, std::vector<AnfNodePtr>> CNodeWithRefKeys(const AnfNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  std::vector<AnfNodePtr> refkeys;
  if (cnode->isa<CNode>()) {
    auto cnode_ptr = cnode->cast<CNodePtr>();
    auto inputs = cnode_ptr->inputs();
    for (auto &one_input : inputs) {
      if (IsValueNode<RefKey>(one_input)) {
        refkeys.push_back(one_input);
      }
    }
    if (refkeys.size() >= 1) {
      return std::make_pair(cnode, refkeys);
    }
  }
  return {nullptr, refkeys};
}
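// Step 3 of the search: when one Parameter (or the RefKey referring to it) is used by several
// operators, create a TmpIdentity operator that "produces" the Parameter and connect it to every
// consumer by an edge, so that the cost model accounts for the shared tensor layout.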
void AugmentCostGraph(const std::vector<AnfNodePtr> &all_nodes) {
  // Step 3
  for (auto &node : all_nodes) {
    auto cnode_with_refkeys = CNodeWithRefKeys(node);
    if ((!node->isa<Parameter>()) && (cnode_with_refkeys.first == nullptr)) {
      continue;
    }
    std::string parameter_name;
    AnfNodePtr target_parameter = nullptr;
    AnfNodeIndexSet target_set;
    if (cnode_with_refkeys.first != nullptr) {
      // Deal with the RefKey case
      auto refkeys = cnode_with_refkeys.second;
      auto cnode = cnode_with_refkeys.first;
      auto cnode_ptr = cnode->cast<CNodePtr>();
      if (cnode_ptr == nullptr || !IsValueNode<Primitive>(cnode_ptr->input(0))) {
        continue;
      }
      if (!IsAutoParallelCareNode(cnode_ptr)) {
        continue;
      }
      if (refkeys.size() > 1) {
        MS_LOG(EXCEPTION) << "CNode: " << cnode->fullname_with_scope() << "'s inputs have more than 1 RefKey.";
      }
      MS_EXCEPTION_IF_NULL(cnode->func_graph());
      auto cnode_func_graph = cnode->func_graph();
      MS_EXCEPTION_IF_NULL(cnode->func_graph()->manager());
      // Find the RefKey being used
      auto candidate_set_by_refkey = cnode_func_graph->manager()->node_users()[refkeys[0]];
      for (auto &candidate : candidate_set_by_refkey) {
        auto candidate_node = candidate.first;
        auto c = candidate_node->cast<CNodePtr>();
        if (c == nullptr || !IsValueNode<Primitive>(c->input(0))) {
          continue;
        }
        if (!IsAutoParallelCareNode(c)) {
          continue;
        }
        target_set.add(candidate);
      }
      // Find the corresponding Parameter being used
      std::vector<AnfNodePtr> parameters = FindParameterByRefKeyNode(refkeys[0], cnode_func_graph);
      if (parameters.size() != 1) {
        MS_LOG(EXCEPTION) << "Find parameter by ref key node failed";
      }
      parameter_name = parameters[0]->cast<ParameterPtr>()->name();
      target_parameter = parameters[0];
      auto candidate_set_by_para = cnode_func_graph->manager()->node_users()[parameters[0]];
      for (auto &candidate : candidate_set_by_para) {
        auto candidate_node = candidate.first;
        auto c = candidate_node->cast<CNodePtr>();
        if (c == nullptr || !IsValueNode<Primitive>(c->input(0))) {
          continue;
        }
        if (!IsAutoParallelCareNode(c)) {
          continue;
        }
        (void)target_set.insert(candidate);
      }
    } else if (node->isa<Parameter>()) {
      // Deal with the Parameter case
      MS_EXCEPTION_IF_NULL(node->func_graph());
      MS_EXCEPTION_IF_NULL(node->func_graph()->manager());
      auto candidate_set = node->func_graph()->manager()->node_users()[node];
      for (auto &candidate : candidate_set) {
        auto candidate_node = candidate.first;
        auto c = candidate_node->cast<CNodePtr>();
        if (c == nullptr || !IsValueNode<Primitive>(c->input(0))) {
          continue;
        }
        if (!IsAutoParallelCareNode(c)) {
          continue;
        }
        (void)target_set.insert(candidate);
      }
      // In this case, the node is a Parameter
      parameter_name = node->cast<ParameterPtr>()->name();
      target_parameter = node;
    }
    if (target_set.size() <= 1) {
      continue;
    }
    // Rule out the case when a Parameter is used by an Operator, but the Operator appears in
    // multiple CNodes
    std::set<std::string> target_without_duplicate;
    for (auto &target : target_set) {
      auto target_cnode = target.first->cast<CNodePtr>();
      auto input_index = target.second;
      (void)target_without_duplicate.insert(std::to_string(input_index) + target_cnode->operator_info()->name());
    }
    if (target_without_duplicate.size() <= 1) {
      continue;
    }
    // Here, it is certain that this Parameter (RefKey) is used by multiple Operators.
    OperatorInfoPtr tmp_identity_ptr;
    bool new_identity = false;
    std::string tmp_identity_name;
    auto returned_identity = entire_costgraph->FindTmpIdentityByParameterName(parameter_name);
    if (returned_identity != nullptr) {
      // In this case, the TmpIdentityInfo instance has already been created
      new_identity = false;
      tmp_identity_ptr = returned_identity;
      tmp_identity_name = tmp_identity_ptr->name();
    } else {
      // In this case, the TmpIdentityInfo instance has NOT been created, so create a new one.
      new_identity = true;
      // 1) extract the input shape from this Parameter
      MS_EXCEPTION_IF_NULL(target_parameter);
      AbstractBasePtr abstract = target_parameter->abstract();
      if (abstract == nullptr) {
        MS_LOG(EXCEPTION) << "Failure: abstract is nullptr";
      }
      auto input_shape = dyn_cast<abstract::Shape>(abstract->GetShapeTrack());
      if (input_shape == nullptr) {
        MS_LOG(EXCEPTION) << "Failure: input_shape is nullptr";
      }
      std::vector<int> shape_int = input_shape->shape();
      Shape shape;
      (void)std::transform(shape_int.begin(), shape_int.end(), std::back_inserter(shape),
                           [](int sub_shape) { return static_cast<int32_t>(sub_shape); });
      Shapes inputs_shape = {shape};
      Shapes outputs_shape = {shape};
      // 2) init the attr
      std::unordered_map<std::string, ValuePtr> attr = {};
      // Create the TmpIdentity instance
      tmp_identity_ptr = std::make_shared<TmpIdentityInfo>(inputs_shape, outputs_shape, attr);
      tmp_identity_ptr->set_name(tmp_identity_ptr->name() + std::to_string(TOTAL_OPS));
      TOTAL_OPS++;
      tmp_identity_ptr->set_refkey_parameter_name(parameter_name);
      // Set the parameter and type lengths for inputs and outputs
      std::vector<bool> is_parameter;
      auto casted_target_parameter = target_parameter->cast<ParameterPtr>();
      MS_EXCEPTION_IF_NULL(casted_target_parameter);
      if (casted_target_parameter->has_default()) {
        auto param_value = std::dynamic_pointer_cast<ParamValuePy>(casted_target_parameter->default_param());
        bool require_grad = py::cast<bool>(parse::python_adapter::GetPyObjAttr(param_value->value(), "requires_grad"));
        is_parameter.push_back(require_grad);
      } else {
        is_parameter.push_back(false);
      }
      if (tmp_identity_ptr->set_is_parameter(is_parameter) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Setting parameter for TmpIdentityInfo failed";
      }
      auto node_type = target_parameter->Type();
      if (node_type->isa<mindspore::TensorType>()) {
        auto input_element_type = node_type->cast<mindspore::TensorTypePtr>()->element();
        std::vector<size_t> type_length = {GetLengthOfDataType(input_element_type)};
        if (tmp_identity_ptr->SetInputAndOutputTypeLength(type_length, type_length) != SUCCESS) {
          MS_LOG(EXCEPTION) << "Setting input and output type length for TmpIdentityInfo failed";
        }
      } else {
        MS_LOG(EXCEPTION) << "Unknown type: " << node_type->type_name();
      }
      // Generate strategies for this TmpIdentityInfo instance
      if (tmp_identity_ptr->GenerateStrategies(0) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Strategy search for Operator failed: " << tmp_identity_ptr->name();
      }
    }
    // A flag recording whether new edges have been created
    bool add_identity_edge = false;
    // Create edges between this TmpIdentityInfo instance and the subsequent Operator instances
    for (auto &target : target_set) {
      auto target_cnode = target.first->cast<CNodePtr>();
      auto prim = GetValueNode<PrimitivePtr>(target_cnode->input(0));
      auto input_index = target.second;
      std::string edge_name =
        std::string(IDENTITY_INFO) + OPERATOR_TO_OPERATOR_CONNECTOR + target_cnode->operator_info()->name();
      // If the edge between these two operators has already been added, it is not added again.
      if (entire_costgraph->IsEdgeInCostGraph(edge_name, 0, IntToSize(input_index - 1))) {
        continue;
      }
      std::shared_ptr<Edge> edge_ptr = std::make_shared<Edge>(
        edge_name, tmp_identity_ptr, target_cnode->operator_info(), 0, input_index - 1, false, true);
      if (edge_ptr->InitEdgeCost() != SUCCESS) {
        MS_LOG(EXCEPTION) << "Edge cost initialization failed";
      }
      target_cnode->operator_info()->AddPrevEdge(edge_ptr);
      tmp_identity_ptr->AddSuccEdge(edge_ptr);
      entire_costgraph->AddEdge(tmp_identity_ptr, target_cnode->operator_info(), edge_ptr);
      MS_LOG(INFO) << "Successfully added the edge between " << tmp_identity_ptr->name() << " and "
                   << target_cnode->operator_info()->name();
      add_identity_edge = true;
    }
    if (new_identity && add_identity_edge) {
      // Add the TmpIdentityInfo to the CostGraph only if BOTH conditions are satisfied
      entire_costgraph->AddOperator(tmp_identity_ptr);
    }
  }
}
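// Whether 'cnode' is a Reshape primitive that the parallel pass cares about.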
bool FindReshape(const CNodePtr &cnode) {
  if ((cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0))) {
    return false;
  }
  ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  if (!IsParallelCareNode(cnode) || (cnode->operator_info() == nullptr)) {
    return false;
  }
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  MS_EXCEPTION_IF_NULL(prim);
  OperatorInfoPtr operator_info = cnode->operator_info();
  if (operator_info == nullptr) {
    MS_LOG(EXCEPTION) << "Failure: Primitive " << prim->ToString() << " OperatorInstance is nullptr";
  }
  if (prim->name() != RESHAPE) {
    return false;
  }
  return true;
}
// Find the previous node and obtain its strategy_cost_ vector to get its layout vector.
bool FindPreNodeStraCosts(const AnfNodePtr &node, OperatorInfoPtr *pre_operator_info, int32_t *out_index) {
  // if the previous node is a parameter, handle it outside
  if (node->isa<Parameter>()) {
    return false;
  }
  if (!node->isa<CNode>()) {
    return false;
  }
  CNodePtr cnode = node->cast<CNodePtr>();
  if (!IsValueNode<Primitive>(cnode->input(0))) {
    return false;
  }
  if (IsParallelCareNode(cnode) && (cnode->operator_info() != nullptr)) {
    *pre_operator_info = cnode->operator_info();
    *out_index = 0;
    return true;
  }
  ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  PrimitivePtr prim = prim_anf_node->value()->cast<PrimitivePtr>();
  if (prim->name() == TUPLE_GETITEM) {
    *out_index = GetTupleGetItemIndex(cnode);
    // find tuple_getitem's previous node
    auto pre_node = cnode->input(1);
    if (!pre_node->isa<CNode>()) {
      MS_LOG(EXCEPTION) << "tuple_getitem's second input is not a cnode";
    }
    CNodePtr pre_cnode = pre_node->cast<CNodePtr>();
    if (IsParallelCareNode(pre_cnode) && (pre_cnode->operator_info() != nullptr)) {
      *pre_operator_info = pre_cnode->operator_info();
      return true;
    }
    return false;
  }
  for (size_t index = 0; index < cnode->inputs().size(); ++index) {
    if (prim->name() == DEPEND && index != 1) {
      continue;
    }
    if (!FindPreNodeStraCosts(cnode->inputs()[index], pre_operator_info, out_index)) {
      continue;
    }
    return true;
  }
  MS_LOG(WARNING) << "FindPreNodeStraCosts failed; if Reshape is not the first primitive, there must be some error";
  return false;
}
// Find the next node and obtain its strategy_cost_ vector to get its layout vector.
// If Reshape's output connects to several primitives, return the first layout found.
bool FindNextNodeStraCosts(const CNodePtr &cnode, OperatorInfoPtr *next_operator_info, int32_t *in_index) {
  MS_EXCEPTION_IF_NULL(cnode);
  MS_EXCEPTION_IF_NULL(cnode->func_graph());
  FuncGraphManagerPtr manager = cnode->func_graph()->manager();
  MS_EXCEPTION_IF_NULL(manager);
  AnfNodeIndexSet node_set = manager->node_users()[cnode];
  for (auto &node_pair : node_set) {
    CNodePtr use_apply = node_pair.first->cast<CNodePtr>();
    if (use_apply == nullptr || !IsValueNode<Primitive>(use_apply->input(0))) {
      continue;
    }
    ValueNodePtr prim_anf_node = use_apply->input(0)->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(prim_anf_node);
    PrimitivePtr node_prim = prim_anf_node->value()->cast<PrimitivePtr>();
    MS_EXCEPTION_IF_NULL(node_prim);
    MS_LOG(INFO) << "FindNextNodeStraCosts prim " << node_prim->name();
    if (node_prim->name() == DEPEND && node_pair.second != 1) {
      continue;
    }
    if (IsParallelCareNode(use_apply) && (use_apply->operator_info() != nullptr)) {
      MS_LOG(INFO) << "FindNextNodeStraCosts success prim " << node_prim->name();
      *next_operator_info = use_apply->operator_info();
      *in_index = node_pair.second - 1;
      return true;
    }
    MS_LOG(DEBUG) << "FindNextNodeStraCosts failed prim " << node_prim->name() << " " << IsParallelCareNode(use_apply)
                  << " " << (use_apply->operator_info() != nullptr);
    if (FindNextNodeStraCosts(use_apply, next_operator_info, in_index)) {
      return true;
    }
  }
  return false;
}
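// For every Reshape in the graph, fetch the strategy costs of its previous and next operators and
// let ReshapeInfo enumerate candidate (input_layout, output_layout) pairs with their costs.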
void ReshapeCostCompute(const std::vector<AnfNodePtr> &all_nodes) {
  for (auto node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    if (!FindReshape(cnode)) {
      continue;
    }
    MS_ASSERT(cnode->inputs().size() == 3);
    // get the previous node's strategy_cost_
    auto pre_node = cnode->input(1);
    int32_t out_index = 0;
    OperatorInfoPtr pre_operator_info;
    std::vector<std::shared_ptr<StrategyWithCost>> pre_stra_costs;
    if (pre_node->isa<Parameter>()) {
      OperatorInfoPtr operator_info = cnode->operator_info();
      auto reshape_info = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
      reshape_info->SetCostForReshapeWithParameter();
      pre_operator_info = reshape_info;
      pre_stra_costs = reshape_info->strategy_cost();
    } else {
      if (!FindPreNodeStraCosts(pre_node, &pre_operator_info, &out_index)) {
        MS_LOG(EXCEPTION) << "FindPreNodeStraCosts for reshape failed";
      }
      pre_stra_costs = pre_operator_info->strategy_cost();
    }
    // get the next node's strategy_cost_
    int32_t in_index = 0;
    OperatorInfoPtr next_operator_info;
    std::vector<std::shared_ptr<StrategyWithCost>> next_stra_costs;
    bool find_next_node = FindNextNodeStraCosts(cnode, &next_operator_info, &in_index);
    if (!find_next_node) {
      MS_LOG(INFO) << "FindNextNodeStraCosts for reshape failed";
    }
    // set input_layout and output_layout for reshape;
    // init reshape and set the cost for each (input_layout, output_layout) pair
    OperatorInfoPtr operator_info = cnode->operator_info();
    auto reshape_info = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
    reshape_info->set_pre_operator_name(pre_operator_info->name());
    reshape_info->set_pre_operator_index(out_index);
    if (find_next_node) {
      next_stra_costs = next_operator_info->strategy_cost();
      reshape_info->set_next_operator_name(next_operator_info->name());
      reshape_info->set_next_operator_index(in_index);
    }
    bool is_prev_param = pre_node->isa<Parameter>();
    if (reshape_info->GenetateStrategyCosts(pre_stra_costs, next_stra_costs, out_index, in_index, is_prev_param) !=
        SUCCESS) {
      MS_LOG(EXCEPTION) << "Reshape generate strategy_costs failed!";
    }
  }
}
Status ParallelStrategySearch(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root) {
  // There are 4 meta-steps to determine the parallelization strategy for the ANF graph.
  // Step 1: Traverse the ANF graph and create NODEs for the costgraph:
  //         create the OperatorInfo object for each primitive, and enumerate the parallelization
  //         strategies for each OperatorInfo;
  // Step 1.1: Deal with 'Reshape':
  //         'Reshape' takes its previous operator's layout as its input layout, and its next
  //         operator's layout as its output layout.
  // Step 2: Traverse the ANF graph and create EDGEs for the costgraph:
  //         create the Edge object for each pair of OperatorInfos, and enumerate the
  //         parallelization strategies for each edge, based on the strategies of the two
  //         OperatorInfos;
  // Step 3: Augment the costgraph:
  //         take care of the case of a single Parameter being used by multiple operators: create
  //         a TmpIdentity operator for this Parameter, and add an edge for the use of this
  //         Parameter by each subsequent operator;
  // Step 3.1: Calculate memory usage:
  //         note that the memory usage calculation differs between the training phase and the
  //         inference phase.
  // Step 4: Run the Dynamic Programming algorithm:
  //         in this process, the cost is calculated based not only on the operators, but also on
  //         the edges. Here, the edge cost is caused by the redistribution of an operator's output
  //         tensor layout to the next operator's input tensor layout. Note that there may be
  //         several connected components in the costgraph, and the DP algorithm runs on each of
  //         them.
  //
  // OUTPUT: the determined strategy for each operator.

  // Step 1
  if (CostModelContext::GetInstance()->is_multi_subgraphs()) {
    if (ConstructCostGraphNodesByUniqueIdTC(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  } else {
    if (ConstructCostGraphNodesByUniqueId(all_nodes, root) == SUCCESS) {
      MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                   << entire_costgraph->GetOperators().size() << " operators.";
    } else {
      MS_LOG(EXCEPTION) << "Constructing nodes for cost graph failed.";
    }
  }
  // Step 1.1
  ReshapeCostCompute(all_nodes);
  // Step 2
  ConstructCostGraphEdges(all_nodes);
  MS_LOG(INFO) << "Constructing edges for cost graph succeeded. There are " << entire_costgraph->GetOperators().size()
               << " operators, and " << entire_costgraph->GetNumEdges() << " edges.";
  // Step 3: Augment the costgraph.
  AugmentCostGraph(all_nodes);
  MS_LOG(INFO) << "After the augmenting procedure, there are " << entire_costgraph->GetOperators().size()
               << " operators, and " << entire_costgraph->GetNumEdges() << " edges.";
  // Step 3.1: Calculate the memory usage
  if (entire_costgraph->CalculateMemoryCost() != SUCCESS) {
    MS_LOG(EXCEPTION) << "Calculating memory cost failed.";
  }
  // Step 4: run the DP algorithm on the costgraph.
  if (GetStrategy(entire_costgraph) != SUCCESS) {
    MS_LOG(ERROR) << "Strategy search for cost graph failed";
    return FAILED;
  }
  MS_LOG(INFO) << "Searching strategy succeeded.";
  if (entire_costgraph->InitSelectedStrategy() == SUCCESS) {
    MS_LOG(INFO) << "Init selected strategy succeeded.";
  } else {
    MS_LOG(EXCEPTION) << "Init selected strategy failed.";
  }
  // print the selected strategy of each operator
  for (auto &op : entire_costgraph->GetOperators()) {
    StrategyPtr s_strategy = op->selected_strategy();
    MS_LOG(INFO) << op->name() << " : The strategy is:";
    PrintStrategy(s_strategy);
  }
  return SUCCESS;
}
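// Replace every occurrence of the tensor name 'it->first' in 'input_tensor_names' with
// 'it->second'; used by the recursive search to map tuple_getitem outputs back to their
// real producers.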
std::vector<std::vector<std::string>> RecInputTensorNames(const std::map<std::string, std::string>::iterator &it,
                                                          std::vector<std::vector<std::string>> input_tensor_names) {
  for (size_t j = 0; j < input_tensor_names.size(); j++) {
    for (size_t k = 0; k < input_tensor_names[j].size(); k++) {
      if (it->first == input_tensor_names[j][k]) {
        input_tensor_names[j][k] = it->second;
        break;
      }
    }
  }
  return input_tensor_names;
}
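// Strategy search using the recursive-programming algorithm: build the cost-graph nodes, convert
// them into the rec_core graph representation, partition the graph across all devices, and
// generate a strategy for every operator.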
Status ParallelStrategyRecSearch(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root) {
  if (ConstructCostGraphNodesByUniqueId(all_nodes, root) == SUCCESS) {
    MS_LOG(INFO) << "Constructing nodes for cost graph succeeded. There are "
                 << entire_costgraph->GetOperators().size() << " operators.";
  } else {
    MS_LOG(ERROR) << "Constructing nodes for cost graph failed.";
    return FAILED;
  }
  auto ops = entire_costgraph->GetOperators();
  std::vector<std::vector<std::string>> input_tensor_names = entire_costgraph->get_inputs_tensor_name_list();
  auto tuple_getitem_list = entire_costgraph->get_tuple_getitem_list();
  for (auto it = tuple_getitem_list.begin(); it != tuple_getitem_list.end();) {
    input_tensor_names = RecInputTensorNames(it++, input_tensor_names);
  }
  std::shared_ptr<Graph> graph = ParseGraph(ops, input_tensor_names);
  auto eli_list = std::make_shared<std::vector<std::vector<size_t>>>();
  auto index_list = std::make_shared<std::vector<size_t>>();
  graph = EliminateGraph(graph, eli_list, index_list);
  size_t num_device = g_device_manager->DeviceNum();
  double device_memory = entire_costgraph->GetDeviceMemory();
  if (PartitionForAllDevices(num_device, device_memory, graph) == SUCCESS) {
    MS_LOG(INFO) << "Partition succeeded with " << num_device << " devices.";
  } else {
    MS_LOG(ERROR) << "PartitionForAllDevices failed.";
    return FAILED;
  }
  GenerateStrategy(graph, ops, eli_list, input_tensor_names, index_list);
  if (entire_costgraph->InitSelectedStrategy() == SUCCESS) {
    MS_LOG(INFO) << "Init selected strategy succeeded.";
  } else {
    MS_LOG(ERROR) << "Init selected strategy failed.";
    return FAILED;
  }
  // print the selected strategy of each operator
  for (auto &op : entire_costgraph->GetOperators()) {
    StrategyPtr s_strategy = op->selected_strategy();
    MS_LOG(INFO) << op->name() << " : The strategy is:";
    PrintStrategy(s_strategy);
  }
  return SUCCESS;
}
}  // namespace parallel
}  // namespace mindspore