
step_parallel.cc

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "frontend/parallel/step_parallel.h"

#include <inttypes.h>
#include <sys/time.h>
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>

#include "base/core_ops.h"
#include "frontend/operator/ops.h"
#include "frontend/optimizer/optimizer.h"
#include "frontend/parallel/auto_parallel/graph_costmodel.h"
#include "frontend/parallel/context.h"
#include "frontend/parallel/device_manager.h"
#include "frontend/parallel/dynamic_creator.h"
#include "frontend/parallel/graph_util/generate_graph.h"
#include "frontend/parallel/graph_util/graph_info.h"
#include "frontend/parallel/graph_util/node_info.h"
#include "frontend/parallel/node_check.h"
#include "frontend/parallel/ops_info/matmul_info.h"
#include "frontend/parallel/strategy_checkpoint/parallel_strategy_checkpoint.h"
#include "ir/param_info.h"
#include "ir/tensor.h"
#include "utils/comm_manager.h"
#include "utils/ms_context.h"
#include "utils/symbolic.h"
#include "mindspore/core/utils/parallel_node_check.h"
#if (ENABLE_CPU && (ENABLE_D || ENABLE_GPU))
#include "ps/util.h"
#include "ps/ps_context.h"
#endif

using mindspore::tensor::Tensor;

namespace mindspore {
namespace parallel {
static const std::set<std::string> COMMUNICATION_OPS = {ALL_REDUCE, ALL_GATHER, ALL_TO_ALL, REDUCE_SCATTER};
static const std::set<std::string> INVALID_LOSS_OPS = {GET_NEXT, VIRTUALLOSS, LOAD, UPDATESTATE};
// g_RefMap: if input i of CNode B is a RefKey[Parameter C],
// the map stores one entry with key C and value (B, i).
static std::map<AnfNodePtr, std::pair<AnfNodePtr, int64_t>> g_RefMap;

void SetCommunicationOpGroupLabel(std::vector<AnfNodePtr> new_node_input) {
  if (new_node_input.empty()) {
    return;
  }

  auto prim_anf_node = new_node_input[0]->cast<ValueNodePtr>();
  auto prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  MS_EXCEPTION_IF_NULL(prim);

  auto attrs = prim->attrs();
  auto iter = attrs.find(GROUP);
  if (iter != attrs.end()) {
    auto value = iter->second;
    MS_EXCEPTION_IF_NULL(value);
    if (value->isa<StringImm>()) {
      std::string hash_name = value->cast<StringImmPtr>()->value();
      MS_EXCEPTION_IF_NULL(g_device_manager);
      std::string rank_list_name = g_device_manager->FindRankListNameByHashName(hash_name);
      (void)prim->AddAttr(GROUP_RANKS, MakeValue(rank_list_name));
    }
  }
}

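// Illustrative note (added, not part of the original source): a communication op carries
// a hashed group name in its GROUP attr; the device manager maps that hash back to a
// human-readable rank list (e.g. "0-1-2-3", a hypothetical value) which is attached as
// GROUP_RANKS, so dumped graphs show which ranks a collective actually spans.
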
void SetMiniStepOpDoMirrorLabel(std::vector<AnfNodePtr> new_node_input, bool accu_flag) {
  if (new_node_input.empty()) {
    return;
  }
  auto prim_anf_node = new_node_input[0]->cast<ValueNodePtr>();
  auto prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  MS_EXCEPTION_IF_NULL(prim);

  auto attrs = prim->attrs();
  attrs[DO_MIRROR] = MakeValue<bool>(!accu_flag);
  prim->SetAttrs(attrs);
}

std::vector<AnfNodePtr> CreateInput(const Operator &op, const AnfNodePtr &node, const std::string &instance_name) {
  MS_EXCEPTION_IF_NULL(node);
  OperatorArgs arg_forward = op.second;
  ValuePtr pyop_instance = CreatOpInstance(arg_forward.first, op.first, instance_name);
  MS_EXCEPTION_IF_NULL(pyop_instance);
  OperatorParams params = arg_forward.second;

  std::vector<AnfNodePtr> new_node_input = {NewValueNode(pyop_instance), node};
  if (!params.empty()) {
    for (auto &param : params) {
      AnfNodePtr val = NewValueNode(param.first.second);
      MS_EXCEPTION_IF_NULL(val);
      int64_t position = param.second;
      (void)new_node_input.insert(new_node_input.begin() + position, val);
    }
  }

  // if the op has a 'group' attr, set the rank list name for the op
  SetCommunicationOpGroupLabel(new_node_input);
  return new_node_input;
}

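// Usage sketch (added illustration, using only helpers defined in this file): given an
// Operator op whose OperatorParams place one constant at position 2, CreateInput returns
// {ValueNode(prim), node, ValueNode(param)}; callers then typically do
//   std::vector<AnfNodePtr> inputs = CreateInput(op, pre_node, "forward_op_0");
//   CNodePtr new_cnode = func_graph->NewCNode(inputs);
// where "forward_op_0" is a hypothetical instance name.
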
void InsertNode(const Operator &op, const CNodePtr &node, size_t index, const AnfNodePtr &pre_node,
                const FuncGraphPtr &func_graph, const std::string &instance_name) {
  // insert new node before the node
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  ScopePtr scope = node->scope();
  MS_EXCEPTION_IF_NULL(scope);
  std::vector<AnfNodePtr> node_input = CreateInput(op, pre_node, instance_name);
  CNodePtr new_node = func_graph->NewCNode(node_input);
  MS_EXCEPTION_IF_NULL(new_node);
  if (instance_name.find(SPLIT_SENS) == std::string::npos) {
    new_node->set_in_forward_flag(true);  // mark forward flag
  }
  auto new_node_value = node_input[0]->cast<ValueNodePtr>();
  MS_EXCEPTION_IF_NULL(new_node_value);
  PrimitivePtr new_node_prim = new_node_value->value()->cast<PrimitivePtr>();
  new_node_prim->set_instance_name(instance_name);
  new_node_prim->set_attr("keep_value_node_input", MakeValue(true));
  new_node->set_scope(scope);
  node_input[0]->set_scope(scope);
  manager->SetEdge(node, SizeToLong(index), new_node);
  MS_LOG(INFO) << "Insert " << instance_name << " success";
}

bool ParameterIsCloned(const AnfNodePtr &parameter_node) {
  MS_EXCEPTION_IF_NULL(parameter_node);
  auto cloned_parameter = parameter_node->cast<ParameterPtr>();
  MS_EXCEPTION_IF_NULL(cloned_parameter);

  // find the clone parameter
  if (!cloned_parameter->has_default()) {
    return false;
  }
  auto param_value = cloned_parameter->param_info();
  if (param_value == nullptr) {
    return false;
  }
  bool cloned = param_value->cloned();
  if (!cloned) {
    return false;
  }

  MS_LOG(INFO) << "The parameter: " << cloned_parameter->name() << " is cloned";
  return true;
}

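// Context note (added for readability): "cloned" parameters are copies that optimizers
// or gradient accumulation derive from a user parameter, e.g. an "accu_grads.<name>"
// buffer paired with weight "<name>". CreateMirrorInput below relies on this predicate
// to locate the accumulation buffer that belongs to a given weight.
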
std::vector<AnfNodePtr> CreateMirrorInput(const FuncGraphPtr &root, const Operator &op, const AnfNodePtr &node,
                                          const std::string &instance_name, const std::string &weight_name) {
  MS_EXCEPTION_IF_NULL(root);
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(root->manager());

  AnfNodePtr grad_accu = nullptr;
  std::string op_name = op.first;
  OperatorArgs arg_forward = op.second;

  int64_t grad_accumulation_step = ParallelContext::GetInstance()->grad_accumulation_step();
  if (grad_accumulation_step > 1) {
    auto parameters = root->parameters();
    bool find_grad_accu_node = false;
    for (auto &param : parameters) {
      if (!ParameterIsCloned(param)) {
        continue;
      }

      auto param_ptr = param->cast<ParameterPtr>();
      MS_EXCEPTION_IF_NULL(param_ptr);
      if (param_ptr->name().find(weight_name) != std::string::npos &&
          param_ptr->name().find(ACCU_GRADS) != std::string::npos) {
        find_grad_accu_node = true;
        grad_accu = param;
        MS_LOG(INFO) << "Find the accumulation grad node: " << param_ptr->name();
        break;
      }
    }

    if (!find_grad_accu_node) {
      if (op_name == MIRROR_MINI_STEP_OPERATOR) {
        op_name = MIRROR_OPERATOR;
        arg_forward.first.pop_back();
      } else if (op_name == MINI_STEP_ALL_GATHER) {
        MS_LOG(EXCEPTION) << "You should define `accu_grads` when enabling gradient accumulation.";
      }
    }
  }

  ValuePtr pyop_instance = CreatOpInstance(arg_forward.first, op_name, instance_name);
  MS_EXCEPTION_IF_NULL(pyop_instance);
  OperatorParams params = arg_forward.second;

  std::vector<AnfNodePtr> new_node_input;
  if (op_name == MIRROR_MINI_STEP_OPERATOR || op_name == MINI_STEP_ALL_GATHER) {
    new_node_input = {NewValueNode(pyop_instance), node, grad_accu};
    MS_LOG(INFO) << "Insert the grad accumulation node as the mirror op's input";
  } else {
    new_node_input = {NewValueNode(pyop_instance), node};
  }

  if (!params.empty()) {
    for (auto &param : params) {
      AnfNodePtr val = NewValueNode(param.first.second);
      MS_EXCEPTION_IF_NULL(val);
      int64_t position = param.second;
      (void)new_node_input.insert(new_node_input.begin() + position, val);
    }
  }

  // if the op has a 'group' attr, set the rank list name for the op
  SetCommunicationOpGroupLabel(new_node_input);
  // gradient accumulation
  if (grad_accumulation_step > 1) {
    SetMiniStepOpDoMirrorLabel(new_node_input, root->has_flag(ACCUMULATION));
  }
  return new_node_input;
}

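// Note added for clarity (behavior inferred from the code above, not stated in the
// original): with gradient accumulation enabled, the mirror op gets the accumulation
// buffer as an extra input and DO_MIRROR is set to !ACCUMULATION, so gradients are only
// synchronized on the step that applies the update, not on intermediate accumulation
// steps.
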
void InsertMirrorNode(const FuncGraphPtr &root, const Operator &op, const CNodePtr &node, size_t index,
                      const AnfNodePtr &pre_node, const FuncGraphPtr &func_graph, const std::string &instance_name,
                      const std::string &param_name) {
  // insert new node before the node
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  ScopePtr scope = node->scope();
  MS_EXCEPTION_IF_NULL(scope);
  std::vector<AnfNodePtr> node_input = CreateMirrorInput(root, op, pre_node, instance_name, param_name);
  CNodePtr new_node = func_graph->NewCNode(node_input);
  MS_EXCEPTION_IF_NULL(new_node);
  if (instance_name.find(SPLIT_SENS) == std::string::npos) {
    new_node->set_in_forward_flag(true);  // mark forward flag
  }
  auto new_node_value = node_input[0]->cast<ValueNodePtr>();
  MS_EXCEPTION_IF_NULL(new_node_value);
  PrimitivePtr new_node_prim = new_node_value->value()->cast<PrimitivePtr>();
  new_node_prim->set_instance_name(instance_name);
  new_node_prim->set_attr("keep_value_node_input", MakeValue(true));
  new_node->set_scope(scope);
  node_input[0]->set_scope(scope);
  manager->SetEdge(node, SizeToLong(index), new_node);
  MS_LOG(INFO) << "Insert " << instance_name << " success";
}

// Replace pre_node with pre_node->op
static CNodePtr ReplaceNode(const Operator &op, const AnfNodePtr &pre_node, const FuncGraphPtr &func_graph,
                            const std::string &instance_name) {
  // insert new node before the node
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  ScopePtr scope = pre_node->scope();
  MS_EXCEPTION_IF_NULL(scope);
  std::vector<AnfNodePtr> node_input = CreateInput(op, pre_node, instance_name);
  CNodePtr new_node = func_graph->NewCNode(node_input);
  MS_EXCEPTION_IF_NULL(new_node);
  if (instance_name.find(SPLIT_SENS) == std::string::npos) {
    new_node->set_in_forward_flag(true);  // mark forward flag
  }
  auto new_node_prim = GetValueNode<PrimitivePtr>(node_input[0]);
  new_node_prim->set_instance_name(instance_name);
  new_node_prim->set_attr("keep_value_node_input", MakeValue(true));
  new_node->set_scope(scope);
  node_input[0]->set_scope(scope);
  manager->Replace(pre_node, new_node);
  MS_LOG(INFO) << "Insert " << instance_name << " success";
  return new_node;
}

// Replace pre_node with pre_node->op
static CNodePtr ReplaceMirrorNode(const FuncGraphPtr &root, const Operator &op, const AnfNodePtr &pre_node,
                                  const FuncGraphPtr &func_graph, const std::string &instance_name,
                                  const std::string &param_name) {
  // insert new node before the node
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  ScopePtr scope = pre_node->scope();
  MS_EXCEPTION_IF_NULL(scope);
  std::vector<AnfNodePtr> node_input = CreateMirrorInput(root, op, pre_node, instance_name, param_name);
  CNodePtr new_node = func_graph->NewCNode(node_input);
  MS_EXCEPTION_IF_NULL(new_node);
  if (instance_name.find(SPLIT_SENS) == std::string::npos) {
    new_node->set_in_forward_flag(true);  // mark forward flag
  }
  auto new_node_prim = GetValueNode<PrimitivePtr>(node_input[0]);
  new_node_prim->set_instance_name(instance_name);
  new_node_prim->set_attr("keep_value_node_input", MakeValue(true));
  new_node->set_scope(scope);
  node_input[0]->set_scope(scope);
  manager->Replace(pre_node, new_node);
  MS_LOG(INFO) << "Insert " << instance_name << " success";
  return new_node;
}

std::string CreateInstanceName(const CNodePtr &node, size_t index) {
  MS_EXCEPTION_IF_NULL(node);
  if (!IsValueNode<Primitive>(node->input(0))) {
    MS_LOG(EXCEPTION) << "CreateInstanceName: " << node->ToString() << " doesn't have primitive";
  }
  std::string name_base = node->fullname_with_scope();
  std::string name = name_base + "_" + std::to_string(index);
  std::string instance_name = HashInstanceName(name);
  return instance_name;
}

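// Illustrative example (hypothetical names, added for clarity): for a node whose
// fullname_with_scope() is "Default/network/MatMul-op0" and index 1, the instance name
// is the hash of "Default/network/MatMul-op0_1"; hashing keeps generated op names
// unique per insertion site while staying short.
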
void ForwardCommunication(OperatorVector forward_op, const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  // step 1: get the graph manager
  FuncGraphPtr func_graph = node->func_graph();
  MS_EXCEPTION_IF_NULL(func_graph);
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  auto uses_set = manager->node_users()[node];
  CNodePtr node_to_insert = node;
  for (auto &uses_pair : uses_set) {
    auto uses_cnode = uses_pair.first->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(uses_cnode);
    if (!IsValueNode<Primitive>(uses_cnode->input(0))) {
      break;
    }
    PrimitivePtr value_node_prim = GetValueNode<PrimitivePtr>(uses_cnode->input(0));
    MS_EXCEPTION_IF_NULL(value_node_prim);
    if (value_node_prim->name() == prim::kTupleGetItem) {
      if (uses_set.size() > 1) {
        MS_LOG(EXCEPTION) << "Now only support one output, but got " << uses_set.size();
      }
      node_to_insert = uses_cnode;
    }
  }
  MS_EXCEPTION_IF_NULL(node_to_insert);
  std::reverse(forward_op.begin(), forward_op.end());

  // step 2: traverse op_list and insert node
  for (size_t index = 0; index < forward_op.size(); ++index) {
    std::string instance_name_base = FORWARD_OP;
    std::string instance_name = instance_name_base + "_" + CreateInstanceName(node, index);
    std::vector<AnfNodePtr> forward_input = CreateInput(forward_op[index], node_to_insert, instance_name);
    CNodePtr forward_node = func_graph->NewCNode(forward_input);  // using NewCNode to create anfnode
    MS_EXCEPTION_IF_NULL(forward_node);
    ScopePtr scope = node->scope();
    MS_EXCEPTION_IF_NULL(scope);
    forward_node->set_scope(scope);
    forward_node->set_in_forward_flag(true);
    forward_input[0]->set_scope(scope);
    (void)manager->Replace(node_to_insert, forward_node);  // using Replace function to insert node
  }
}

CNodePtr InsertMakeTuple(const AnfNodePtr &prev, uint64_t num, const FuncGraphPtr &func_graph) {
  MS_EXCEPTION_IF_NULL(prev);
  MS_EXCEPTION_IF_NULL(func_graph);
  std::vector<AnfNodePtr> make_tuple_inputs;
  make_tuple_inputs.push_back(NewValueNode(prim::kPrimMakeTuple));
  for (uint64_t i = 0; i < num; i++) {
    std::vector<AnfNodePtr> tuple_get_item_inputs{NewValueNode(prim::kPrimTupleGetItem), prev,
                                                  CreatInt64Imm(UlongToLong(i))};
    auto tuple_get_item = func_graph->NewCNode(tuple_get_item_inputs);
    MS_EXCEPTION_IF_NULL(tuple_get_item);
    make_tuple_inputs.push_back(tuple_get_item);
  }
  auto make_tuple = func_graph->NewCNode(make_tuple_inputs);
  MS_EXCEPTION_IF_NULL(make_tuple);
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  (void)manager->Replace(prev, make_tuple);
  return make_tuple;
}

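// Rewrite sketch (added illustration): for num == 2, every former use of `prev` becomes
//   MakeTuple(TupleGetItem(prev, 0), TupleGetItem(prev, 1))
// which re-packages a multi-output node so downstream consumers can keep indexing it as
// a tuple after redistribution or replacement ops are spliced in.
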
void InsertRedistribution(const RedistributionOpListPtr &redistribution_oplist_ptr, const CNodePtr &node,
                          const FuncGraphPtr &func_graph, int64_t pos, const CNodePtr &pre_node) {
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(pre_node);
  MS_EXCEPTION_IF_NULL(func_graph);
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  if ((redistribution_oplist_ptr->first).size() != (redistribution_oplist_ptr->second).size()) {
    MS_LOG(EXCEPTION) << "The sizes of OperatorVector and OutPutInfoVector must be the same!";
  }
  for (size_t index = 0; index < (redistribution_oplist_ptr->first).size(); ++index) {
    if (pos >= SizeToLong(node->inputs().size())) {
      MS_LOG(EXCEPTION) << "InsertRedistribution: pos can't be larger than the size of node's inputs";
    }
    // Create new node
    AnfNodePtr target_node = node->input(LongToSize(pos));
    MS_EXCEPTION_IF_NULL(target_node);
    // Create instance_name
    auto op = (redistribution_oplist_ptr->first)[index];
    std::string op_name = (redistribution_oplist_ptr->first)[index].first;
    std::string instance_name_base = REDISTRIBUTION_OP;
    std::string instance_name = instance_name_base + "_" + CreateInstanceName(pre_node, index) + op_name;
    InsertNode(op, node, LongToSize(pos), target_node, func_graph, instance_name);
    if ((redistribution_oplist_ptr->second)[index].first) {
      target_node = node->input(LongToSize(pos));
      MS_EXCEPTION_IF_NULL(target_node);
      (void)InsertMakeTuple(target_node, (redistribution_oplist_ptr->second)[index].second, func_graph);
    }
  }
}

void InsertGetTensorSliceOp(const Operator &op, const CNodePtr &node, const FuncGraphPtr &func_graph, int64_t pos,
                            const std::string &instance_name) {
  if (func_graph == nullptr) {
    MS_LOG(EXCEPTION) << "InsertGetTensorSliceOp: the graph is null, the instance name is " << instance_name;
  }
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  if (pos >= SizeToLong(node->inputs().size())) {
    MS_LOG(EXCEPTION) << "InsertGetTensorSliceOp: pos can't be larger than the size of node's inputs, the instance "
                      << "name is " << instance_name;
  }
  // Create new node
  AnfNodePtr pre_node = node->input(LongToSize(pos));
  MS_EXCEPTION_IF_NULL(pre_node);
  InsertNode(op, node, LongToSize(pos), pre_node, func_graph, instance_name);
}

TensorLayout GetTensorInLayout(const CNodePtr &middle_node, const PrimitivePtr &middle_prim,
                               const OperatorInfoPtr &distribute_operator) {
  TensorInfo tensorinfo_in;
  if (middle_prim->name() == prim::kTupleGetItem) {
    auto value_node = middle_node->input(2)->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(value_node);
    size_t index_s = LongToSize(GetValue<int64_t>(value_node->value()));
    if (index_s >= distribute_operator->outputs_tensor_info().size()) {
      MS_LOG(EXCEPTION) << "The index is out of range, index: " << index_s
                        << ", vector size: " << distribute_operator->outputs_tensor_info().size();
    }
    tensorinfo_in = distribute_operator->outputs_tensor_info()[index_s];
  } else {
    if (distribute_operator->outputs_tensor_info().empty()) {
      MS_LOG(EXCEPTION) << "The outputs tensor info is empty";
    }
    tensorinfo_in = distribute_operator->outputs_tensor_info()[0];
  }
  return tensorinfo_in.tensor_layout();
}

std::string GetPrimName(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  if (!IsValueNode<Primitive>(node->input(0))) {
    MS_LOG(EXCEPTION) << "The node is not a primitive";
  }
  auto value_node = node->input(0)->cast<ValueNodePtr>();
  auto prim = GetValueNode<PrimitivePtr>(value_node);
  MS_EXCEPTION_IF_NULL(prim);
  return prim->name();
}

OperatorInfoPtr GetDistributeOperator(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  if (!IsParallelCareNode(node)) {
    return nullptr;
  }
  OperatorInfoPtr distribute_operator = node->user_data<OperatorInfo>();
  if (distribute_operator == nullptr) {
    MS_LOG(EXCEPTION) << "Distribute operator is nullptr, the prim is " << GetPrimName(node);
  }
  return distribute_operator;
}

void Redistribution(const std::pair<AnfNodePtr, int64_t> &node_pair, const OperatorInfoPtr &distribute_operator,
                    const CNodePtr &middle_node, int64_t index, TensorRedistribution tensor_redistribution,
                    const CNodePtr &pre_node) {
  FuncGraphPtr func_graph = middle_node->func_graph();
  if (func_graph == nullptr) {
    MS_LOG(EXCEPTION) << "Redistribution: get graph failed";
  }
  CNodePtr next_node = node_pair.first->cast<CNodePtr>();
  MS_EXCEPTION_IF_NULL(next_node);
  auto middle_value = middle_node->input(0)->cast<ValueNodePtr>();
  MS_EXCEPTION_IF_NULL(middle_value);
  PrimitivePtr middle_prim = middle_value->value()->cast<PrimitivePtr>();
  MS_EXCEPTION_IF_NULL(middle_prim);
  OperatorInfoPtr next_distribute_operator = GetDistributeOperator(next_node);
  if (next_distribute_operator == nullptr) {
    MS_LOG(EXCEPTION) << "Failure: " << next_node->ToString() << " GetDistributeOperator failed";
  }
  RankList dev_list = distribute_operator->stage_device_list();
  std::string next_prim_name = GetValueNode<PrimitivePtr>(next_node->input(0))->name();
  MS_LOG(DEBUG) << "Redistribution: middle_prim " << middle_prim->name() << " next_prim " << next_prim_name;
  MS_LOG(DEBUG) << "Redistribution: middle_node " << middle_node->ToString() << " next_node " << next_node->ToString();
  // extract tensor layout in and out
  if (distribute_operator->outputs_tensor_info().empty()) {
    MS_LOG(WARNING) << "pre_node's tensorinfo_in is empty, operator name is " << distribute_operator->name();
    return;
  }

  if (LongToSize(index - 1) >= next_distribute_operator->inputs_tensor_info().size()) {
    MS_LOG(WARNING) << "The index is out of range, the index is " << index - 1 << ", the vector size is "
                    << next_distribute_operator->inputs_tensor_info().size() << ", next operator name is "
                    << next_distribute_operator->name();
    return;
  }
  TensorInfo tensorinfo_out = next_distribute_operator->inputs_tensor_info()[LongToSize(index - 1)];
  TensorLayout tensorlayout_out = tensorinfo_out.tensor_layout();
  TensorLayout tensorlayout_in = GetTensorInLayout(middle_node, middle_prim, distribute_operator);
  if (tensor_redistribution.Init(tensorlayout_in, tensorlayout_out, dev_list) == FAILED) {
    MS_LOG(ERROR) << "Redistribution: middle_prim " << middle_prim->name() << " next_prim : " << next_prim_name;
    MS_LOG(ERROR) << "Redistribution: middle_node " << middle_node->ToString() << " next_node "
                  << next_node->ToString();
    DumpGraph(func_graph, "redistribution_error");
    MS_LOG(EXCEPTION) << "Failure: tensor_redistribution init failed";
  }
  RedistributionOpListPtr redistribution_oplist_ptr = tensor_redistribution.InferTensorRedistributionOperatorList();
  if (redistribution_oplist_ptr == nullptr) {
    MS_LOG(EXCEPTION) << "Failure: InferTensorRedistribution failed";
  }
  MS_LOG(DEBUG) << "Redistribution size " << redistribution_oplist_ptr->first.size();
  if (!redistribution_oplist_ptr->first.empty()) {
    // insert node before next node
    InsertRedistribution(redistribution_oplist_ptr, next_node, func_graph, node_pair.second, pre_node);
  }
}

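// Added explanatory note: redistribution bridges two operators whose shardings differ.
// The producer's output TensorLayout and the consumer's expected input TensorLayout are
// handed to TensorRedistribution, which infers a communication sequence converting one
// layout into the other (for example an AllGather followed by a local slice, or an
// AllToAll); the resulting ops are then spliced onto the consumer's input edge.
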
bool StrategyFound(std::unordered_map<std::string, ValuePtr> attrs) {
  auto iter = attrs.find(STRATEGY);
  return !((iter == attrs.end()) || (iter->second->type_name() == NONE));
}

bool HasStrategy(const FuncGraphPtr &root) {
  AnfNodePtr ret = root->get_return();
  MS_EXCEPTION_IF_NULL(ret);
  std::vector<AnfNodePtr> all_nodes = DeepScopedGraphSearch(ret);

  for (auto &node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    if ((cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }

    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    auto attrs = prim->attrs();
    if (StrategyFound(attrs)) {
      return true;
    }
  }

  return false;
}

bool IsCommunicationOp(const PrimitivePtr &prim) {
  MS_EXCEPTION_IF_NULL(prim);
  return (COMMUNICATION_OPS.find(prim->name()) != COMMUNICATION_OPS.end());
}

bool FindCommunicationOp(const std::vector<AnfNodePtr> &all_nodes) {
  for (auto &node : all_nodes) {
    MS_EXCEPTION_IF_NULL(node);
    if (!node->isa<CNode>()) {
      continue;
    }
    auto cnode = node->cast<CNodePtr>();
    if (!IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    ValueNodePtr prim_value_node = cnode->input(0)->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(prim_value_node);
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_value_node);
    MS_EXCEPTION_IF_NULL(prim);

    if (IsCommunicationOp(prim) && cnode->in_forward_flag()) {
      MS_EXCEPTION_IF_NULL(prim_value_node->scope());
      MS_LOG(INFO) << "The graph contains a communication op: " << prim->name() << ", scope name is "
                   << prim_value_node->scope()->name();
      return true;
    }
  }
  return false;
}

bool IsParallelCareNode(const CNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  ValueNodePtr prim_node = cnode->input(0)->cast<ValueNodePtr>();
  if (prim_node == nullptr) {
    return false;
  }
  PrimitivePtr prim = prim_node->value()->cast<PrimitivePtr>();
  if (prim == nullptr) {
    return false;
  }
  if (IsInParallelBlackList(prim)) {
    MS_LOG(DEBUG) << "Parallel doesn't care about node: " << prim->name();
    return false;
  }
  // get_next is not in the forward graph, so we need to mark get_next as a forward node
  if (prim->name() == GET_NEXT) {
    return true;
  }
  if ((prim->name() == CAST) && !cnode->has_user_data<OperatorInfo>()) {
    return false;
  }

  return cnode->in_forward_flag();
}

void StepRedistribution(const CNodePtr &node, const OperatorInfoPtr &distribute_operator, const CNodePtr &insert_node,
                        const TensorRedistribution &tensor_redistribution, const CNodePtr &pre_node) {
  MS_EXCEPTION_IF_NULL(node->func_graph());
  FuncGraphManagerPtr manager = node->func_graph()->manager();
  MS_EXCEPTION_IF_NULL(manager);
  AnfNodeIndexSet node_set = manager->node_users()[node];
  CNodePtr insert_node_new;

  if (AnfNodeIsPrimitive(node, MAKE_TUPLE) || AnfNodeIsPrimitive(node, MAKE_LIST)) {
    MS_LOG(INFO) << "No need to insert redistribution op between make_tuple node and the next node";
    return;
  }
  if (IsValueNode<Primitive>(node->input(0))) {
    auto current_value = node->input(0)->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(current_value);
    PrimitivePtr current_prim = current_value->value()->cast<PrimitivePtr>();
    MS_EXCEPTION_IF_NULL(current_prim);
    insert_node_new = ((current_prim->name() == prim::kTupleGetItem) ? node : insert_node);
  } else {
    insert_node_new = insert_node;
  }
  MS_EXCEPTION_IF_NULL(insert_node_new);
  for (auto &node_pair : node_set) {
    CNodePtr use_cnode = node_pair.first->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(use_cnode);
    if (!IsValueNode<Primitive>(use_cnode->input(0))) {
      StepRedistribution(use_cnode, distribute_operator, insert_node_new, tensor_redistribution, pre_node);
    } else {
      ValueNodePtr prim_anf_node = use_cnode->input(0)->cast<ValueNodePtr>();
      MS_EXCEPTION_IF_NULL(prim_anf_node);
      PrimitivePtr node_prim = prim_anf_node->value()->cast<PrimitivePtr>();
      MS_EXCEPTION_IF_NULL(node_prim);
      if ((node_prim->name() == DEPEND && node_pair.second != 1) || node_prim->name() == UPDATESTATE) {
        continue;
      }
      if (IsParallelCareNode(use_cnode) && use_cnode->has_user_data<OperatorInfo>()) {
        Redistribution(node_pair, distribute_operator, insert_node_new, node_pair.second, tensor_redistribution,
                       pre_node);
      } else {
        StepRedistribution(use_cnode, distribute_operator, insert_node_new, tensor_redistribution, pre_node);
      }
    }
  }
}

void SplitTensor(const AnfNodePtr &node, const CNodePtr &next_node, int64_t index) {
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(next_node);
  OperatorInfoPtr op_info = next_node->user_data<OperatorInfo>();
  MS_EXCEPTION_IF_NULL(op_info);

  // If the shape of tensor is [] or [1], no need to split it.
  Shapes shapes = GetNodeShape(node);
  if (shapes.size() != 1) {
    MS_LOG(EXCEPTION) << "Split tensor for " << op_info->name()
                      << ": GetNodeShape for tensor_node, output size is not 1";
  }
  Shape shape = shapes[0];
  std::string shape_str = ShapeToString(shape);
  if (shape.empty() || ((shape.size() == 1) && (shape[0] == 1))) {
    MS_LOG(INFO) << "Split tensor for " << op_info->name() << ": The shape is " << shape_str
                 << ", no need to split it.";
    return;
  }

  MS_LOG(INFO) << "Split tensor for " << op_info->name() << ": The shape of tensor is " << shape_str;

  // extract tensor layout
  if (LongToSize(index - 1) >= op_info->inputs_tensor_info().size()) {
    MS_LOG(EXCEPTION) << "The index is out of range, index is " << index - 1 << ", vector size is "
                      << op_info->inputs_tensor_info().size();
  }
  TensorInfo tensor_info = op_info->inputs_tensor_info()[LongToSize(index - 1)];
  TensorLayout tensor_layout = tensor_info.tensor_layout();

  // Use _GetTensorSlice operator to split the tensor
  FuncGraphPtr func_graph = next_node->func_graph();  // only cnode can get the graph
  MS_EXCEPTION_IF_NULL(func_graph);
  Operator op = CreateGetTensorSliceOp(tensor_layout);
  InsertGetTensorSliceOp(op, next_node, func_graph, index, SPLIT_TENSOR);
  if (!op_info->sub_ops().empty()) {
    auto sub_ops = op_info->sub_ops();
    for (size_t i = 0; i < sub_ops.size(); i++) {
      if (!sub_ops.at(i).empty()) {
        InsertGetTensorSliceOp(sub_ops.at(i).at(0), next_node, func_graph, index, SUB);
      }
    }
  }
}

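// Added note: constants fed to a sharded operator must match that operator's input
// layout, so a _GetTensorSlice op is inserted to read the full tensor and keep only the
// local slice for this rank; scalars and [1]-shaped tensors are skipped above because
// every rank needs the full value.
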
  645. void SplitTensorList(const AnfNodePtr &node, const CNodePtr &next_node, int index) {
  646. MS_EXCEPTION_IF_NULL(node);
  647. MS_EXCEPTION_IF_NULL(next_node);
  648. if (next_node->inputs().size() != 2 || index != 1) {
  649. MS_LOG(INFO) << next_node->fullname_with_scope() << " Inputs must have only one input, get "
  650. << next_node->inputs().size() - 1 << " index should be 1, get " << index;
  651. return;
  652. }
  653. OperatorInfoPtr op_info = next_node->user_data<OperatorInfo>();
  654. MS_EXCEPTION_IF_NULL(op_info);
  655. std::vector<ValuePtr> inputs_values;
  656. if (IsValueNode<ValueList>(node)) {
  657. inputs_values = node->cast<ValueNodePtr>()->value()->cast<ValueListPtr>()->value();
  658. } else {
  659. inputs_values = node->cast<ValueNodePtr>()->value()->cast<ValueTuplePtr>()->value();
  660. }
  661. if (inputs_values.size() != op_info->inputs_tensor_info().size()) {
  662. MS_LOG(EXCEPTION) << "The inputs size " << inputs_values.size() << ", is not equal to inputs shape size "
  663. << op_info->inputs_tensor_info().size();
  664. }
  665. std::vector<AnfNodePtr> make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple)};
  666. FuncGraphPtr func_graph = next_node->func_graph();
  667. MS_EXCEPTION_IF_NULL(func_graph);
  668. FuncGraphManagerPtr manager = func_graph->manager();
  669. MS_EXCEPTION_IF_NULL(manager);
  670. ScopePtr scope = next_node->scope();
  671. MS_EXCEPTION_IF_NULL(scope);
  672. for (size_t i = 0; i < inputs_values.size(); ++i) {
  673. auto value_ptr = inputs_values[i];
  674. auto tensor = value_ptr->cast<tensor::TensorPtr>();
  675. MS_EXCEPTION_IF_NULL(tensor);
  676. TensorInfo tensor_info = op_info->inputs_tensor_info()[i];
  677. TensorLayout tensor_layout = tensor_info.tensor_layout();
  678. auto value_node = NewValueNode(value_ptr)->cast<AnfNodePtr>();
  679. Operator op = CreateGetTensorSliceOp(tensor_layout);
  680. std::vector<AnfNodePtr> node_input = CreateInput(op, value_node, SPLIT_TENSOR);
  681. CNodePtr new_node = func_graph->NewCNode(node_input);
  682. new_node->set_in_forward_flag(true);
  683. auto new_node_value = node_input[0]->cast<ValueNodePtr>();
  684. MS_EXCEPTION_IF_NULL(new_node_value);
  685. PrimitivePtr new_node_prim = new_node_value->value()->cast<PrimitivePtr>();
  686. new_node_prim->set_instance_name(SPLIT_TENSOR);
  687. new_node_prim->set_attr("keep_value_node_input", MakeValue(true));
  688. new_node->set_scope(scope);
  689. node_input[0]->set_scope(scope);
  690. make_tuple_inputs.push_back(new_node);
  691. }
  692. CNodePtr make_tuple = func_graph->NewCNode(make_tuple_inputs);
  693. manager->Replace(node, make_tuple);
  694. }
void StepSplitTensor(const AnfNodePtr &node, const FuncGraphManagerPtr &manager) {
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(manager);
  AnfNodeIndexSet node_set = manager->node_users()[node];
  for (auto &node_pair : node_set) {
    CNodePtr use_cnode = node_pair.first->cast<CNodePtr>();
    if (use_cnode == nullptr || !IsValueNode<Primitive>(use_cnode->input(0))) {
      continue;
    }
    ValueNodePtr prim_anf_node = use_cnode->input(0)->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(prim_anf_node);
    PrimitivePtr use_cnode_prim = prim_anf_node->value()->cast<PrimitivePtr>();
    MS_EXCEPTION_IF_NULL(use_cnode_prim);
    if (use_cnode_prim->name() == DEPEND && node_pair.second != 1) {
      continue;
    }
    if (IsParallelCareNode(use_cnode)) {
      if (IsValueNode<ValueList>(node) || IsValueNode<ValueTuple>(node)) {
        SplitTensorList(node, use_cnode, node_pair.second);
      } else {
        SplitTensor(node, use_cnode, node_pair.second);
      }
    }
  }
}

std::vector<AnfNodePtr> ReplaceOpInput(const Operator &replace_op, const std::string &instance_name,
                                       const CNodePtr &node) {
  OperatorArgs arg_replace_op = replace_op.second;
  ValuePtr pyop_instance = CreatOpInstance(arg_replace_op.first, replace_op.first, instance_name);
  if (pyop_instance == nullptr) {
    MS_LOG(EXCEPTION) << "Failure: " << replace_op.first << " CreatOpInstance failed";
  }
  OperatorParams params = arg_replace_op.second;
  if (node->inputs().size() < 2) {
    // the GetNext operator does not have any input
    if (node->inputs().size() == 1) {
      return {NewValueNode(pyop_instance)};
    }
    MS_LOG(EXCEPTION) << "Failure: " << node->ToString() << " size is smaller than 2";
  }
  std::vector<AnfNodePtr> replace_input = {NewValueNode(pyop_instance), node->input(1)};

  if (replace_op.first == EMBEDDING_LOOKUP) {
    replace_input = {NewValueNode(pyop_instance), node->input(1), node->input(2)};
  }

  if (!params.empty()) {
    Param param_first = *(params.begin());
    int64_t first_position = param_first.second;
    if (first_position == 1) {
      replace_input.pop_back();
    }
    for (auto &param : params) {
      AnfNodePtr val = NewValueNode(param.first.second);
      if (val == nullptr) {
        MS_LOG(EXCEPTION) << "Failure: val is nullptr";
      }
      int64_t position = param.second;
      (void)replace_input.insert(replace_input.begin() + position, val);
    }
  }

  return replace_input;
}

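// Added note: OperatorParams carry (value, position) pairs, so constant arguments can be
// spliced into the new input list at exact argument slots. If the first constant lands
// at position 1 it displaces the forwarded data input, which is why replace_input pops
// its last element before the insertions in that case.
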
void ReplaceOneOp(const Operator &replace_op, const CNodePtr &node) {
  FuncGraphPtr func_graph = node->func_graph();
  MS_EXCEPTION_IF_NULL(func_graph);
  FuncGraphManagerPtr manager = func_graph->manager();
  if (manager == nullptr) {
    MS_LOG(EXCEPTION) << "Failure: AddNode error since manager is nullptr";
  }
  std::string instance_name = CreateInstanceName(node, 0);
  std::vector<AnfNodePtr> replace_input;
  replace_input = ReplaceOpInput(replace_op, instance_name, node);
  if (node->inputs().size() == DROPOUT_DO_MASK_CNODE_INPUT_SIZE) {
    replace_input.push_back(node->input(3));
  }
  CNodePtr replace_node = func_graph->NewCNode(replace_input);
  MS_EXCEPTION_IF_NULL(replace_node);
  ScopePtr scope = node->scope();
  MS_EXCEPTION_IF_NULL(scope);
  replace_node->set_scope(scope);
  replace_node->set_in_forward_flag(true);
  replace_input[0]->set_scope(scope);
  (void)manager->Replace(node, replace_node);
}

void StepReplaceOp(OperatorVector replace_op, const CNodePtr &node) {
  // step 1: get the distribute_operator and the graph manager
  OperatorInfoPtr distribute_operator = node->user_data<OperatorInfo>();
  if (distribute_operator == nullptr) {
    MS_LOG(EXCEPTION) << "Failure: AddNode error since distribute_operator is nullptr";
  }
  FuncGraphPtr func_graph = node->func_graph();
  MS_EXCEPTION_IF_NULL(func_graph);
  FuncGraphManagerPtr manager = func_graph->manager();
  if (manager == nullptr) {
    MS_LOG(EXCEPTION) << "Failure: AddNode error since manager is nullptr";
  }

  // step 2: traverse op_list and insert node
  std::reverse(replace_op.begin(), replace_op.end());
  auto replace_op_info = distribute_operator->replace_op_info();
  std::reverse(replace_op_info.begin(), replace_op_info.end());
  if (!replace_op_info.empty() && replace_op_info.size() != replace_op.size()) {
    MS_LOG(EXCEPTION) << "replace_op_info is not empty and its size is not equal to replace_op!";
  }
  bool replace_op_info_flag = !replace_op_info.empty();
  for (size_t index = 0; index < replace_op.size(); ++index) {
    std::string instance_name = CreateInstanceName(node, index);
    std::vector<AnfNodePtr> replace_input;
    if (index != replace_op.size() - 1) {
      replace_input = CreateInput(replace_op[index], node, instance_name);
    } else {
      replace_input = ReplaceOpInput(replace_op[index], instance_name, node);
    }
    CNodePtr replace_node = func_graph->NewCNode(replace_input);
    MS_EXCEPTION_IF_NULL(replace_node);
    ScopePtr scope = node->scope();
    MS_EXCEPTION_IF_NULL(scope);
    replace_node->set_scope(scope);
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(replace_node->input(0));
    if (prim->name() == EMBEDDING_LOOKUP) {
      auto attrs = prim->attrs();
      attrs[TARGET] = MakeValue(CPU);
      (void)prim->SetAttrs(attrs);
    }
    if (index == replace_op.size() - 1) {
      replace_node->set_user_data<OperatorInfo>(node->user_data<OperatorInfo>());
    }
    replace_node->set_in_forward_flag(true);
    replace_input[0]->set_scope(scope);
    if (replace_op_info_flag && replace_op_info[index].first) {
      auto new_cnode = InsertMakeTuple(replace_node, replace_op_info[index].second, func_graph);
      (void)manager->Replace(node, new_cnode);  // using Replace function to insert node
    } else {
      (void)manager->Replace(node, replace_node);  // using Replace function to insert node
    }
  }
  MS_LOG(INFO) << "Insert ReplaceOp success for " << distribute_operator->name();
}

bool IsSomePrimitive(const CNodePtr &cnode, const std::string &name) {
  ValueNodePtr anf_node = cnode->input(0)->cast<ValueNodePtr>();
  MS_EXCEPTION_IF_NULL(anf_node);
  PrimitivePtr prim = anf_node->value()->cast<PrimitivePtr>();
  return (prim->name() == name);
}

void StepReplaceGraph(const ReplaceGraphPtr &replace_graph, const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(replace_graph);
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(replace_graph->second);
  FuncGraphPtr func_graph = node->func_graph();
  MS_EXCEPTION_IF_NULL(func_graph);
  FuncGraphManagerPtr manager = func_graph->manager();
  if (manager == nullptr) {
    MS_LOG(EXCEPTION) << "Failure: AddNode error since manager is nullptr";
  }
  // Solve the input order.
  // For example, input_node: {segment_sum: 1, segment_sum: 2, gather: 2}.
  // The original code here bound all operations to the first input of these operators.
  // However, the segment_sum operation needs two inputs. To solve this, we maintain a
  // dict counting how many times the same operation has appeared, and bind the inputs
  // according to that count.
  static std::unordered_map<AnfNodePtr, int> input_map = {};
  static int appear_count = 0;
  for (auto &replace_input : replace_graph->first) {
    auto pre_node = node->input(LongToSize(replace_input.second));

    auto it = input_map.find(replace_input.first);
    if (it != input_map.end()) {
      appear_count = 1 + it->second;
    } else {
      appear_count = 1;
    }
    input_map[replace_input.first] = appear_count;
    manager->SetEdge(replace_input.first, appear_count, pre_node);
  }
  // "(void)manager->Replace(replace_graph->first, pre_node);" can not be called
  auto replace_output = replace_graph->second;
  MS_EXCEPTION_IF_NULL(replace_output);
  (void)manager->Replace(node, replace_output);
}

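// Added note: SetEdge(consumer, k, producer) rewires input slot k of the consumer node,
// so the appear_count bookkeeping above means the k-th time a replacement node appears
// in the input list it receives an original input on its k-th input slot (slot 0 holds
// the primitive). A plain Replace cannot express this per-slot wiring, hence the
// commented-out call.
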
int64_t GetTupleGetItemIndex(const CNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  if (cnode->inputs().size() != 3) {
    MS_LOG(EXCEPTION) << cnode->ToString() << " size (" << cnode->inputs().size() << ") is not 3";
  }

  if (!cnode->input(2)->isa<ValueNode>()) {
    MS_LOG(EXCEPTION) << "The index of tuple getitem is not a value node";
  }

  ValuePtr tuple_index_value = GetValueNode(cnode->input(2));
  MS_EXCEPTION_IF_NULL(tuple_index_value);
  if (!tuple_index_value->isa<Int64Imm>()) {
    MS_LOG(EXCEPTION) << "The index of tuple getitem is not int64";
  }
  return tuple_index_value->cast<Int64ImmPtr>()->value();
}

void InsertVirtualDivOp(const VirtualDivOp &virtual_div_op, const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  size_t node_size = node->inputs().size();
  FuncGraphPtr func_graph = node->func_graph();
  MS_EXCEPTION_IF_NULL(func_graph);
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);

  for (size_t index = 1; index < node_size; ++index) {
    AnfNodePtr input = node->input(index);
    MS_EXCEPTION_IF_NULL(input);
    // if it is not a tensor, continue
    if ((!input->isa<CNode>() && !input->isa<Parameter>()) || HasAbstractMonad(input)) {
      MS_LOG(INFO) << "insert div op: the input at index " << index << " is not a tensor, skip";
      continue;
    }

    for (size_t pos = 0; pos < virtual_div_op.size(); ++pos) {
      std::string instance_name = CreateInstanceName(node, pos);
      InsertNode(virtual_div_op[pos], node, index, node->input(index), func_graph, instance_name);
    }
    MS_LOG(INFO) << "insert div op for input index " << index << " of node";
  }
}


static std::pair<AnfNodePtr, bool> FindParameterByValueNode(const AnfNodePtr &node, const FuncGraphPtr &func_graph) {
  if (IsValueNode<RefKey>(node)) {
    std::vector<AnfNodePtr> param_v = FindParameterByRefKeyNode(node, func_graph);
    if (param_v.size() != 1) {
      MS_LOG(EXCEPTION) << "FindParameterByRefKeyNode failed, return vector size must be 1, real is "
                        << param_v.size();
    }
    auto param_ptr = param_v[0]->user_data<parallel::TensorLayout>();
    if (param_ptr != nullptr && !param_ptr->opt_shard_group().empty()) {
      return std::make_pair(nullptr, true);
    }
    return std::make_pair(node, true);
  }
  return std::make_pair(nullptr, false);
}

// Only used for InsertMirrorOps
std::pair<AnfNodePtr, bool> FindParameter(const AnfNodePtr &node, const FuncGraphPtr &func_graph) {
  if (!node->isa<Parameter>() && !node->isa<CNode>() && !node->isa<ValueNode>()) {
    return std::make_pair(nullptr, false);
  }
  if (node->isa<Parameter>()) {
    auto param_ptr = node->user_data<parallel::TensorLayout>();
    if (param_ptr != nullptr && !param_ptr->opt_shard_group().empty()) {
      return std::make_pair(nullptr, false);
    }
    return std::make_pair(node, false);
  }
  if (node->isa<ValueNode>()) {
    return FindParameterByValueNode(node, func_graph);
  }
  CNodePtr cnode = node->cast<CNodePtr>();
  MS_EXCEPTION_IF_NULL(cnode);
  if (!IsValueNode<Primitive>(cnode->input(0))) {
    for (size_t index = 0; index < cnode->inputs().size(); ++index) {
      if (!FindParameter(cnode->input(index), func_graph).first) {
        continue;
      }
      return FindParameter(cnode->input(index), func_graph);
    }
  }
  if (IsSomePrimitive(cnode, RECEIVE) && !cnode->has_user_data<OperatorInfo>()) {
    return std::make_pair(node, false);
  }
  if (IsParallelCareNode(cnode)) {
    return std::make_pair(nullptr, false);
  }
  ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  MS_EXCEPTION_IF_NULL(prim_anf_node);
  for (size_t index = 0; index < cnode->inputs().size(); ++index) {
    PrimitivePtr prim = prim_anf_node->value()->cast<PrimitivePtr>();
    MS_EXCEPTION_IF_NULL(prim);
    if ((prim->name() == DEPEND || prim->name() == LOAD) && index != 1) {
      continue;
    }
    if (!FindParameter(cnode->input(index), func_graph).first) {
      continue;
    }
    return FindParameter(cnode->input(index), func_graph);
  }
  return std::make_pair(nullptr, false);
}

std::pair<bool, CNodePtr> FindCNode(const AnfNodePtr &anode, const std::string &name, const FuncGraphPtr &func_graph) {
  MS_EXCEPTION_IF_NULL(anode);
  MS_EXCEPTION_IF_NULL(anode->func_graph());
  FuncGraphManagerPtr manager = anode->func_graph()->manager();
  MS_EXCEPTION_IF_NULL(manager);
  AnfNodeIndexSet node_set = manager->node_users()[anode];
  bool result = false;
  CNodePtr cnode_return = nullptr;
  for (auto &node_pair : node_set) {
    CNodePtr use_apply = node_pair.first->cast<CNodePtr>();
    if (use_apply == nullptr || !IsValueNode<Primitive>(use_apply->input(0))) {
      continue;
    }
    ValueNodePtr prim_anf_node = use_apply->input(0)->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(prim_anf_node);
    PrimitivePtr node_prim = prim_anf_node->value()->cast<PrimitivePtr>();
    MS_EXCEPTION_IF_NULL(node_prim);
    if (node_prim->name() == name && node_pair.second == 1) {
      if (use_apply->func_graph() == func_graph) {
        result = true;
        cnode_return = use_apply;
        MS_LOG(INFO) << "Find Primitive " << name << " in the same func_graph";
        continue;
      }
      MS_LOG(INFO) << "Find Primitive " << name << " in different func_graph";
    }
  }
  return std::make_pair(result, cnode_return);
}

bool IsCastBeforMirror(const CNodePtr &node, size_t index) {
  // Returns true only if gradient_fp32_sync is enabled, the previous node is a Cast,
  // and the Cast's output element type is not float32.
  if (!ParallelContext::GetInstance()->gradient_fp32_sync()) {
    return false;
  }
  auto pre_node = node->input(index);
  MS_EXCEPTION_IF_NULL(pre_node);
  auto cnode = pre_node->cast<CNodePtr>();
  if (cnode == nullptr || !IsValueNode<Primitive>(cnode->input(0))) {
    return false;
  }
  auto pre_value_node = cnode->input(0)->cast<ValueNodePtr>();
  MS_EXCEPTION_IF_NULL(pre_value_node);
  auto pre_prim = pre_value_node->value()->cast<PrimitivePtr>();
  MS_EXCEPTION_IF_NULL(pre_prim);
  if (pre_prim->name() != CAST) {
    return false;
  }
  auto node_type = pre_node->Type();
  MS_EXCEPTION_IF_NULL(node_type);
  if (!node_type->isa<mindspore::TensorType>()) {
    MS_LOG(EXCEPTION) << "Unknown type.";
  }
  auto input_element_type = node_type->cast<mindspore::TensorTypePtr>()->element();
  MS_EXCEPTION_IF_NULL(input_element_type);
  auto type_id = input_element_type->type_id();
  return (type_id != kNumberTypeFloat32);
}
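
// Worked example (a sketch of the intent, with hypothetical ops): with
// gradient_fp32_sync enabled and the pattern
//   param(float32) -> Cast(float16) -> MatMul
// IsCastBeforMirror returns true because the node feeding MatMul is a Cast whose
// output element type is float16, not float32. InsertMirrorOps below then hooks the
// mirror operator onto the Cast's input instead of the MatMul's, so the gradient
// all-reduce is performed on the float32 parameter rather than its float16 copy.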

static bool CheckInsertMirrorOps(const MirrorOps &mirror_ops, const CNodePtr &node, size_t node_size) {
  if ((node->inputs().size() == 2) && (IsValueNode<ValueSequeue>(node->input(1)))) {
    MS_LOG(INFO) << "Input is ValueList, skip it.";
    return false;
  }
  if ((node->inputs().size() == 2) &&
      (AnfNodeIsPrimitive(node->input(1), MAKE_TUPLE) || AnfNodeIsPrimitive(node->input(1), MAKE_LIST))) {
    MS_LOG(INFO) << "The mirror for " << GetPrimName(node) << " has been handled by the make_tuple node";
    return false;
  }
  if (mirror_ops.size() != node_size - 1) {
    MS_LOG(EXCEPTION) << "The size of mirror_ops is wrong! mirror_ops size is " << mirror_ops.size()
                      << ", node_size is " << node_size - 1;
  }
  return true;
}

void InsertMirrorOps(const FuncGraphPtr &root, const MirrorOps &mirror_ops, const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  size_t node_size = node->inputs().size();
  FuncGraphPtr func_graph = node->func_graph();
  MS_EXCEPTION_IF_NULL(func_graph);
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  for (auto input : node->inputs()) {
    if (HasAbstractMonad(input)) {
      node_size--;
    }
  }
  if (!CheckInsertMirrorOps(mirror_ops, node, node_size)) {
    return;
  }
  for (size_t index = 1; index < node_size; ++index) {
    OperatorVector backward_op = mirror_ops[index - 1];
    if (backward_op.empty()) {
      continue;
    }
    std::pair<AnfNodePtr, bool> param_node_pair = FindParameter(node->input(index), func_graph);
    if (!param_node_pair.first) {
      continue;
    }
    auto param_ptr = param_node_pair.first->cast<ParameterPtr>();
    std::string param_name;
    if (param_ptr != nullptr) {
      param_name = param_ptr->name();
    }
    // not a RefKey
    if (!param_node_pair.second) {
      auto next_cnode = FindCNode(param_node_pair.first, MIRROR_OPERATOR, func_graph);
      // if there is already a MirrorOp in the same graph, use the MirrorOp CNode as an input instead
      if (next_cnode.first) {
        MS_EXCEPTION_IF_NULL(next_cnode.second);
        // param->cast->op, insert mirror before cast
        if (node->input(index)->isa<CNode>()) {
          auto pre_cnode = node->input(index)->cast<CNodePtr>();
          auto pre_prim = GetValueNode<PrimitivePtr>(pre_cnode->input(0));
          if (pre_prim->name() == CAST) {
            manager->SetEdge(pre_cnode, 1, next_cnode.second);
            continue;
          }
        }
        manager->SetEdge(node, SizeToLong(index), next_cnode.second);
        continue;
      }
    }
    // if the parameter found is a RefKey, or no MirrorOp is found in the same graph, insert a new MirrorOp
    // only one MirrorOp in backward_op
    if (backward_op.size() != 1) {
      MS_LOG(EXCEPTION) << "backward_op size must be 1, real is " << backward_op.size();
    }
    std::string instance_name = MIRROR_OP;
    if (IsCastBeforMirror(node, index)) {
      for (auto &op : backward_op) {
        // insert new node before the node
        CNodePtr cnode = node->input(index)->cast<CNodePtr>();
        MS_EXCEPTION_IF_NULL(cnode);
        AnfNodePtr pre_node = cnode->input(1);
        InsertMirrorNode(root, op, cnode, size_t(1), pre_node, func_graph, instance_name, param_name);
        auto comm_op = cnode->input(size_t(1))->cast<CNodePtr>();
        // add fusion flag
        AddCommOpFusionType(comm_op, param_node_pair.first);
      }
      continue;
    }
    for (auto &op : backward_op) {
      AnfNodePtr pre_node = node->input(index);
      InsertMirrorNode(root, op, node, index, pre_node, func_graph, instance_name, param_name);
      auto comm_op = node->input(index)->cast<CNodePtr>();
      // add fusion flag
      // pipeline mirror would not be set, which should be supported later
      AddCommOpFusionType(comm_op, param_node_pair.first);
    }
  }
}

void BackwardCommunication(const FuncGraphPtr &root, const OperatorInfoPtr &distribute_operator, const CNodePtr &node,
                           const std::vector<std::pair<CNodePtr, LossNodeInfo>> &sens_loss_pairs) {
  MS_EXCEPTION_IF_NULL(distribute_operator);
  MS_EXCEPTION_IF_NULL(node);
  bool is_loss_cnode =
    std::any_of(sens_loss_pairs.begin(), sens_loss_pairs.end(),
                [node](const std::pair<CNodePtr, LossNodeInfo> &element) { return element.second.loss_node == node; });
  MirrorOps mirror_ops = distribute_operator->mirror_ops();
  VirtualDivOp virtual_div_op = distribute_operator->virtual_div_op();
  // insert mirror op
  if (!mirror_ops.empty()) {
    MS_LOG(INFO) << "insert mirror op for " << distribute_operator->name();
    InsertMirrorOps(root, mirror_ops, node);
  }
  // insert virtual div op
  if (!virtual_div_op.empty() && is_loss_cnode) {
    MS_LOG(INFO) << "insert virtual div op for " << distribute_operator->name();
    InsertVirtualDivOp(virtual_div_op, node);
  }
}

std::string GetDisOpName(const std::string &prim_name) {
  std::string op_name = prim_name;
  if (!prim_name.empty() && (prim_name[0] == '_')) {
    op_name = prim_name.substr(1);
  }
  return op_name + "Info";
}
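
// For example (names chosen to match the "<op>Info" convention used by DynCreator):
//   GetDisOpName("MatMul")      == "MatMulInfo"
//   GetDisOpName("_VirtualDiv") == "VirtualDivInfo"   // a leading '_' is dropped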

OperatorInfoPtr OperatorInstanceByName(const std::string &name, const PrimitiveAttrs &attrs,
                                       const std::vector<Shapes> &shape_list) {
  if (shape_list.size() != 2) {
    MS_LOG(ERROR) << "The size of shape list is not 2";
    return nullptr;
  }
  if (name.length() == 0) {
    MS_LOG(EXCEPTION) << "Length of name is zero!";
  }
  std::string distribute_opname = GetDisOpName(name);
  if (name == GATHERV2) {
    distribute_opname = name + "PInfo";
    auto data_parallel_iter = attrs.find(DATA_PARALLEL);
    if (data_parallel_iter != attrs.end()) {
      MS_EXCEPTION_IF_NULL(data_parallel_iter->second);
      if (!data_parallel_iter->second->isa<BoolImm>()) {
        MS_LOG(EXCEPTION) << "The data_parallel flag's type is not a bool.";
      }
      bool data_parallel = data_parallel_iter->second->cast<BoolImmPtr>()->value();
      if (data_parallel) {
        distribute_opname = name + "Info";
      }
    }
  }
  OperatorInfoPtr operator_ =
    (OperatorInfoPtr)DynCreator::Instance().Create(distribute_opname, shape_list[0], shape_list[1], attrs, TOTAL_OPS);
  if (operator_ == nullptr) {
    MS_LOG(INFO) << "Create " << name << " failed";
    return nullptr;
  }
  std::string origin_name = operator_->name();
  operator_->set_name(origin_name + std::to_string(TOTAL_OPS));
  MS_LOG(INFO) << "Successfully created operator " << origin_name;
  ++TOTAL_OPS;
  return operator_;
}

OperatorInfoPtr OperatorInstance(const PrimitivePtr &prim, const PrimitiveAttrs &attrs,
                                 const std::vector<Shapes> &shape_list) {
  MS_EXCEPTION_IF_NULL(prim);
  OperatorInfoPtr operator_ = OperatorInstanceByName(prim->name(), attrs, shape_list);
  if (operator_ == nullptr) {
    if (IsInBatchParallelBlackList(prim)) {
      MS_LOG(EXCEPTION) << "Operator " << prim->name() << " is not supported yet in auto parallel mode.";
    }
    MS_LOG(INFO) << "Create " << prim->name() << " failed, use batch parallel";
    operator_ = OperatorInstanceByName(BATCH_PARALLEL, attrs, shape_list);
    MS_EXCEPTION_IF_NULL(operator_);
  }
  return operator_;
}

OperatorInfoPtr NewOperatorInstance(const PrimitivePtr &prim, const PrimitiveAttrs &attrs,
                                    std::vector<Shapes> shape_list) {
  OperatorInfoPtr operator_ = OperatorInstance(prim, attrs, shape_list);
  for (size_t i = 0; i < shape_list[0].size(); ++i) {
    MS_LOG(INFO) << "No: " << i << " input's shape: " << ShapeToString(shape_list[0][i]);
  }
  return operator_;
}

StrategyPtr ExtractStrategy(std::unordered_map<std::string, ValuePtr> attrs) {
  ValueTuplePtr var = attrs[STRATEGY]->cast<ValueTuplePtr>();
  StrategyPtr strategyPtr;
  int64_t stage_id = g_device_manager->stage_id();
  MS_LOG(INFO) << "Extract information: strategy " << attrs[STRATEGY]->ToString();
  if (var == nullptr) {
    MS_LOG(EXCEPTION) << "Strategy value is nullptr";
  }
  if (var->size() > 0) {
    std::vector<ValuePtr> elements = var->value();
    Strategys strategy;
    for (uint64_t index = 0; index < elements.size(); ++index) {
      Dimensions dim;
      if (elements[index]->isa<ValueSequeue>()) {
        ValueTuplePtr value_tuple = elements[index]->cast<ValueTuplePtr>();
        std::vector<ValuePtr> value_vector = value_tuple->value();
        (void)std::transform(value_vector.begin(), value_vector.end(), std::back_inserter(dim),
                             [](const ValuePtr &value) { return static_cast<int64_t>(GetValue<int64_t>(value)); });
        strategy.push_back(dim);
      } else {
        MS_LOG(EXCEPTION) << "Failure: Strategy's format is wrong! Need ValueSequence";
      }
    }
    if (strategy.empty()) {
      MS_LOG(EXCEPTION) << "ExtractStrategy: failed to extract strategy";
    }
    strategyPtr = NewStrategy(stage_id, strategy);
  }
  return strategyPtr;
}
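
// Worked example (hypothetical values): a STRATEGY attr configured as the nested
// tuple ((2, 4), (4, 1)) for a two-input MatMul arrives here as a ValueTuple of
// ValueTuples and is unpacked into
//   Strategys strategy = {{2, 4}, {4, 1}};  // one Dimensions vector per input
// which is then wrapped with the current stage id via NewStrategy(stage_id, strategy).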

Shapes GetValueListShape(const AnfNodePtr &node) {
  Shapes shapes;
  std::vector<ValuePtr> inputs_seq;
  if (IsValueNode<ValueList>(node)) {
    inputs_seq = node->cast<ValueNodePtr>()->value()->cast<ValueListPtr>()->value();
  } else if (IsValueNode<ValueTuple>(node)) {
    inputs_seq = node->cast<ValueNodePtr>()->value()->cast<ValueTuplePtr>()->value();
  } else {
    MS_LOG(EXCEPTION) << "node is neither ValueList nor ValueTuple";
  }
  for (auto &ele : inputs_seq) {
    auto tensor = ele->cast<tensor::TensorPtr>();
    MS_EXCEPTION_IF_NULL(tensor);
    auto one_shape = tensor->shape();
    shapes.push_back(one_shape);
  }
  return shapes;
}

Shapes GetNodeShape(const AnfNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  Shapes shapes;
  if (IsValueNode<ValueList>(node) || IsValueNode<ValueTuple>(node)) {
    return GetValueListShape(node);
  }
  BaseShapePtr base_shape_ptr = node->Shape();
  if (node->isa<CNode>()) {
    auto cnode = node->cast<CNodePtr>();
    if (IsValueNode<Primitive>(cnode->input(0))) {
      PrimitivePtr prim = GetValueNode<PrimitivePtr>(cnode->input(0));
      MS_EXCEPTION_IF_NULL(prim);
      if (prim->name() == MAKEREF) {
        AnfNodePtr ref_node = cnode->input(1);
        auto func_graph = cnode->func_graph();
        MS_EXCEPTION_IF_NULL(ref_node);
        MS_EXCEPTION_IF_NULL(func_graph);
        return GetRefKeyNodeShape(ref_node, func_graph);
      }
    }
    if (cnode->input(0)->isa<CNode>()) {
      if (cnode->inputs().size() < 2) {
        MS_LOG(EXCEPTION) << "GetNodeShape: " << node->ToString() << " size is smaller than 2";
      }
      base_shape_ptr = cnode->input(1)->Shape();
    }
  }
  if (base_shape_ptr == nullptr) {
    MS_LOG(EXCEPTION) << "GetNodeShape: " << node->ToString() << " shape_ptr is nullptr, full name is "
                      << node->fullname_with_scope();
  }
  auto tuple_shape_ptr = dyn_cast<abstract::SequeueShape>(base_shape_ptr);
  if (tuple_shape_ptr != nullptr) {
    auto tuple_shape = tuple_shape_ptr->shape();
    for (auto &shape : tuple_shape) {
      auto each_shape = dyn_cast<abstract::Shape>(shape);
      MS_EXCEPTION_IF_NULL(each_shape);
      shapes.push_back(each_shape->shape());
    }
  } else {
    auto shape_ptr = dyn_cast<abstract::Shape>(base_shape_ptr);
    MS_EXCEPTION_IF_NULL(shape_ptr);
    shapes.push_back(shape_ptr->shape());
  }
  return shapes;
}
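
// For example (illustrative shapes): a node producing a single tensor of shape
// (32, 10) yields Shapes{{32, 10}}, while a node whose abstract shape is a sequence
// such as ((32, 10), (32,)) yields Shapes{{32, 10}, {32}} via the SequeueShape branch.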

Shapes GetRefKeyNodeShape(const AnfNodePtr &node, const FuncGraphPtr &func_graph) {
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(func_graph);
  std::vector<AnfNodePtr> parameters = FindParameterByRefKeyNode(node, func_graph);
  if (parameters.size() != 1) {
    MS_LOG(EXCEPTION) << "Find parameter by ref key node failed";
  }
  Shapes input_shapes;
  input_shapes = GetNodeShape(parameters[0]);
  if (input_shapes.size() != 1) {
    MS_LOG(EXCEPTION) << "Get input shape failed";
  }
  MS_LOG(INFO) << "The parameter shape is " << ShapeToString(input_shapes[0]);
  return input_shapes;
}

std::vector<Shapes> ExtractShape(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  Shapes shape_inputs, shape_outputs;
  std::vector<Shapes> shape_all;
  std::vector<AnfNodePtr> all_inputs = node->inputs();
  std::vector<AnfNodePtr> node_inputs{all_inputs.begin() + 1, all_inputs.end()};
  size_t inputs_size = all_inputs.size();
  for (size_t i = 1; i < inputs_size; ++i) {
    Shapes input_shapes;
    AnfNodePtr input = all_inputs[i];
    if (HasAbstractMonad(input)) {
      continue;
    }
    if (IsValueNode<RefKey>(input)) {
      auto func_graph = node->func_graph();
      MS_EXCEPTION_IF_NULL(func_graph);
      std::vector<AnfNodePtr> parameters = FindParameterByRefKeyNode(input, func_graph);
      if (parameters.size() != 1) {
        MS_LOG(EXCEPTION) << "Find parameter by ref key node failed";
      }
      std::pair<AnfNodePtr, int64_t> node_pair = std::make_pair(node, SizeToLong(i));
      g_RefMap[parameters[0]] = node_pair;
      input_shapes = GetRefKeyNodeShape(input, func_graph);
    } else if (input->isa<CNode>() || IsValueNode<Tensor>(input) || input->isa<Parameter>() ||
               ((IsValueNode<ValueList>(input) || IsValueNode<ValueTuple>(input)) && (inputs_size == 2))) {
      input_shapes = GetNodeShape(input);
    } else {
      continue;
    }
    if (input_shapes.size() != 1) {
      if (inputs_size == 2) {  // like concat
        shape_inputs = input_shapes;
        break;
      } else {
        MS_LOG(EXCEPTION) << "ExtractShape: Get input shape failed";
      }
    }
    shape_inputs.push_back(input_shapes[0]);
  }
  shape_all.push_back(shape_inputs);
  // extract out shape
  shape_outputs = GetNodeShape(node);
  shape_all.push_back(shape_outputs);
  return shape_all;
}

std::pair<AnfNodePtr, int64_t> FindParallelCareNode(const AnfNodePtr &node, int32_t recursion_num) {
  if (recursion_num >= RECURSION_LIMIT) {
    return std::make_pair(nullptr, 0);
  }
  MS_EXCEPTION_IF_NULL(node);
  FuncGraphPtr func_graph = node->func_graph();
  MS_EXCEPTION_IF_NULL(func_graph);
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  AnfNodeIndexSet node_set = manager->node_users()[node];
  for (auto &node_pair : node_set) {
    CNodePtr cnode = node_pair.first->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(cnode);
    if (!IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    ValueNodePtr prim_node_anf = cnode->input(0)->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(prim_node_anf);
    PrimitivePtr node_prim = prim_node_anf->value()->cast<PrimitivePtr>();
    MS_EXCEPTION_IF_NULL(node_prim);
    if ((node_prim->name() == DEPEND && node_pair.second != 1) || IsPrimitiveCNode(cnode, prim::kPrimReceive)) {
      continue;
    }
    if (IsParallelCareNode(cnode) && cnode->has_user_data<OperatorInfo>()) {
      return node_pair;
    } else {
      auto tmp_pair = FindParallelCareNode(node_pair.first, recursion_num + 1);
      if (tmp_pair.first != nullptr) {
        return tmp_pair;
      }
    }
  }
  return std::make_pair(nullptr, 0);
}

std::pair<AnfNodePtr, int64_t> FindSubGraph(const FuncGraphPtr &graph, const AnfNodePtr &parameter) {
  MS_EXCEPTION_IF_NULL(graph);
  MS_EXCEPTION_IF_NULL(parameter);
  FuncGraphManagerPtr manager = graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  std::pair<AnfNodePtr, int64_t> prim_anf_node_pair = FindParallelCareNode(parameter, 0);
  if (prim_anf_node_pair.first != nullptr) {
    return prim_anf_node_pair;
  } else {
    AnfNodeIndexSet param_sub_set = manager->node_users()[parameter];
    for (auto &param_pair : param_sub_set) {
      CNodePtr param_cnode = param_pair.first->cast<CNodePtr>();
      AnfNodePtr graph_value_node;
      if (param_cnode->input(0)->isa<CNode>()) {
        graph_value_node = param_cnode->input(0)->cast<CNodePtr>()->input(1);
      } else {
        graph_value_node = param_cnode->input(0);
      }
      if (!IsValueNode<FuncGraph>(graph_value_node)) {
        continue;
      }
      FuncGraphPtr graph_sub = GetValueNode<FuncGraphPtr>(graph_value_node);
      auto parameters = graph_sub->parameters();
      if (LongToSize(param_pair.second - 1) >= parameters.size()) {
        MS_LOG(EXCEPTION) << "The index is out of range, index is " << param_pair.second - 1 << ", vector size is "
                          << parameters.size();
      }
      std::pair<AnfNodePtr, int64_t> res = FindSubGraph(graph_sub, parameters[LongToSize(param_pair.second - 1)]);
      if (res.first != nullptr) {
        return res;
      }
    }
  }
  return std::make_pair(nullptr, 0);
}

static void InsertAllGatherOp(const FuncGraphPtr &root, const std::string &group, const std::pair<AnfNodePtr, int> &res,
                              const AnfNodePtr &node) {
  MS_EXCEPTION_IF_NULL(res.first);
  MS_EXCEPTION_IF_NULL(node);
  auto cnode = res.first->cast<CNodePtr>();
  auto graph = cnode->func_graph();
  MS_EXCEPTION_IF_NULL(graph);
  auto cnode_prim = GetValueNode<PrimitivePtr>(cnode->input(0));
  MS_EXCEPTION_IF_NULL(cnode_prim);
  int64_t grad_accumulation_step = ParallelContext::GetInstance()->grad_accumulation_step();
  Operator op;
  CNodePtr allgather;
  if (grad_accumulation_step > 1) {
    op = CreateMiniStepAllGatherOp(group);
    auto param_name = node->cast<ParameterPtr>()->name();
    if (cnode_prim->name() == CAST) {
      allgather = ReplaceMirrorNode(root, op, cnode, graph, PARALLEL_OPTIMIZER_ALLGATHER, param_name);
    } else {
      InsertMirrorNode(root, op, cnode, res.second, node, graph, PARALLEL_OPTIMIZER_ALLGATHER, param_name);
      allgather = cnode->input(res.second)->cast<CNodePtr>();
    }
  } else {
    op = CreateAllGatherOp(group);
    if (cnode_prim->name() == CAST) {
      allgather = ReplaceNode(op, cnode, graph, PARALLEL_OPTIMIZER_ALLGATHER);
    } else {
      InsertNode(op, cnode, res.second, node, graph, PARALLEL_OPTIMIZER_ALLGATHER);
      allgather = cnode->input(res.second)->cast<CNodePtr>();
    }
  }
  // add fusion flag
  AddCommOpFusionType(allgather, node);
  // add gradients mean
  AddCommOpMeanFlag(allgather);
}

static void ApplyParallelOptOnParam(const FuncGraphPtr &root, const AnfNodePtr &parameter,
                                    const std::string &opt_shard_group) {
  if (opt_shard_group.empty()) {
    return;
  }
  FuncGraphManagerPtr manager = root->manager();
  MS_EXCEPTION_IF_NULL(manager);
  auto param_sub_set = manager->node_users()[parameter];
  for (auto &param_pair : param_sub_set) {
    auto cnode = param_pair.first->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(cnode);
    if (cnode->in_forward_flag()) {
      OperatorInfoPtr distribute_operator = cnode->user_data<OperatorInfo>();
      if (distribute_operator == nullptr) {
        MS_LOG(WARNING) << "Parallel optimizer: " << cnode->ToString() << " 's OperatorInfoPtr is nullptr";
      } else if (IntToSize(param_pair.second - 1) >= distribute_operator->inputs_tensor_info().size()) {
        MS_LOG(EXCEPTION) << "The index is out of range, index is " << param_pair.second - 1 << ", vector size is "
                          << distribute_operator->inputs_tensor_info().size();
      }
      // insert allgather operator between shard parameter and cnode
      InsertAllGatherOp(root, opt_shard_group, param_pair, parameter);
      MS_LOG(INFO) << "Parallel optimizer is applied between " << parameter->ToString() << " and " << cnode->ToString();
    }
  }
}

// When this function returns a non-empty string, parallel optimizer is applied on this parameter.
std::string SetParallelShape(const AnfNodePtr &parameter, const std::pair<AnfNodePtr, int64_t> &res) {
  MS_EXCEPTION_IF_NULL(parameter);
  AbstractBasePtr abstract = parameter->abstract();
  MS_EXCEPTION_IF_NULL(abstract);
  MS_LOG(DEBUG) << "SetParallelShape " << parameter->ToString() << " shape " << parameter->Shape()->ToString();
  CNodePtr cnode = res.first->cast<CNodePtr>();
  MS_EXCEPTION_IF_NULL(cnode);
  OperatorInfoPtr distribute_operator = cnode->user_data<OperatorInfo>();
  if (distribute_operator == nullptr) {
    MS_LOG(EXCEPTION) << "Failure: node " << cnode->ToString() << " 's OperatorInfoPtr is nullptr";
  }
  if (LongToSize(res.second - 1) >= distribute_operator->inputs_tensor_info().size()) {
    MS_LOG(EXCEPTION) << "The index is out of range, index is " << res.second - 1 << ", vector size is "
                      << distribute_operator->inputs_tensor_info().size();
  }
  TensorInfo tensorinfo_in = distribute_operator->inputs_tensor_info()[LongToSize(res.second - 1)];
  TensorLayout tensor_layout = tensorinfo_in.tensor_layout();
  Shape slice_shape = tensor_layout.slice_shape().array();
  std::string opt_shard_group;
  MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance());
  bool enable_parallel_optimizer = ParallelContext::GetInstance()->enable_parallel_optimizer();
  if (enable_parallel_optimizer) {
    if (!ParameterRequireGrad(parameter)) {
      // only trainable parameters need parallel optimizer
      MS_LOG(INFO) << "Parallel optimizer: " << parameter->ToString() << " is not a trainable parameter.";
    } else if (parameter->cast<ParameterPtr>()->param_info() &&
               !parameter->cast<ParameterPtr>()->param_info()->parallel_optimizer()) {
      MS_LOG(INFO) << "Parallel optimizer: " << parameter->ToString() << " does not need weight shard.";
    } else if (tensor_layout.GenerateOptShardSliceShape() == Status::SUCCESS) {
      // get a fully sharded tensor slice shape if the weight is repeated across devices
      // and its first dimension can be divided evenly
      // apply parallel optimizer on parameters
      // create communication group for allgather operator
      slice_shape = tensor_layout.opt_shard_slice_shape();
      std::vector<Group> dev_group;
      if (distribute_operator->CreateGroupByTensorMap(tensor_layout.origin_tensor_map().array(), &dev_group) ==
            Status::SUCCESS &&
          !dev_group.empty()) {
        opt_shard_group = dev_group[0].name();
        // set communication group in tensor layout for checkpoint saving
        tensor_layout.set_opt_shard_group(opt_shard_group);
        MS_LOG(INFO) << "Parallel optimizer: create group " << opt_shard_group << " for " << parameter->ToString()
                     << " success.";
      } else {
        MS_LOG(WARNING) << "Parallel optimizer: create group for " << parameter->ToString() << " failed.";
      }
    } else {
      MS_LOG(INFO) << "Parallel optimizer: " << parameter->ToString() << "'s shape does not satisfy the conditions.";
    }
  }
  MS_LOG(INFO) << "SetParallelShape slice_shape " << parameter->ToString() << " shape "
               << MakeValue(slice_shape)->ToString() << ", op name is " << distribute_operator->name();
  std::shared_ptr<abstract::BaseShape> parallel_shape = std::make_shared<abstract::Shape>(slice_shape);
  MS_EXCEPTION_IF_NULL(parallel_shape);
  // Don't modify it in place, as the pointer of this AbstractValue may be used as a cache key in StaticAnalysis.
  auto cloned_abstract = abstract->Clone();
  MS_EXCEPTION_IF_NULL(cloned_abstract);
  cloned_abstract->set_shape(parallel_shape);
  parameter->set_abstract(cloned_abstract);
  ParameterPtr parameter_ptr = parameter->cast<ParameterPtr>();
  MS_EXCEPTION_IF_NULL(parameter_ptr);
  parameter_ptr->set_user_data<TensorLayout>(std::make_shared<TensorLayout>(tensor_layout));
  return opt_shard_group;
}
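
// Worked example (illustrative numbers): a trainable parameter of shape [64, 128]
// whose model-parallel layout yields slice_shape [64, 32] on 4 devices, with that
// slice repeated on 2 replica devices. If GenerateOptShardSliceShape succeeds, the
// parallel optimizer splits the first dimension across the 2 replicas as well, so
// the stored slice becomes [32, 32] and opt_shard_group names the 2-device allgather
// group that reassembles the full slice in the forward pass.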

void CoverSliceShape(const FuncGraphPtr &root) {
  MS_EXCEPTION_IF_NULL(root);
  auto parameters = root->parameters();
  for (auto &parameter : parameters) {
    MS_EXCEPTION_IF_NULL(parameter->Shape());
    auto iter = g_RefMap.find(parameter);
    if (iter != g_RefMap.end()) {
      std::string group = SetParallelShape(parameter, g_RefMap[parameter]);
      // find all forward nodes that use the parameter in graphs and insert allgather if group is not empty
      ApplyParallelOptOnParam(root, parameter, group);
      continue;
    }
    std::pair<AnfNodePtr, int64_t> res = FindSubGraph(root, parameter);
    if (res.first == nullptr) {
      MS_LOG(INFO) << "Parameter " << parameter->ToString() << " does not need to set parallel shape";
    } else {
      std::string group = SetParallelShape(parameter, res);
      // find all forward nodes that use the parameter in graphs and insert allgather if group is not empty
      ApplyParallelOptOnParam(root, parameter, group);
      MS_LOG(DEBUG) << "Parameter " << parameter->ToString() << " shape " << parameter->Shape()->ToString();
    }
  }
  g_RefMap.clear();
}

void SetClonedTensorShapeForOptimizer(const FuncGraphPtr &root) {
  MS_EXCEPTION_IF_NULL(root);
  for (auto &cloned_parameter_node : root->parameters()) {
    MS_EXCEPTION_IF_NULL(cloned_parameter_node);
    auto cloned_parameter = cloned_parameter_node->cast<ParameterPtr>();
    MS_EXCEPTION_IF_NULL(cloned_parameter);
    if (!ParameterIsCloned(cloned_parameter_node)) {
      continue;
    }
    auto param_value = cloned_parameter->param_info();
    if (param_value == nullptr) {
      continue;
    }
    // get the cloned index
    int64_t cloned_index = param_value->cloned_index();
    // find the parameter this one was cloned from
    bool found_be_cloned_parameter = false;
    ParameterPtr cloned_from_parameter = nullptr;
    AnfNodePtr cloned_from_node = nullptr;
    for (auto &be_cloned_parameter_node : root->parameters()) {
      MS_EXCEPTION_IF_NULL(be_cloned_parameter_node);
      auto be_cloned_parameter = be_cloned_parameter_node->cast<ParameterPtr>();
      MS_EXCEPTION_IF_NULL(be_cloned_parameter);
      if (!be_cloned_parameter->has_default()) {
        continue;
      }
      auto param_value_in = be_cloned_parameter->param_info();
      if (param_value_in == nullptr) {
        continue;
      }
      if (!param_value_in->be_cloned()) {
        continue;
      }
      // get the index list of the cloned source
      auto &be_cloned_index = param_value_in->be_cloned_index();
      if (std::find(be_cloned_index.begin(), be_cloned_index.end(), cloned_index) != be_cloned_index.end()) {
        found_be_cloned_parameter = true;
        cloned_from_parameter = be_cloned_parameter;
        cloned_from_node = be_cloned_parameter_node;
      }
    }
    if (found_be_cloned_parameter) {
      // set the shape and tensor layout for the cloned parameter
      std::string param_name = cloned_parameter_node->cast<ParameterPtr>()->name();
      cloned_parameter->set_user_data<TensorLayout>(cloned_from_parameter->user_data<TensorLayout>());
      MS_EXCEPTION_IF_NULL(cloned_parameter_node->abstract());
      MS_EXCEPTION_IF_NULL(cloned_from_node->abstract());
      auto cloned_abstract = cloned_parameter_node->abstract()->Clone();
      MS_EXCEPTION_IF_NULL(cloned_abstract);
      if (param_name.find(ACCU_GRADS) != std::string::npos) {
        auto slice_shape = cloned_from_parameter->user_data<TensorLayout>()->slice_shape().array();
        std::shared_ptr<abstract::BaseShape> parallel_shape = std::make_shared<abstract::Shape>(slice_shape);
        MS_EXCEPTION_IF_NULL(parallel_shape);
        cloned_abstract->set_shape(parallel_shape);
      } else {
        cloned_abstract->set_shape(cloned_from_node->abstract()->GetShapeTrack());
      }
      cloned_parameter_node->set_abstract(cloned_abstract);
      MS_LOG(INFO) << "The parameter: " << cloned_parameter->name()
                   << " is cloned, the source parameter is: " << cloned_from_parameter->name()
                   << ", clone index is: " << cloned_index;
    } else {
      MS_LOG(EXCEPTION) << "The parameter: " << cloned_parameter->name() << " is cloned, cloned index is "
                        << cloned_index << ", but the source parameter was not found";
    }
  }
}

void SetVirtualDatasetStrategy(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance());
  bool full_batch = ParallelContext::GetInstance()->full_batch();
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(node->input(0));
  MS_EXCEPTION_IF_NULL(prim);
  if (prim->name() == VIRTUAL_DATA_SET) {
    CheckGlobalDeviceManager();
    int64_t dev_num;
    if (full_batch) {
      dev_num = 1;
    } else {
      dev_num = SizeToLong(g_device_manager->stage_device_num());
    }
    auto attrs_temp = prim->attrs();
    std::vector<Shapes> shape_list = ExtractShape(node);
    if (shape_list.empty()) {
      MS_LOG(EXCEPTION) << "Failure: node " << node->ToString() << " failed to extract shape";
    }
    std::vector<ValuePtr> elements;
    for (size_t i = 0; i < shape_list[0].size(); i++) {
      if (shape_list[0][i].empty()) {
        MS_LOG(EXCEPTION) << "shape_list[ " << i << " ].size() is zero";
      }
      Dimensions input_strategy = {dev_num};
      for (size_t j = 1; j < shape_list[0][i].size(); j++) {
        input_strategy.push_back(1);
      }
      elements.push_back(MakeValue(input_strategy));
    }
    ValueTuplePtr strategy = std::make_shared<ValueTuple>(elements);
    attrs_temp[STRATEGY] = strategy;
    (void)prim->SetAttrs(attrs_temp);
  }
}
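
// Worked example (illustrative): with full_batch off and 8 devices in the stage, a
// VirtualDataset input of shape (32, 3, 224, 224) receives the generated strategy
//   ((8, 1, 1, 1))   // split only the batch dimension across dev_num devices
// which is attached to the primitive as its STRATEGY attr.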

// find previous parallel care node.
bool FindPreNodes(const AnfNodePtr &node, vector<std::string> *unique_ids) {
  MS_EXCEPTION_IF_NULL(unique_ids);
  // if the previous node is a parameter, handle it outside this function.
  if (node->isa<Parameter>()) {
    return false;
  }
  if (!node->isa<CNode>()) {
    return false;
  }
  CNodePtr cnode = node->cast<CNodePtr>();
  if (!IsValueNode<Primitive>(cnode->input(0))) {
    return false;
  }
  ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  PrimitivePtr prim = prim_anf_node->value()->cast<PrimitivePtr>();
  if (IsParallelCareNode(cnode) && prim->name() != MAKE_TUPLE && prim->name() != MAKE_LIST) {
    unique_ids->push_back(cnode->UniqueId());
    return true;
  }
  bool find = false;
  for (size_t index = 0; index < cnode->inputs().size(); ++index) {
    if (prim->name() == DEPEND && index != 1) {
      continue;
    }
    if (FindPreNodes(cnode->inputs()[index], unique_ids)) {
      find = true;
      continue;
    }
  }
  return find;
}

void FindLastNodesUniqueId(const std::vector<AnfNodePtr> &all_nodes, std::vector<std::string> *unique_ids) {
  MS_EXCEPTION_IF_NULL(unique_ids);
  for (auto &node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    if ((cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    if (prim->name() == RETURN) {
      if (!FindPreNodes(cnode, unique_ids)) {
        MS_LOG(WARNING) << "cannot find the last parallel care node in eval graph";
      }
    }
  }
}

StrategyPtr GenerateBatchParallelStrategy(const OperatorInfoPtr operator_, const PrimitivePtr prim) {
  MS_EXCEPTION_IF_NULL(operator_);
  MS_EXCEPTION_IF_NULL(prim);
  StrategyPtr strategyPtr;
  std::shared_ptr<Strategys> strategy_v_ptr = operator_->GenerateBatchStrategies();
  MS_EXCEPTION_IF_NULL(strategy_v_ptr);
  strategyPtr = NewStrategy(0, *strategy_v_ptr);
  std::vector<ValuePtr> elements;
  for (size_t i = 0; i < strategy_v_ptr->size(); i++) {
    elements.push_back(MakeValue((*strategy_v_ptr)[i]));
  }
  ValueTuplePtr strategy = std::make_shared<ValueTuple>(elements);
  // display the strategy generated by batch parallel
  auto attrs = prim->attrs();
  attrs[GEN_STRATEGY] = strategy;
  (void)prim->SetAttrs(attrs);
  MS_LOG(INFO) << "prim " << prim->name() << " batch parallel strategy is " << attrs[GEN_STRATEGY]->ToString();
  return strategyPtr;
}

void SetLastNodeStrategy(const StrategyPtr strategyPtr) {
  auto strategys = strategyPtr->GetInputDim();
  for (size_t i = 0; i < strategys.size(); ++i) {
    for (size_t j = 0; j < strategys[i].size(); ++j) {
      strategys[i][j] = 1;
    }
  }
  strategyPtr->ResetInputs(strategys);
}

static bool CheckExtractInfomation(const CNodePtr &cnode) {
  if ((cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0))) {
    return false;
  }
  ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  if ((prim->name() == MAKE_TUPLE) || (prim->name() == MAKE_LIST) || (prim->name() == RECEIVE)) {
    return false;
  }
  if (!IsParallelCareNode(cnode)) {
    return false;
  }
  return true;
}

void ExtractInformation(const std::vector<AnfNodePtr> &all_nodes, bool is_training) {
  // load strategy map from checkpoint
  StrategyMap stra_map;
  if (StrategyCheckpoint::GetInstance().LoadCheckPointOn() &&
      (StrategyCheckpoint::GetInstance().Load(&stra_map) != SUCCESS)) {
    MS_LOG(EXCEPTION) << "Load strategy checkpoint failed";
  }
  vector<std::string> last_forward_node_ids;
  if (!is_training) {
    FindLastNodesUniqueId(all_nodes, &last_forward_node_ids);
    MS_LOG(INFO) << "there are " << last_forward_node_ids.size() << " output nodes in eval/predict";
  }
  for (auto &node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    if (!CheckExtractInfomation(cnode)) {
      continue;
    }
    SetVirtualDatasetStrategy(cnode);
    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    auto attrs = prim->attrs();
    MS_LOG(INFO) << "extract information: node: " << node->ToString() << " prim " << prim->name();
    std::vector<Shapes> shape_list = ExtractShape(cnode);
    if (shape_list.empty()) {
      MS_LOG(EXCEPTION) << "Failure: node " << node->ToString() << " failed to extract shape";
    }
    OperatorInfoPtr operator_ = OperatorInstance(prim, attrs, shape_list);
    MS_EXCEPTION_IF_NULL(operator_);
    auto &inputs = cnode->inputs();
    std::vector<ValuePtr> input_value;
    for (size_t index = 1; index < inputs.size(); ++index) {
      if (inputs[index]->isa<ValueNode>()) {
        input_value.push_back(GetValueNode(inputs[index]));
        continue;
      }
      input_value.emplace_back(nullptr);
    }
    StrategyPtr strategyPtr = nullptr;
    (*operator_).set_input_value(input_value);
    (*operator_).set_outputs_dtype(cnode->Type());
    (*operator_).set_cnode(cnode);
    if (prim->name() == RESHAPE) {
      cnode->set_user_data<OperatorInfo>(operator_);
      continue;
    }
    // load strategy checkpoint
    // key of strategy map
    std::string strategy_key_name = "";
    auto param_names = NodeParameterName(cnode);
    if (!param_names.empty()) {
      strategy_key_name = prim->name() + "_" + param_names[0].first;
    }
    bool load_strategy_from_ckpt =
      StrategyCheckpoint::GetInstance().LoadCheckPointOn() && stra_map.find(strategy_key_name) != stra_map.end();
    bool is_last_nodes = std::find(last_forward_node_ids.begin(), last_forward_node_ids.end(), cnode->UniqueId()) !=
                         last_forward_node_ids.end();
    bool full_batch = ParallelContext::GetInstance()->full_batch();
    if ((is_last_nodes && !full_batch) || (!StrategyFound(attrs) && !load_strategy_from_ckpt)) {
      MS_LOG(INFO) << "ExtractInformation: the strategy of node " << node->ToString() << " prim " << prim->name()
                   << " is empty, using batch parallel";
      strategyPtr = GenerateBatchParallelStrategy(operator_, prim);
    } else if (StrategyFound(attrs)) {
      strategyPtr = ExtractStrategy(attrs);
    } else {
      strategyPtr = stra_map[strategy_key_name];
    }
    MS_EXCEPTION_IF_NULL(strategyPtr);
    if (is_last_nodes && full_batch) {
      SetLastNodeStrategy(strategyPtr);
    }
    if (operator_->Init(strategyPtr) == FAILED) {
      MS_LOG(EXCEPTION) << "Failure: operator " << prim->name() << " init failed";
    }
    cnode->set_user_data<OperatorInfo>(operator_);
  }
}

TensorLayout GetInputLayoutFromCNode(const std::pair<AnfNodePtr, int64_t> &node_pair) {
  CNodePtr cnode = node_pair.first->cast<CNodePtr>();
  MS_EXCEPTION_IF_NULL(cnode);
  OperatorInfoPtr distribute_operator = GetDistributeOperator(cnode);
  MS_EXCEPTION_IF_NULL(distribute_operator);
  int64_t index = node_pair.second;
  if (index > SizeToLong(distribute_operator->inputs_tensor_info().size())) {
    MS_LOG(EXCEPTION) << "The index is out of range, the node_pair.second is " << index - 1 << ", the vector size is "
                      << distribute_operator->inputs_tensor_info().size();
  }
  TensorInfo tensorinfo_in = distribute_operator->inputs_tensor_info()[LongToSize(index - 1)];
  TensorLayout tensorlayout_in = tensorinfo_in.tensor_layout();
  return tensorlayout_in;
}

// if reshape's output connects to several primitives, return the first layout found
std::shared_ptr<TensorLayout> FindNextLayout(const CNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  MS_EXCEPTION_IF_NULL(cnode->func_graph());
  FuncGraphManagerPtr manager = cnode->func_graph()->manager();
  MS_EXCEPTION_IF_NULL(manager);
  AnfNodeIndexSet node_set = manager->node_users()[cnode];
  for (auto &node_pair : node_set) {
    CNodePtr use_apply = node_pair.first->cast<CNodePtr>();
    if (use_apply == nullptr || !IsValueNode<Primitive>(use_apply->input(0))) {
      continue;
    }
    ValueNodePtr prim_anf_node = use_apply->input(0)->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(prim_anf_node);
    PrimitivePtr node_prim = prim_anf_node->value()->cast<PrimitivePtr>();
    MS_EXCEPTION_IF_NULL(node_prim);
    MS_LOG(INFO) << "FindNextLayout prim " << node_prim->name();
    if (node_prim->name() == DEPEND && node_pair.second != 1) {
      continue;
    }
    if (IsParallelCareNode(use_apply) && use_apply->has_user_data<OperatorInfo>()) {
      MS_LOG(INFO) << "FindNextLayout success prim " << node_prim->name();
      auto layout = GetInputLayoutFromCNode(node_pair);
      return std::make_shared<TensorLayout>(layout);
    }
    MS_LOG(DEBUG) << "FindNextLayout failed prim " << node_prim->name() << " " << IsParallelCareNode(use_apply)
                  << " " << use_apply->has_user_data<OperatorInfo>();
    auto layout_ptr = FindNextLayout(use_apply);
    if (layout_ptr) {
      return layout_ptr;
    }
  }
  MS_LOG(WARNING) << "FindNextLayout returns nullptr; if reshape is not the last primitive, there must be some error";
  return nullptr;
}

std::shared_ptr<TensorLayout> GetOutputLayoutFromCNode(const CNodePtr &cnode, size_t output_index) {
  MS_EXCEPTION_IF_NULL(cnode);
  OperatorInfoPtr distribute_operator = GetDistributeOperator(cnode);
  MS_EXCEPTION_IF_NULL(distribute_operator);
  if (distribute_operator->outputs_tensor_info().size() <= output_index) {
    MS_LOG(EXCEPTION) << "outputs_tensor_info size is " << distribute_operator->outputs_tensor_info().size()
                      << ", must be greater than output_index " << output_index;
  }
  TensorInfo tensorinfo_out = distribute_operator->outputs_tensor_info()[output_index];
  TensorLayout tensorlayout_out = tensorinfo_out.tensor_layout();
  return std::make_shared<TensorLayout>(tensorlayout_out);
}

std::shared_ptr<TensorLayout> FindPrevParallelCareNodeLayout(const AnfNodePtr &node, size_t output_index) {
  if (!node->isa<CNode>()) {
    return nullptr;
  }
  CNodePtr cnode = node->cast<CNodePtr>();
  if (!IsValueNode<Primitive>(cnode->input(0))) {
    return nullptr;
  }
  if (IsParallelCareNode(cnode) && cnode->has_user_data<OperatorInfo>()) {
    auto layout_ptr = GetOutputLayoutFromCNode(cnode, output_index);
    if (!layout_ptr) {
      MS_LOG(EXCEPTION) << "Failure: GetLayoutFromCNode failed";
    }
    return layout_ptr;
  }
  return nullptr;
}

std::shared_ptr<TensorLayout> FindParameterNextLayout(const AnfNodePtr &node) {
  FuncGraphManagerPtr manager = node->func_graph()->manager();
  MS_EXCEPTION_IF_NULL(manager);
  AnfNodeIndexSet node_set = manager->node_users()[node];
  for (auto &node_pair : node_set) {
    if (IsPrimitiveCNode(node_pair.first, prim::kPrimLoad)) {
      auto layout_param = FindParameterNextLayout(node_pair.first);
      if (!layout_param) {
        continue;
      }
      return layout_param;
    }
    CNodePtr use_apply = node_pair.first->cast<CNodePtr>();
    if (use_apply == nullptr || !IsValueNode<Primitive>(use_apply->input(0))) {
      continue;
    }
    ValueNodePtr prim_anf_node = use_apply->input(0)->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(prim_anf_node);
    PrimitivePtr node_prim = prim_anf_node->value()->cast<PrimitivePtr>();
    MS_EXCEPTION_IF_NULL(node_prim);
    if ((node_prim->name() == DEPEND && node_pair.second != 1) || node_prim->name() == RESHAPE) {
      continue;
    }
    if (IsParallelCareNode(use_apply) && use_apply->has_user_data<OperatorInfo>()) {
      auto layout = GetInputLayoutFromCNode(node_pair);
      return std::make_shared<TensorLayout>(layout);
    }
  }
  return nullptr;
}

std::shared_ptr<TensorLayout> CreateParameterLayout(const AnfNodePtr &node) {
  // Create DataParallel tensor layout for parameter (support WideDeep).
  auto next_layout = FindParameterNextLayout(node);
  if (next_layout != nullptr) {
    return next_layout;
  }
  CheckGlobalDeviceManager();
  int64_t dev_num = g_device_manager->stage_device_num();
  TensorLayout input_tensor_layout;
  // create input_shape
  Shapes inputs_shape = GetNodeShape(node);
  Shape input_shape_array = inputs_shape[0];
  if (input_shape_array.empty()) {
    MS_LOG(EXCEPTION) << "Don't support reshaping a scalar parameter.";
  }
  // create tensor_map
  size_t shape_size = input_shape_array.size();
  TensorMap input_tensor_map_array(SizeToLong(shape_size) - 1, -1);
  input_tensor_map_array.insert(input_tensor_map_array.begin(), 0);
  // create dev_matrix
  Shape dev_matrix_array = {dev_num};
  if (input_tensor_layout.InitFromVector(dev_matrix_array, input_tensor_map_array, input_shape_array) != SUCCESS) {
    MS_LOG(EXCEPTION) << "Create tensor layout for parameter failed.";
  }
  return std::make_shared<TensorLayout>(input_tensor_layout);
}
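
// Worked example (illustrative): for a parameter of shape [1024, 64] on an 8-device
// stage, this builds
//   dev_matrix = [8]
//   tensor_map = [0, -1]   // dim 0 mapped to the only device axis, dim 1 replicated
// i.e. a plain data-parallel layout whose slice shape is [128, 64].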

RedistributionOpListPtr InferSensRedistribution(const AnfNodePtr &node, const TensorLayout &loss_layout) {
  MS_EXCEPTION_IF_NULL(node);
  TensorRedistribution tensor_redistribution;
  // create a stand-alone layout: TensorMap: [all -1], dev_matrix: [dev_num].
  CheckGlobalDeviceManager();
  int64_t dev_num = g_device_manager->stage_device_num();
  TensorLayout stand_alone_layout;
  Shapes inputs_shape = GetNodeShape(node);
  if (inputs_shape.empty()) {
    MS_LOG(EXCEPTION) << "InferSensRedistribution failed because the inputs shape is empty.";
  }
  Shape input_shape_array = inputs_shape[0];
  if (input_shape_array.empty()) {
    MS_LOG(INFO) << "No need of redistribution for sens.";
    return nullptr;
  }
  // TensorMap
  TensorMap stand_alone_tensor_map_array(SizeToLong(input_shape_array.size()), -1);
  // Dev_matrix
  Shape dev_matrix_array = {dev_num};
  if (stand_alone_layout.InitFromVector(dev_matrix_array, stand_alone_tensor_map_array, input_shape_array) == FAILED) {
    MS_LOG(EXCEPTION) << "Create tensor layout for Sens failed.";
  }
  // Infer the redistribution op list between the stand-alone and loss layouts.
  RankList dev_list = g_device_manager->GetDeviceListInThisStage();
  if (tensor_redistribution.Init(stand_alone_layout, loss_layout, dev_list) == FAILED) {
    MS_LOG(EXCEPTION) << "Redistribution for Sens init failed.";
  }
  RedistributionOpListPtr sens_redistribution_list = tensor_redistribution.InferTensorRedistributionOperatorList();
  MS_EXCEPTION_IF_NULL(sens_redistribution_list);
  return sens_redistribution_list;
}

std::shared_ptr<TensorLayout> FindPrevLayout(const AnfNodePtr &node) {
  if (node->isa<Parameter>()) {
    return CreateParameterLayout(node);
  }
  if (!node->isa<CNode>()) {
    return nullptr;
  }
  CNodePtr cnode = node->cast<CNodePtr>();
  if (!IsValueNode<Primitive>(cnode->input(0))) {
    return nullptr;
  }
  if (IsParallelCareNode(cnode) && cnode->has_user_data<OperatorInfo>() &&
      !IsPrimitiveCNode(node, prim::kPrimReshape)) {
    auto layout_ptr = GetOutputLayoutFromCNode(cnode, 0);
    if (!layout_ptr) {
      MS_LOG(EXCEPTION) << "Failure: GetLayoutFromCNode failed";
    }
    return layout_ptr;
  }
  ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  PrimitivePtr prim = prim_anf_node->value()->cast<PrimitivePtr>();
  if (prim->name() == prim::kTupleGetItem) {
    auto tuple_index = GetTupleGetItemIndex(cnode);
    auto layout_ptr = FindPrevParallelCareNodeLayout(cnode->input(1), LongToSize(tuple_index));
    if (!layout_ptr) {
      MS_LOG(EXCEPTION)
        << " Failure: FindPrevLayout failed, tuple_getitem is before reshape, but there does not exist a parallel "
           "care node before tuple_getitem!";
    }
    return layout_ptr;
  }
  for (size_t index = 0; index < cnode->inputs().size(); ++index) {
    if (prim->name() == DEPEND && index != 1) {
      continue;
    }
    auto layout_ptr = FindPrevLayout(cnode->inputs()[index]);
    if (!layout_ptr) {
      continue;
    }
    return layout_ptr;
  }
  MS_LOG(WARNING) << "FindPrevLayout returns nullptr; if reshape is not the first primitive, there must be some error";
  return nullptr;
}

void ReshapeInit(const std::vector<AnfNodePtr> &all_nodes) {
  for (auto &node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    if ((cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    if (!IsParallelCareNode(cnode) || !cnode->has_user_data<OperatorInfo>()) {
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    MS_EXCEPTION_IF_NULL(prim);
    OperatorInfoPtr operator_info = cnode->user_data<OperatorInfo>();
    if (operator_info == nullptr) {
      MS_LOG(EXCEPTION) << "Failure: Primitive " << prim->ToString() << " OperatorInstance is nullptr";
    }
    if (prim->name() != RESHAPE) {
      continue;
    }
    auto attrs = prim->attrs();
    if (StrategyFound(attrs)) {
      MS_LOG(EXCEPTION) << "Setting a strategy for Reshape has no effect!";
    }
    MS_ASSERT(cnode->inputs().size() == 3);
    auto prev_layout_ptr = FindPrevLayout(cnode->input(1));
    if (prev_layout_ptr) {
      auto reshape_info_ptr = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
      reshape_info_ptr->SetInputLayout(*prev_layout_ptr);
    }
    auto next_layout_ptr = FindNextLayout(cnode);
    if (next_layout_ptr) {
      auto reshape_info_ptr = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
      reshape_info_ptr->SetOutputLayout(*next_layout_ptr);
    }
    if (operator_info->Init(nullptr) == FAILED) {
      MS_LOG(EXCEPTION) << "Failure: operator " << prim->ToString() << " init failed";
    }
  }
}

CNodePtr HandleDependLoss(const CNodePtr &cnode) {
  // Handle return->depend->loss
  auto prim = GetValueNode<PrimitivePtr>(cnode->input(0));
  MS_EXCEPTION_IF_NULL(prim);
  if (prim->name() == DEPEND) {
    auto depend_before = cnode->input(1)->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(depend_before);
    return HandleDependLoss(depend_before);
  }
  return cnode;
}
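
// For example, a chain such as
//   return -> Depend -> Depend -> loss_cnode
// is unwrapped recursively through input(1) until the first non-Depend cnode
// (the loss itself) is reached.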
LossNodeInfo FindLossCNode(const FuncGraphPtr &func_graph) {
  LossNodeInfo loss_node_info;
  MS_EXCEPTION_IF_NULL(func_graph);
  CNodePtr return_node = func_graph->get_return();
  MS_EXCEPTION_IF_NULL(return_node);
  if (return_node->size() < 2) {
    MS_LOG(EXCEPTION) << "Failure: " << return_node->ToString() << " size is smaller than 2";
  }
  AnfNodePtr pre_node = return_node->input(1);
  MS_EXCEPTION_IF_NULL(pre_node);
  if (IsPrimitiveCNode(pre_node, prim::kPrimDepend)) {
    pre_node = pre_node->cast<CNodePtr>()->input(1);
    MS_EXCEPTION_IF_NULL(pre_node);
  }
  auto pre_cnode = pre_node->cast<CNodePtr>();
  if (pre_cnode == nullptr || !IsValueNode<Primitive>(pre_cnode->input(0))) {
    return loss_node_info;
  }
  auto prim = GetValueNode<PrimitivePtr>(pre_cnode->input(0));
  // return -> cast
  if (prim->name() == CAST && !pre_cnode->has_user_data<OperatorInfo>()) {
    pre_cnode = pre_cnode->input(1)->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(pre_cnode);
  }
  pre_cnode = HandleDependLoss(pre_cnode);
  auto current_prim = GetValueNode<PrimitivePtr>(pre_cnode->input(0));
  // notice: the GetNext op has no input
  if (INVALID_LOSS_OPS.find(current_prim->name()) != INVALID_LOSS_OPS.end()) {
    MS_LOG(INFO) << "The loss is: " << current_prim->name();
    loss_node_info.loss_node = pre_cnode;
    return loss_node_info;
  }
  // the size of a common cnode is larger than 1
  if (pre_cnode->size() < 2) {
    MS_LOG(EXCEPTION) << pre_cnode->ToString() << " size (" << pre_cnode->inputs().size() << ") is smaller than 2";
  }
  // return -> tuple_getitem -> loss
  if (current_prim->name() == prim::kTupleGetItem) {
    auto tuple_index = GetTupleGetItemIndex(pre_cnode);
    AnfNodePtr pre_pre_node = pre_cnode->input(1);
    MS_EXCEPTION_IF_NULL(pre_pre_node);
    auto pre_pre_cnode = pre_pre_node->cast<CNodePtr>();
    loss_node_info.has_tuple_getitem = true;
    loss_node_info.dout_index = tuple_index;
    loss_node_info.loss_node = pre_pre_cnode;
    return loss_node_info;
  }
  // return -> make_tuple
  if (current_prim->name() == MAKE_TUPLE) {
    MS_LOG(WARNING) << "The loss contains make_tuple, which is not supported";
    return loss_node_info;
  }
  // return -> loss
  loss_node_info.loss_node = pre_cnode;
  MS_LOG(DEBUG) << "The loss name is " << current_prim->name();
  return loss_node_info;
}
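// The return-path patterns recognized above, for orientation:
//   return -> loss
//   return -> Depend -> loss          (monad attached by auto-monad)
//   return -> Cast -> loss            (when the Cast carries no OperatorInfo)
//   return -> tuple_getitem -> loss   (multi-output loss, dout_index recorded)
//   return -> make_tuple              (unsupported, empty LossNodeInfo returned)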
TensorLayouts GetLossNodeGradOutputLayout(const LossNodeInfo &node_info) {
  TensorLayouts ret;
  auto loss_cnode = node_info.loss_node;
  MS_EXCEPTION_IF_NULL(loss_cnode);
  ValueNodePtr prim_anf_node = loss_cnode->input(0)->cast<ValueNodePtr>();
  MS_EXCEPTION_IF_NULL(prim_anf_node);
  PrimitivePtr prim = prim_anf_node->value()->cast<PrimitivePtr>();
  MS_EXCEPTION_IF_NULL(prim);
  if (INVALID_LOSS_OPS.find(prim->name()) != INVALID_LOSS_OPS.end()) {
    MS_LOG(WARNING) << "The loss name is: " << prim->name() << ", do nothing for split sens now";
    return ret;
  }
  OperatorInfoPtr operator_info = loss_cnode->user_data<OperatorInfo>();
  MS_EXCEPTION_IF_NULL(operator_info);
  TensorInfo loss_grad_tensor_info;
  size_t op_output_size = operator_info->outputs_tensor_info().size();
  MS_LOG(INFO) << "The loss name is " << operator_info->name() << ", has_tuple_getitem is "
               << node_info.has_tuple_getitem << ", the output size is " << op_output_size << ", the dout_index is "
               << node_info.dout_index;
  if ((op_output_size == 0) || (op_output_size <= LongToSize(node_info.dout_index))) {
    MS_LOG(EXCEPTION) << "The index is " << node_info.dout_index << ", but the size of outputs is " << op_output_size;
  }
  if (!node_info.has_tuple_getitem && (op_output_size > 1)) {
    MS_LOG(EXCEPTION) << "Currently, it is not supported that the sens is a tuple.";
  }
  loss_grad_tensor_info = operator_info->outputs_tensor_info()[LongToSize(node_info.dout_index)];
  ret.push_back(loss_grad_tensor_info.tensor_layout());
  return ret;
}
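// Example (hypothetical sizes): if the loss op reports two output tensor infos and the
// return path was return -> tuple_getitem(loss, 1), then dout_index is 1 and the single
// returned layout is that of output 1; SplitSens later uses it to decide how the sens
// tensor must be sliced.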
void SplitSens(const CNodePtr &grad_sens_node, const TensorLayout &loss_grad_layout) {
  MS_EXCEPTION_IF_NULL(grad_sens_node);
  if (grad_sens_node->size() <= 1) {
    MS_LOG(EXCEPTION) << "The size of grad sens node is smaller than 2";
  }
  AnfNodePtr sens_tensor_node = grad_sens_node->input(1);
  MS_EXCEPTION_IF_NULL(sens_tensor_node);
  Shapes sens_shapes = GetNodeShape(sens_tensor_node);
  if (sens_shapes.size() != 1) {
    MS_LOG(EXCEPTION) << "GetNodeShape for sens_tensor_node, output size is not 1";
  }
  // If the shape of the sens tensor is [] or [1], there is no need to split it.
  Shape sens_shape = sens_shapes[0];
  if (sens_shape.empty() || ((sens_shape.size() == 1) && (sens_shape[0] == 1))) {
    if (sens_tensor_node->isa<Parameter>()) {
      auto sens_tensor_param = sens_tensor_node->cast<ParameterPtr>();
      MS_LOG(DEBUG) << "loss layout " << loss_grad_layout.ToString();
      sens_tensor_param->set_user_data<TensorLayout>(std::make_shared<TensorLayout>(loss_grad_layout));
    }
    MS_LOG(INFO) << "The shape of sens is " << ShapeToString(sens_shape) << ", no need to split sens";
    return;
  }
  auto loss_shape = loss_grad_layout.tensor_shape().array();
  if (loss_shape != sens_shape) {
    MS_LOG(EXCEPTION) << "The shape of sens is not equal to the loss output shape, which is unsupported now. "
                      << "Sens shape is " << ShapeToString(sens_shape) << ", loss shape is "
                      << ShapeToString(loss_shape);
  }
  MS_LOG(INFO) << "The shape of sens is " << ShapeToString(sens_shape) << ", split it.";
  if (!IsValueNode<Tensor>(sens_tensor_node)) {
    if (sens_tensor_node->isa<Parameter>()) {
      MS_LOG(DEBUG) << "loss layout " << loss_grad_layout.ToString();
      AbstractBasePtr abstract = sens_tensor_node->abstract();
      MS_EXCEPTION_IF_NULL(abstract);
      auto slice_shape = loss_grad_layout.slice_shape().array();
      std::shared_ptr<abstract::BaseShape> parallel_shape = std::make_shared<abstract::Shape>(slice_shape);
      MS_EXCEPTION_IF_NULL(parallel_shape);
      auto cloned_abstract = abstract->Clone();
      MS_EXCEPTION_IF_NULL(cloned_abstract);
      cloned_abstract->set_shape(parallel_shape);
      sens_tensor_node->set_abstract(cloned_abstract);
      auto sens_tensor_param = sens_tensor_node->cast<ParameterPtr>();
      sens_tensor_param->set_user_data<TensorLayout>(std::make_shared<TensorLayout>(loss_grad_layout));
      return;
    }
    if (sens_tensor_node->isa<CNode>()) {
      auto op_list_ptr = InferSensRedistribution(sens_tensor_node, loss_grad_layout);
      if (op_list_ptr == nullptr) {
        return;
      }
      auto sens_tensor_cnode = sens_tensor_node->cast<CNodePtr>();
      auto func_graph = grad_sens_node->func_graph();
      MS_EXCEPTION_IF_NULL(func_graph);
      InsertRedistribution(op_list_ptr, grad_sens_node, func_graph, 1, sens_tensor_cnode);
      return;
    }
    MS_LOG(EXCEPTION) << "The type of the sens node is not Tensor, Parameter or CNode, which is unsupported now.";
  }
  // Use the _GetTensorSlice operator to split the sens tensor
  FuncGraphPtr func_graph = grad_sens_node->func_graph();  // only a cnode can get the graph
  MS_EXCEPTION_IF_NULL(func_graph);
  Operator op = CreateGetTensorSliceOp(loss_grad_layout);
  InsertGetTensorSliceOp(op, grad_sens_node, func_graph, 1, SPLIT_SENS);
}
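// Example (hypothetical shapes): for a loss layout whose full shape is [8, 16] and whose
// slice shape is [4, 16], a value-node sens of shape [8, 16] gets a _GetTensorSlice op
// inserted at input 1 of the grad node, while a scalar sens of shape [] or [1] is left
// untouched and only the layout user data is attached.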
void InsertForwardOps(const OperatorInfoPtr &distribute_operator, const CNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(distribute_operator);
  MS_EXCEPTION_IF_NULL(cnode);
  OperatorVector forward_op = distribute_operator->forward_op();
  if (!forward_op.empty()) {
    MS_LOG(INFO) << "Insert forward op for " << distribute_operator->name();
    ForwardCommunication(forward_op, cnode);
  }
}
void StepReplace(const OperatorInfoPtr &distribute_operator, const CNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(distribute_operator);
  MS_EXCEPTION_IF_NULL(cnode);
  // StepReplaceOp
  OperatorVector replace_op = distribute_operator->replace_op();
  if (!replace_op.empty()) {
    MS_LOG(INFO) << "StepReplaceOp " << cnode->ToString();
    StepReplaceOp(replace_op, cnode);
  }
  // StepReplaceGraph: after calling StepReplaceGraph, the cnode can not be used anymore.
  ReplaceGraphPtr replace_graph = distribute_operator->replace_graph(cnode);
  if (!replace_op.empty() && replace_graph) {
    MS_LOG(EXCEPTION) << "Only one of replace_op and replace_graph can be used";
  }
  if (replace_graph) {
    MS_LOG(INFO) << "StepReplaceGraph " << cnode->ToString();
    StepReplaceGraph(replace_graph, cnode);
  }
}
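// Note on the two replacement mechanisms used above: replace_op rewrites the cnode in
// place, while replace_graph substitutes a whole subgraph (after which the cnode must not
// be used); an operator is expected to provide at most one of the two, which the
// exception in StepReplace enforces.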
void HandleDropoutNode(const OperatorInfoPtr &distribute_operator, const CNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(distribute_operator);
  MS_EXCEPTION_IF_NULL(cnode);
  std::string op_name = distribute_operator->name();
  if (op_name.find(DROPOUT_DO_MASK) == std::string::npos) {
    return;
  }
  DropoutDoMaskInfoPtr dropout_do_mask = std::dynamic_pointer_cast<DropoutDoMaskInfo>(distribute_operator);
  MS_EXCEPTION_IF_NULL(dropout_do_mask);
  std::vector<Operator> replace_op = dropout_do_mask->GetDropoutGenMaskReplaceOp(cnode);
  if (replace_op.empty()) {
    MS_LOG(DEBUG) << "No need to replace dropout_gen_mask";
    return;
  }
  if (cnode->inputs().size() != DROPOUT_DO_MASK_CNODE_INPUT_SIZE) {
    MS_LOG(EXCEPTION) << "The size of the dropout_do_mask cnode's inputs is not " << DROPOUT_DO_MASK_CNODE_INPUT_SIZE;
  }
  ReplaceOneOp(replace_op[0], cnode->input(DROPOUT_GEN_MASK_INDEX)->cast<CNodePtr>());
}
void HandleTileNode(const OperatorInfoPtr &distribute_operator, const CNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  if (cnode->size() < 3 || !IsValueNode<Primitive>(cnode->input(0))) {
    return;
  }
  auto prim = GetValueNode<PrimitivePtr>(cnode->input(0));
  if (prim->name() != TILE) {
    return;
  }
  TileInfoPtr tile = std::dynamic_pointer_cast<TileInfo>(distribute_operator);
  MS_EXCEPTION_IF_NULL(tile);
  tile->UpdateMultiples(cnode);
}
void HandleSpecialNode(const OperatorInfoPtr &distribute_operator, const CNodePtr &cnode) {
  HandleDropoutNode(distribute_operator, cnode);
  HandleTileNode(distribute_operator, cnode);
}
std::set<FuncGraphPtr> FindForwardGraphByRootNodes(const AnfNodeSet &root_all_nodes) {
  // J->CNode->Graph
  std::set<FuncGraphPtr> graph_set;
  for (auto &node : root_all_nodes) {
    MS_EXCEPTION_IF_NULL(node);
    if (!node->isa<CNode>()) {
      continue;
    }
    auto cnode = node->cast<CNodePtr>();
    if ((cnode->size() < 2) || !IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    auto expect_j_prim = GetValueNode<PrimitivePtr>(cnode->input(0));
    if (expect_j_prim->name() != J) {
      continue;
    }
    if (IsValueNode<FuncGraph>(cnode->input(1))) {
      auto graph = GetValueNode<FuncGraphPtr>(cnode->input(1));
      MS_LOG(DEBUG) << "Found the forward graph successfully";
      graph_set.insert(graph);
      auto manager = graph->manager();
      MS_EXCEPTION_IF_NULL(manager);
      auto graph_used = manager->func_graphs_used_total(graph);
      for (auto &sub_graph : graph_used) {
        graph_set.insert(sub_graph);
      }
    }
  }
  return graph_set;
}
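// Illustrative sketch (hypothetical graph): a training graph usually contains a node like
//   %grad_fn = J(@forward_fn)
// so matching the J primitive on input 0 and a FuncGraph value on input 1 yields
// @forward_fn; func_graphs_used_total then pulls in every graph it uses transitively.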
void StepSplitSens(const std::pair<CNodePtr, LossNodeInfo> &sens_loss_pair) {
  CNodePtr sens_node = sens_loss_pair.first;
  auto loss_node = sens_loss_pair.second;
  auto loss_grad_layout = GetLossNodeGradOutputLayout(loss_node);
  if (!loss_grad_layout.empty()) {
    SplitSens(sens_node, loss_grad_layout[0]);
  }
}
// The sens node satisfies the following pattern: cnode(sens)-->cnode(tuple_getitem)-->cnode-->cnode(J)
std::vector<std::pair<CNodePtr, LossNodeInfo>> GetSensLossPairs(const FuncGraphPtr &root) {
  MS_EXCEPTION_IF_NULL(root);
  std::vector<std::pair<CNodePtr, LossNodeInfo>> sens_loss_pairs;
  for (auto &node : root->nodes()) {
    if (!node->isa<CNode>()) {
      continue;
    }
    // cnode(sens)-->cnode(tuple_getitem)
    auto sens_cnode = node->cast<CNodePtr>();
    AnfNodePtr expect_tuple_getitem = sens_cnode->input(0);
    MS_EXCEPTION_IF_NULL(expect_tuple_getitem);
    if (!expect_tuple_getitem->isa<CNode>()) {
      continue;
    }
    auto expect_tuple_getitem_cnode = expect_tuple_getitem->cast<CNodePtr>();
    if (!IsSomePrimitive(expect_tuple_getitem_cnode, prim::kTupleGetItem)) {
      continue;
    }
    // cnode(sens)-->cnode(tuple_getitem)-->cnode
    AnfNodePtr expect_anonymous = expect_tuple_getitem_cnode->input(1);
    MS_EXCEPTION_IF_NULL(expect_anonymous);
    if (!expect_anonymous->isa<CNode>()) {
      continue;
    }
    // cnode(sens)-->cnode(tuple_getitem)-->cnode-->cnode(J)
    auto expect_anonymous_cnode = expect_anonymous->cast<CNodePtr>();
    AnfNodePtr expect_j = expect_anonymous_cnode->input(0);
    MS_EXCEPTION_IF_NULL(expect_j);
    if (!expect_j->isa<CNode>()) {
      continue;
    }
    auto expect_j_cnode = expect_j->cast<CNodePtr>();
    if (!IsSomePrimitive(expect_j_cnode, J)) {
      continue;
    }
    if (!IsValueNode<FuncGraph>(expect_j_cnode->input(1))) {
      MS_LOG(EXCEPTION) << "Sens can't find the corresponding graph.";
    }
    auto func_graph = GetValueNode<FuncGraphPtr>(expect_j_cnode->input(1));
    auto loss_node_info = FindLossCNode(func_graph);
    if (loss_node_info.loss_node == nullptr) {
      MS_LOG(WARNING) << "Cannot find the loss cnode";
      continue;
    }
    std::pair<CNodePtr, LossNodeInfo> sens_loss_pair = std::make_pair(sens_cnode, loss_node_info);
    sens_loss_pairs.push_back(sens_loss_pair);
  }
  return sens_loss_pairs;
}
bool IsLastStage() {
  MS_EXCEPTION_IF_NULL(g_device_manager);
  auto stage_num = g_device_manager->stage_num();
  auto stage_id = g_device_manager->stage_id();
  return ((stage_num - 1) == stage_id);
}
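// Example (hypothetical pipeline config): with stage_num == 4, IsLastStage() is true only
// for the devices in stage_id == 3; only that stage hosts the loss, so only it splits the
// sens tensor in ParallelCommunication below.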
void ParallelCommunication(const FuncGraphPtr &root, const std::vector<AnfNodePtr> &all_nodes,
                           const FuncGraphManagerPtr &manager) {
  MS_EXCEPTION_IF_NULL(root);
  MS_EXCEPTION_IF_NULL(manager);
  TensorRedistribution tensor_redistribution;
  std::vector<std::pair<CNodePtr, LossNodeInfo>> sens_loss_pairs = GetSensLossPairs(root);
  bool has_backward = !sens_loss_pairs.empty();
  // splitting sens must happen before inserting the operators.
  for (auto &pair : sens_loss_pairs) {
    // If the shape of the grad-sens tensor is not [] or [1], use get tensor slice to handle it.
    // If the type of the sens node is not Tensor, it is unsupported now; do nothing by default.
    if (IsLastStage()) {
      StepSplitSens(pair);
    }
  }
  for (auto &node : all_nodes) {
    MS_EXCEPTION_IF_NULL(node);
    if (node->isa<CNode>()) {
      auto cnode = node->cast<CNodePtr>();
      // make_tuple is a parallel care node, but it may not have operator info
      if (!IsParallelCareNode(cnode) || !cnode->has_user_data<OperatorInfo>()) {
        continue;
      }
      OperatorInfoPtr distribute_operator = GetDistributeOperator(cnode);
      MS_EXCEPTION_IF_NULL(distribute_operator);
      // insert forward ops
      if (!IsSomePrimitive(cnode, RECEIVE)) {
        InsertForwardOps(distribute_operator, cnode);
      }
      // insert redistribution ops
      StepRedistribution(cnode, distribute_operator, cnode, tensor_redistribution, cnode);
      // insert backward ops
      if (has_backward && !IsSomePrimitive(cnode, RECEIVE)) {
        BackwardCommunication(root, distribute_operator, cnode, sens_loss_pairs);
      }
      HandleSpecialNode(distribute_operator, cnode);
    } else if (IsValueNode<Tensor>(node) || IsValueNode<ValueList>(node) || IsValueNode<ValueTuple>(node)) {
      StepSplitTensor(node, manager);
    }
  }
  for (auto &node : all_nodes) {
    MS_EXCEPTION_IF_NULL(node);
    if (node->isa<CNode>()) {
      auto cnode = node->cast<CNodePtr>();
      if (!IsParallelCareNode(cnode) || !cnode->has_user_data<OperatorInfo>() || IsSomePrimitive(cnode, RECEIVE)) {
        continue;
      }
      OperatorInfoPtr distribute_operator = GetDistributeOperator(cnode);
      MS_EXCEPTION_IF_NULL(distribute_operator);
      // StepReplace
      StepReplace(distribute_operator, cnode);
    }
  }
}
namespace {
void RevertSymbolicKeyInstance(const FuncGraphPtr &root, const AnfNodePtr &node) {
  MS_EXCEPTION_IF_NULL(root);
  MS_EXCEPTION_IF_NULL(node);
  auto symbolic_key = GetValueNode<SymbolicKeyInstancePtr>(node);
  MS_EXCEPTION_IF_NULL(symbolic_key);
  auto all_upstream_node = root->manager()->node_users()[node];
  for (auto &upstream_node : all_upstream_node) {
    FuncGraphPtr fg = upstream_node.first->func_graph();
    if (symbolic_key->node()->isa<Parameter>()) {
      for (auto &param : root->parameters()) {
        if (*param == *symbolic_key->node()) {
          AnfNodePtr reverted_node = root->NewCNode({NewValueNode(prim::kPrimEmbed), param});
          MS_EXCEPTION_IF_NULL(reverted_node);
          MS_LOG(DEBUG) << "before replace " << node->ToString() << " to node " << reverted_node->DebugString();
          (void)fg->manager()->Replace(node, reverted_node);
          MS_LOG(DEBUG) << "revert node " << node->ToString() << " to node " << reverted_node->DebugString();
        }
      }
    }
  }
}
}  // namespace
void HandleSymbolicKeyInstance(const FuncGraphPtr &root, const std::vector<AnfNodePtr> &all_nodes) {
  MS_EXCEPTION_IF_NULL(root);
  for (auto &node : all_nodes) {
    // revert SymbolicKeyInstance back to the embed() primitive
    if (IsValueNode<SymbolicKeyInstance>(node)) {
      RevertSymbolicKeyInstance(root, node);
    }
  }
}
std::vector<std::pair<std::string, int64_t>> NodeParameterName(const CNodePtr &node) {
  std::vector<AnfNodePtr> node_inputs{node->inputs()};
  std::vector<std::pair<std::string, int64_t>> param_names;
  for (int64_t i = 0; i < UlongToLong(node_inputs.size()); ++i) {
    auto input = node_inputs[i];
    if (input->isa<Parameter>()) {
      auto input_parameter = input->cast<ParameterPtr>();
      if (input_parameter->has_default() && ParameterRequireGrad(input_parameter)) {
        param_names.push_back({input_parameter->name(), i});
      }
    } else if (input->isa<CNode>()) {
      CNodePtr cnode = input->cast<CNodePtr>();
      if (!IsValueNode<Primitive>(cnode->input(0))) {
        return param_names;
      }
      if ((IsPrimitiveCNode(cnode, prim::kPrimCast) && cnode->inputs().size() > 1) ||
          IsPrimitiveCNode(cnode, prim::kPrimLoad)) {
        auto inp = cnode->input(1);
        if (inp->isa<Parameter>()) {
          auto inp_param = inp->cast<ParameterPtr>();
          if (inp_param->has_default() && ParameterRequireGrad(inp_param)) {
            param_names.push_back({inp_param->name(), i});
          }
        }
      }
    }
  }
  return param_names;
}
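// Illustrative sketch (hypothetical node): for
//   %0 = MatMul(Load(%w), Cast(%b, f16))
// this returns {{"w", 1}, {"b", 2}}, provided both parameters have defaults and require
// gradients; Load and Cast act as transparent wrappers around the parameter here.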
void CheckpointStrategy(const std::vector<AnfNodePtr> &all_nodes) {
  StrategyMap stra_map;
  TensorInfoMap tensor_info_map;
  ManualShapeMap manual_shape_map;
  for (auto &node : all_nodes) {
    MS_EXCEPTION_IF_NULL(node);
    auto cnode = node->cast<CNodePtr>();
    if ((cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    auto param_names = NodeParameterName(cnode);
    if (param_names.empty()) {
      continue;
    }
    std::string param_name = param_names[0].first;
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(cnode->input(0));
    MS_EXCEPTION_IF_NULL(prim);
    OperatorInfoPtr operator_info = cnode->user_data<OperatorInfo>();
    if (operator_info) {
      if (operator_info->name().find(RESHAPEINFO) != std::string::npos) {
        continue;
      }
      std::vector<TensorInfo> input_tensor_info = operator_info->inputs_tensor_info();
      std::string strategy_key_name = prim->name() + "_" + param_name;
      stra_map[strategy_key_name] = operator_info->strategy();
      for (auto param_name_pair : param_names) {
        if (param_name_pair.second - 1 >= UlongToLong(input_tensor_info.size())) {
          continue;
        }
        tensor_info_map[param_name_pair.first] = input_tensor_info[param_name_pair.second - 1];
      }
      if (operator_info->name().find(EMBEDDING_LOOKUP) != std::string::npos ||
          operator_info->name().find(GATHERV2) != std::string::npos) {
        auto gatherv2_info = std::dynamic_pointer_cast<GatherPInfo>(operator_info);
        auto param_split_shapes = gatherv2_info->param_split_shapes();
        auto index_offsets = gatherv2_info->index_offsets();
        if (param_split_shapes.size() != index_offsets.size()) {
          MS_LOG(EXCEPTION) << "In manual split, the lengths of param_split_shapes and index_offsets should be equal.";
        }
        std::vector<std::pair<int64_t, int64_t>> manual_shape;
        for (int64_t i = 0; i < UlongToLong(param_split_shapes.size()); ++i) {
          manual_shape.push_back({param_split_shapes[i], index_offsets[i]});
        }
        manual_shape_map[param_name] = manual_shape;
      }
    }
  }
  if (StrategyCheckpoint::GetInstance().Save(stra_map, tensor_info_map, &manual_shape_map) != SUCCESS) {
    MS_LOG(EXCEPTION) << "Save strategy checkpoint failed";
  }
}
void SetForwardFlag(const std::vector<AnfNodePtr> &all_nodes) {
  for (auto &node : all_nodes) {
    MS_EXCEPTION_IF_NULL(node);
    if (!node->isa<CNode>()) {
      continue;
    }
    auto cnode = node->cast<CNodePtr>();
    if (!IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    // CNode is globally unique.
    MS_LOG(DEBUG) << "Set forward flag " << cnode->DebugString() << ".";
    cnode->set_in_forward_flag(true);
  }
}
void SetForwardFlag(const AnfNodeSet &all_nodes) {
  for (auto &node : all_nodes) {
    MS_EXCEPTION_IF_NULL(node);
    if (!node->isa<CNode>()) {
      continue;
    }
    auto cnode = node->cast<CNodePtr>();
    if (!IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    // CNode is globally unique.
    cnode->set_in_forward_flag(true);
  }
}
std::set<FuncGraphPtr> ForwardGraph(const FuncGraphPtr &root) {
  MS_EXCEPTION_IF_NULL(root);
  const auto &all_nodes = root->nodes();
  std::set<FuncGraphPtr> graph_set = FindForwardGraphByRootNodes(all_nodes);
  return graph_set;
}
std::vector<AnfNodePtr> FindRootForwardCNode(const FuncGraphPtr &graph, const AnfNodeSet &all_nodes) {
  MS_EXCEPTION_IF_NULL(graph);
  std::vector<AnfNodePtr> root_forward_nodes;
  auto loss_cnode = FindLossCNode(graph).loss_node;
  if (loss_cnode == nullptr) {
    MS_LOG(WARNING) << "Cannot find the loss cnode";
    return root_forward_nodes;
  }
  auto loss_cnode_id = loss_cnode->UniqueIdThroughCopy();
  for (auto &node : all_nodes) {
    MS_EXCEPTION_IF_NULL(node);
    if (!node->isa<CNode>()) {
      continue;
    }
    auto cnode = node->cast<CNodePtr>();
    auto root_node_id = node->UniqueIdThroughCopy();
    if (loss_cnode_id == root_node_id) {
      root_forward_nodes = DeepLinkedGraphSearch(cnode);
      break;
    }
  }
  return root_forward_nodes;
}
void InsertShapeOp(const CNodePtr &node, const AnfNodePtr &pre_node, const FuncGraphPtr &root) {
  // the shape op doesn't have params or attrs.
  OperatorParams params;
  OperatorAttrs attrs;
  auto shape_value = GetValueNode(node->input(2))->cast<ValueSequeuePtr>();
  MS_EXCEPTION_IF_NULL(shape_value);
  auto shape = shape_value->value();
  if (shape.empty()) {
    return;
  }
  OperatorArgs args = std::make_pair(attrs, params);
  Operator op = std::make_pair(SHAPE_OP, args);
  InsertNode(op, node, 2, pre_node, root, "shape");
}
static AnfNodePtr FindGrad(const CNodePtr &cnode) {
  for (auto &node : cnode->inputs()) {
    if (!node->isa<CNode>()) {
      continue;
    }
    if (!IsPrimitiveCNode(node, prim::kPrimEnvGetItem)) {
      return FindGrad(node->cast<CNodePtr>());
    } else {
      return node;
    }
  }
  return nullptr;
}
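// Illustrative sketch: FindGrad descends through the first cnode input at each level until
// it reaches an EnvGetItem node (where the gradient is extracted) and returns it; if the
// chain ends without one, it returns nullptr and no Shape op gets inserted by the caller.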
void HandleRootReshapeAndSaveStrategy(const std::vector<AnfNodePtr> &all_nodes) {
  // If the root graph has a reshape op, find the corresponding parameter:
  // the reshape's shape input is the shape of that parameter.
  auto executor = pipeline::ExecutorPy::GetInstance();
  for (auto &node : all_nodes) {
    if (!node->isa<CNode>()) {
      continue;
    }
    auto cnode = node->cast<CNodePtr>();
    if (cnode == nullptr || !IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    if (cnode->in_forward_flag()) {
      // Save the strategy in the executor
      OperatorInfoPtr op_info = cnode->user_data<OperatorInfo>();
      if (op_info) {
        auto stra_ptr = op_info->strategy();
        if (stra_ptr) {
          auto strategy = stra_ptr->GetInputDim();
          // the fullname with scope can be found in the step_parallel end IR
          executor->SetCNodeStrategy(cnode->fullname_with_scope(), strategy);
        }
      }
      continue;
    }
    auto prim = GetValueNode<PrimitivePtr>(cnode->input(0));
    if (prim->name() != RESHAPE) {
      continue;
    }
    auto root = node->func_graph();
    auto grad_node = FindGrad(cnode);
    if (grad_node) {
      InsertShapeOp(cnode, grad_node, root);
    }
  }
}
void MarkForwardCNode(const FuncGraphPtr &root) {
  MS_EXCEPTION_IF_NULL(root);
  auto all_nodes = root->nodes();
  auto graph_set = FindForwardGraphByRootNodes(all_nodes);
  if (graph_set.empty()) {
    MS_LOG(INFO) << "Cannot find the forward graph, so mark the ops in the root graph";
    SetForwardFlag(all_nodes);
  } else {
    for (auto &func_graph : graph_set) {
      MS_LOG(INFO) << "The sub graph size of root is " << root->func_graphs_used().size();
      auto return_node = func_graph->get_return();
      MS_EXCEPTION_IF_NULL(return_node);
      auto all_dfs_nodes = DeepLinkedGraphSearch(return_node);
      SetForwardFlag(all_dfs_nodes);
      auto root_forward_nodes = FindRootForwardCNode(func_graph, all_nodes);
      if (root_forward_nodes.empty()) {
        continue;
      }
      // Mark the forward flag for the nodes in the root graph.
      SetForwardFlag(root_forward_nodes);
    }
  }
}
Status ParallelInit() {
  MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance());
  int64_t device_num = ParallelContext::GetInstance()->device_num();
  int64_t global_rank = ParallelContext::GetInstance()->global_rank();
  int32_t split_stage_num = ParallelContext::GetInstance()->pipeline_stage_split_num();
  std::string parallel_mode = ParallelContext::GetInstance()->parallel_mode();
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  std::string backend = ms_context->get_param<std::string>(MS_CTX_DEVICE_TARGET);
  std::string world_group;
  std::string communication_backend;
  if (backend == kAscendDevice || backend == kDavinciDevice) {
    world_group = HCCL_WORLD_GROUP;
    communication_backend = HCCL_BACKEND;
  } else if (backend == kGPUDevice) {
    world_group = NCCL_WORLD_GROUP;
    communication_backend = NCCL_BACKEND;
  } else {
    MS_LOG(ERROR) << "Invalid communication backend: " << backend;
    return FAILED;
  }
  if (split_stage_num <= 0) {
    MS_LOG(ERROR) << "Invalid stage num " << split_stage_num << ", expected a positive stage number";
    return FAILED;
  }
  uint32_t world_rank_size = 0;
  if (!ParallelContext::GetInstance()->device_num_is_set()) {
    if (!CommManager::GetInstance().GetRankSize(world_group, &world_rank_size)) {
      MS_LOG(EXCEPTION) << "Get rank size failed";
    }
    device_num = UintToInt(world_rank_size);
    MS_LOG(INFO) << "Get device num from the communication interface, the device num is " << device_num;
  }
  uint32_t rank_id = 0;
  if (!ParallelContext::GetInstance()->global_rank_is_set()) {
    if (!CommManager::GetInstance().GetRankID(world_group, &rank_id)) {
      MS_LOG(EXCEPTION) << "Get rank id failed";
    }
    global_rank = UintToInt(rank_id);
    MS_LOG(INFO) << "Get global rank from the communication interface, the global rank is " << global_rank;
  }
  if ((device_num <= 0) || (device_num > MAX_DEVICE_NUM)) {
    MS_LOG(ERROR) << "Invalid device num " << device_num;
    return FAILED;
  }
  // the device_num may be obtained from the communication interface
  if (device_num % split_stage_num != 0) {
    MS_LOG(ERROR) << "Device num " << device_num << " can't be divided by stage num " << split_stage_num;
    return FAILED;
  }
  if ((global_rank < 0) || (global_rank >= device_num)) {
    MS_LOG(ERROR) << "Global rank " << global_rank << " is out of range, the device num is " << device_num;
    return FAILED;
  }
  std::vector<int64_t> stages;
  for (int i = 0; i < split_stage_num; i++) {
    stages.push_back(device_num / split_stage_num);
  }
  if ((split_stage_num > 1) && (parallel_mode != SEMI_AUTO_PARALLEL)) {
    MS_LOG(ERROR) << "To enable pipeline parallel, please set the parallel mode to " << SEMI_AUTO_PARALLEL;
    return FAILED;
  }
  if (!InitDevice(device_num, global_rank, communication_backend, stages)) {
    MS_LOG(ERROR) << "Init device failed";
    return FAILED;
  }
  MS_LOG(INFO) << "The parallel context: dev num: " << device_num << ", global rank: " << global_rank
               << ", backend: " << backend << ", gradients_mean: " << ParallelContext::GetInstance()->gradients_mean()
               << ", gradient_fp32_sync: " << ParallelContext::GetInstance()->gradient_fp32_sync();
  return SUCCESS;
}
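// Example (hypothetical configuration): device_num = 8 with pipeline_stage_split_num = 2
// passes the divisibility check and produces stages = {4, 4}; device_num = 8 with 3 stages
// fails, as does enabling more than one stage outside semi_auto_parallel mode.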
void HandleForwardMakeTupleAndMakeList(const std::vector<AnfNodePtr> &all_nodes) {
  for (auto &node : all_nodes) {
    if (!AnfNodeIsPrimitive(node, MAKE_TUPLE) && !AnfNodeIsPrimitive(node, MAKE_LIST)) {
      continue;
    }
    auto cnode = node->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(cnode);
    if (!cnode->in_forward_flag()) {
      continue;
    }
    FuncGraphManagerPtr manager = cnode->func_graph()->manager();
    MS_EXCEPTION_IF_NULL(manager);
    std::string op_type = AnfNodeIsPrimitive(node, MAKE_TUPLE) ? MAKE_TUPLE : MAKE_LIST;
    auto make_tuple_list_user = manager->node_users()[cnode];
    if (make_tuple_list_user.size() != 1) {
      MS_LOG(EXCEPTION) << "Now the " << op_type << " must have exactly 1 user, but got "
                        << make_tuple_list_user.size();
    }
    CNodePtr make_tuple_list_next_cnode = make_tuple_list_user.pop().first->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(make_tuple_list_next_cnode);
    std::string make_tuple_list_user_prim_name = GetPrimName(make_tuple_list_next_cnode);
    if (!IsParallelCareNode(make_tuple_list_next_cnode)) {
      MS_LOG(INFO) << "The " << op_type << "'s user is " << make_tuple_list_user_prim_name
                   << ", no need to set operator info";
      continue;
    }
    if (make_tuple_list_next_cnode->inputs().size() != 2) {
      MS_LOG(EXCEPTION) << "Now the " << op_type << "'s user only supports 1 input, but got "
                        << make_tuple_list_next_cnode->inputs().size() - 1;
    }
    MS_LOG(INFO) << "Set the " << op_type << "'s operator info, and the op name is " << make_tuple_list_user_prim_name;
    OperatorInfoPtr op_info = GetDistributeOperator(make_tuple_list_next_cnode);
    MS_EXCEPTION_IF_NULL(op_info);
    cnode->set_user_data<OperatorInfo>(op_info);
  }
}
RefKeyPair CNodeWithRefKeys(const AnfNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  std::vector<AnfNodePtr> refkeys;
  if (cnode->isa<CNode>()) {
    auto cnode_ptr = cnode->cast<CNodePtr>();
    auto inputs = cnode_ptr->inputs();
    for (auto &one_input : inputs) {
      if (IsValueNode<RefKey>(one_input)) {
        refkeys.push_back(one_input);
      }
    }
    if (!refkeys.empty()) {
      return std::make_pair(cnode, refkeys);
    }
  }
  return {nullptr, refkeys};
}
ParameterUsersInfo FindParameterNodeUsers(const AnfNodePtr &node, bool (*IsCareNode)(const CNodePtr &)) {
  // In this case, node is a Parameter
  ParameterUsersInfo parameter_user_info;
  MS_EXCEPTION_IF_NULL(node->func_graph());
  MS_EXCEPTION_IF_NULL(node->func_graph()->manager());
  auto candidate_set = node->func_graph()->manager()->node_users()[node];
  for (auto &candidate : candidate_set) {
    auto candidate_node = candidate.first;
    if (IsPrimitiveCNode(candidate_node, prim::kPrimLoad)) {
      if (candidate.second != 1) {
        continue;
      }
      auto load_node_users = node->func_graph()->manager()->node_users()[candidate_node];
      for (auto &node_user : load_node_users) {
        auto cnode = node_user.first->cast<CNodePtr>();
        if (cnode == nullptr || !cnode->has_user_data<OperatorInfo>() || IsSomePrimitive(cnode, RECEIVE)) {
          continue;
        }
        (void)parameter_user_info.second.second.insert(node_user);
      }
    } else {
      auto c = candidate_node->cast<CNodePtr>();
      if (c == nullptr || !c->has_user_data<OperatorInfo>() || IsSomePrimitive(c, RECEIVE)) {
        continue;
      }
      (void)parameter_user_info.second.second.insert(candidate);
    }
  }
  parameter_user_info.first = node->cast<ParameterPtr>()->name();
  parameter_user_info.second.first = node;
  return parameter_user_info;
}
ParameterUsersInfo FindRefKeyNodeUsers(const RefKeyPair &ref_key_pair, bool (*IsCareNode)(const CNodePtr &)) {
  // Dealing with the RefKey case
  ParameterUsersInfo parameter_user_info;
  auto refkeys = ref_key_pair.second;
  auto cnode = ref_key_pair.first;
  auto cnode_ptr = cnode->cast<CNodePtr>();
  if ((cnode_ptr == nullptr) || !IsValueNode<Primitive>(cnode_ptr->input(0)) || !IsCareNode(cnode_ptr)) {
    return parameter_user_info;
  }
  if (refkeys.size() > 1) {
    MS_LOG(EXCEPTION) << "CNode: " << cnode->fullname_with_scope() << "'s inputs have more than 1 RefKeys";
  }
  MS_EXCEPTION_IF_NULL(cnode->func_graph());
  auto cnode_func_graph = cnode->func_graph();
  MS_EXCEPTION_IF_NULL(cnode->func_graph()->manager());
  // Find the RefKey being used
  auto candidate_set_by_refkey = cnode_func_graph->manager()->node_users()[refkeys[0]];
  for (auto &candidate : candidate_set_by_refkey) {
    auto candidate_node = candidate.first;
    auto c = candidate_node->cast<CNodePtr>();
    if ((c == nullptr) || !IsValueNode<Primitive>(c->input(0)) || !IsCareNode(c)) {
      continue;
    }
    (void)parameter_user_info.second.second.insert(candidate);
  }
  // Find the corresponding Parameter being used
  std::vector<AnfNodePtr> parameters = FindParameterByRefKeyNode(refkeys[0], cnode_func_graph);
  if (parameters.size() != 1) {
    MS_LOG(EXCEPTION) << "Find parameter by ref key node failed";
  }
  parameter_user_info.first = parameters[0]->cast<ParameterPtr>()->name();
  parameter_user_info.second.first = parameters[0];
  auto candidate_set_by_para = cnode_func_graph->manager()->node_users()[parameters[0]];
  for (auto &candidate : candidate_set_by_para) {
    auto candidate_node = candidate.first;
    auto c = candidate_node->cast<CNodePtr>();
    if ((c == nullptr) || !IsValueNode<Primitive>(c->input(0)) || !IsCareNode(c)) {
      continue;
    }
    (void)parameter_user_info.second.second.insert(candidate);
  }
  return parameter_user_info;
}
ParameterUsersInfo FindParameterUsers(const AnfNodePtr &node, bool (*IsCareNode)(const CNodePtr &)) {
  ParameterUsersInfo parameter_users_info;
  auto cnode_with_refkeys = CNodeWithRefKeys(node);
  if (cnode_with_refkeys.first != nullptr) {
    // the node is a ref key node
    return FindRefKeyNodeUsers(cnode_with_refkeys, IsCareNode);
  } else if (node->isa<Parameter>()) {
    // the node is a parameter node
    return FindParameterNodeUsers(node, IsCareNode);
  }
  return parameter_users_info;
}
Shape ParameterSliceShape(const std::pair<AnfNodePtr, int64_t> &param_info) {
  auto user_cnode = param_info.first->cast<CNodePtr>();
  MS_EXCEPTION_IF_NULL(user_cnode);
  auto user_input_index = param_info.second;
  OperatorInfoPtr op_info = user_cnode->user_data<OperatorInfo>();
  MS_EXCEPTION_IF_NULL(op_info);
  size_t input_tensor_info_size = op_info->inputs_tensor_info().size();
  if (SizeToLong(input_tensor_info_size) <= user_input_index - 1) {
    MS_LOG(EXCEPTION) << op_info->name() << ": the size of inputs tensor info is " << input_tensor_info_size
                      << ", but the index is " << user_input_index - 1;
  }
  TensorInfo tensor_info = op_info->inputs_tensor_info()[user_input_index - 1];
  MS_LOG(DEBUG) << "The op name is " << op_info->name() << ", the parameter index is " << user_input_index - 1
                << ", the slice shape is " << ShapeToString(tensor_info.slice_shape()) << ", the origin shape is "
                << ShapeToString(tensor_info.shape());
  return tensor_info.slice_shape();
}
void CheckParameterSplit(const std::vector<AnfNodePtr> &all_nodes) {
  for (auto &node : all_nodes) {
    ParameterUsersInfo parameter_users_info = FindParameterUsers(node, IsParallelCareNode);
    auto users_set = parameter_users_info.second.second;
    if (users_set.size() <= 1) {
      continue;
    }
    auto parameter_name = parameter_users_info.first;
    MS_LOG(INFO) << "The parameter: " << parameter_name << " has " << users_set.size() << " users";
    auto first_user = users_set.pop();
    Shape first_user_slice_shape = ParameterSliceShape(first_user);
    for (auto &user : users_set) {
      Shape user_slice_shape = ParameterSliceShape(user);
      if (first_user_slice_shape != user_slice_shape) {
        MS_LOG(EXCEPTION) << "The parameter: " << parameter_name
                          << " has multiple users, but the split strategies are different";
      }
    }
  }
}
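// Example (hypothetical strategies): if parameter %w feeds a MatMul whose slice shape is
// [4, 16] and an Add whose slice shape is [8, 16], the two users disagree and the
// exception above fires; all users of one parameter must slice it identically.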
bool CreateGroupsByCkptFile(const std::string &file) {
  GroupInfoMap group_info_map;
  if (StrategyCheckpoint::GetInstance().LoadGroupInfo(file, &group_info_map) != SUCCESS) {
    return false;
  }
  if (CreateGroups(group_info_map) != SUCCESS) {
    return false;
  }
  MS_LOG(INFO) << "Created groups from the checkpoint file successfully";
  return true;
}
bool IsUsedParameter(const FuncGraphPtr &graph, const AnfNodePtr &parameter) {
  MS_EXCEPTION_IF_NULL(graph);
  MS_EXCEPTION_IF_NULL(parameter);
  auto manager = graph->manager();
  auto node_users = manager->node_users()[parameter];
  if (node_users.empty()) {
    return false;
  }
  for (auto node_user : node_users) {
    auto use_node = node_user.first->cast<CNodePtr>();
    if (IsValueNode<FuncGraph>(use_node->input(0))) {
      auto graph_sub = GetValueNode<FuncGraphPtr>(use_node->input(0));
      auto parameters = graph_sub->parameters();
      auto parameter_sub = parameters[node_user.second - 1];
      return IsUsedParameter(graph_sub, parameter_sub);
    }
    if (use_node->input(0)->isa<CNode>()) {
      auto cnode = use_node->input(0)->cast<CNodePtr>();
      if (!IsSomePrimitive(cnode, J) || !IsValueNode<FuncGraph>(cnode->input(1))) {
        return true;
      }
      auto graph_sub = GetValueNode<FuncGraphPtr>(cnode->input(1));
      auto parameters = graph_sub->parameters();
      auto parameter_sub = parameters[node_user.second - 1];
      return IsUsedParameter(graph_sub, parameter_sub);
    }
    return true;
  }
  return true;
}
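// Illustrative sketch (hypothetical call): a parameter forwarded into a sub graph call
// such as %out = call @sub_fn(%w) is mapped to @sub_fn's formal parameter and checked
// there recursively; a J(@sub_fn) call is followed the same way, and any other cnode user
// counts as a real use.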
static void HandleNoUsedParameter(const FuncGraphPtr &root) {
  MS_EXCEPTION_IF_NULL(root);
  bool full_batch = ParallelContext::GetInstance()->full_batch();
  if (full_batch) {
    return;
  }
  auto dev_num = g_device_manager->stage_device_num();
  auto parameters = root->parameters();
  for (auto &parameter : parameters) {
    if (IsUsedParameter(root, parameter)) {
      continue;
    }
    auto parameter_shape = GetNodeShape(parameter);
    if (parameter_shape.empty()) {
      continue;
    }
    Shape slice_shape = parameter_shape[0];
    if (slice_shape.empty()) {
      continue;
    }
    slice_shape[0] = slice_shape[0] / dev_num;
    auto slice_shape_ptr = std::make_shared<abstract::Shape>(slice_shape);
    auto abstract = parameter->abstract();
    MS_EXCEPTION_IF_NULL(abstract);
    auto abstract_cloned = abstract->Clone();
    MS_EXCEPTION_IF_NULL(abstract_cloned);
    abstract_cloned->set_shape(slice_shape_ptr);
    parameter->set_abstract(abstract_cloned);
  }
}
bool StepParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &optimizer) {
#if (ENABLE_CPU && (ENABLE_D || ENABLE_GPU))
  if (ps::PSContext::instance()->is_server() || ps::PSContext::instance()->is_scheduler()) {
    return false;
  }
#endif
  MS_EXCEPTION_IF_NULL(root);
  MS_EXCEPTION_IF_NULL(optimizer);
  MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance());
  std::string parallel_mode = ParallelContext::GetInstance()->parallel_mode();
  // assume the graph does not change
  bool changes = false;
  // control whether to use the model_parallel mode
  if (!root->has_flag(AUTO_PARALLEL) || ((parallel_mode != AUTO_PARALLEL) && (parallel_mode != SEMI_AUTO_PARALLEL)) ||
      (root->has_flag(SEMI_AUTO_PARALLEL_RUN_ONCE_ONLY))) {
    if (!root->has_flag(CHECK_SET_STRATEGY_VALID_ONCE_ONLY)) {
      if (HasStrategy(root)) {
        MS_LOG(INFO) << "Strategies are ignored in " << parallel_mode
                     << ", set_strategy() is only valid in [semi_]auto_parallel.";
      }
      root->set_flag(CHECK_SET_STRATEGY_VALID_ONCE_ONLY, true);
    }
    return changes;
  }
  struct timeval start_time, end_time;
  (void)gettimeofday(&start_time, nullptr);
  MS_LOG(INFO) << "Now entering step parallel";
  DumpGraph(root, std::string(STEP_PARALLEL_BEGIN));
  pipeline::ResourceBasePtr res = optimizer->resource();
  MS_EXCEPTION_IF_NULL(res);
  FuncGraphManagerPtr manager = res->manager();
  MS_EXCEPTION_IF_NULL(manager);
  AnfNodePtr ret = root->get_return();
  MS_EXCEPTION_IF_NULL(ret);
  std::vector<AnfNodePtr> all_nodes = DeepScopedGraphSearch(ret);
  std::reverse(all_nodes.begin(), all_nodes.end());
  if (parallel_mode != AUTO_PARALLEL) {
    TOTAL_OPS = 0;
    auto pipeline_stages = ParallelContext::GetInstance()->pipeline_stage_split_num();
    if (pipeline_stages <= 1 && ParallelInit() != SUCCESS) {
      MS_LOG(EXCEPTION) << "Parallel init failed";
    }
    // mark the forward cnodes; parallel only cares about these nodes
    MarkForwardCNode(root);
    if (FindCommunicationOp(all_nodes)) {
      MS_LOG(EXCEPTION) << "The graph contains a communication op";
    }
    // extract the shape and strategy, set the operator_info
    ExtractInformation(all_nodes, root->has_flag(TRAINING));
    ReshapeInit(all_nodes);
  }
  HandleRootReshapeAndSaveStrategy(all_nodes);
  HandleForwardMakeTupleAndMakeList(all_nodes);
  // if an input or parameter has multiple users, check whether its split strategies are consistent.
  CheckParameterSplit(all_nodes);
  // save the strategy as a checkpoint for multi-train
  if (StrategyCheckpoint::GetInstance().SaveCheckPointOn()) {
    CheckpointStrategy(all_nodes);
  }
  HandleSymbolicKeyInstance(root, all_nodes);
  // cover the parallel shape
  CoverSliceShape(root);
  // handle inputs that are not used
  HandleNoUsedParameter(root);
  // set the shape for the optimizer's cloned tensors
  SetClonedTensorShapeForOptimizer(root);
  // ForwardCommunication BackwardCommunication TensorRedistribution
  ParallelCommunication(root, all_nodes, manager);
  auto group_info = g_device_manager->group_info();
  if (StrategyCheckpoint::GetInstance().group_info_save_on() &&
      StrategyCheckpoint::GetInstance().SaveGroupInfo(group_info) != SUCCESS) {
    MS_LOG(EXCEPTION) << "Save group info failed";
  }
  DumpGraph(root, std::string(STEP_PARALLEL_END));
  // step parallel only runs once
  root->set_flag(SEMI_AUTO_PARALLEL_RUN_ONCE_ONLY, true);
  res->results()[pipeline::kStepParallelGraph] = root;
  // in auto parallel mode, there is no need to check whether strategies are set
  root->set_flag(CHECK_SET_STRATEGY_VALID_ONCE_ONLY, true);
  (void)gettimeofday(&end_time, nullptr);
  uint64_t time = kUSecondInSecond * static_cast<uint64_t>(end_time.tv_sec - start_time.tv_sec);
  time += static_cast<uint64_t>(end_time.tv_usec - start_time.tv_usec);
  MS_LOG(INFO) << "Now leaving step parallel, used time: " << time << " us";
  return changes;
}
// Needed by rec_parser
std::vector<std::string> ExtractInputsTensorName(const CNodePtr &node) {
  std::vector<std::string> name_inputs;
  std::vector<AnfNodePtr> all_inputs = node->inputs();
  std::vector<AnfNodePtr> node_inputs{all_inputs.begin() + 1, all_inputs.end()};
  std::string node_id = node->UniqueId();
  name_inputs.push_back(node_id);
  for (auto &input : node_inputs) {
    std::string name = input->UniqueId();
    name_inputs.push_back(name);
  }
  return name_inputs;
}
}  // namespace parallel
}  // namespace mindspore