step_parallel.cc

/**
 * Copyright 2019-2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "frontend/parallel/step_parallel.h"

#include <inttypes.h>
#include <sys/time.h>
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <queue>

#include "utils/hash_map.h"
#include "base/core_ops.h"
#include "frontend/operator/ops.h"
#include "frontend/optimizer/optimizer.h"
#include "frontend/parallel/auto_parallel/graph_costmodel.h"
#include "include/common/utils/parallel_context.h"
#include "frontend/parallel/device_manager.h"
#include "frontend/parallel/dynamic_creator.h"
#include "frontend/parallel/graph_util/generate_graph.h"
#include "frontend/parallel/graph_util/graph_info.h"
#include "frontend/parallel/graph_util/node_info.h"
#include "frontend/parallel/graph_util/pipeline_split_utils.h"
#include "frontend/parallel/node_check.h"
#include "frontend/parallel/parameter_manager.h"
#include "frontend/parallel/ops_info/matmul_info.h"
#include "ir/param_info.h"
#include "ir/tensor.h"
#include "utils/trace_base.h"
#include "include/common/utils/comm_manager.h"
#include "utils/ms_context.h"
#include "utils/symbolic.h"
#include "mindspore/core/utils/parallel_node_check.h"
#include "frontend/parallel/parallel_optimizer/opt_param_mgr.h"
#if ((defined ENABLE_CPU) && (!defined _WIN32))
#include "ps/util.h"
#include "ps/ps_context.h"
#endif

using mindspore::tensor::Tensor;

namespace mindspore {
namespace parallel {
static const std::set<std::string> COMMUNICATION_OPS = {ALL_REDUCE, ALL_GATHER, ALL_TO_ALL, REDUCE_SCATTER};
static const std::set<std::string> INVALID_LOSS_OPS = {GET_NEXT, VIRTUALLOSS, LOAD, UPDATESTATE};
static const std::set<std::string> NO_INPUT_TENSOR_OPS = {UNIFORM_REAL};
static const std::vector<std::pair<const std::string, int64_t>> REDUCE_SUM_MATCH_PATTERN = {
  std::make_pair(MAKE_TUPLE, 1), std::make_pair(ADDN, 1), std::make_pair(SQRT, 1)};
// g_RefMap: if input i of CNode B is a RefKey[Parameter C], the map holds one
// entry with key C and value (B, i).
std::map<AnfNodePtr, std::pair<AnfNodePtr, int64_t>> g_RefMap;
const uint32_t MAX_BFS_DEPTH = 7;
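// Labels a freshly built mini-step mirror op. As far as this file shows, DO_MIRROR
// controls whether the op actually all-reduces at the current micro step and ADD_ACCU
// whether it folds the result into accu_grads; the attrs are consumed elsewhere.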
void SetMiniStepOpDoMirrorLabel(std::vector<AnfNodePtr> new_node_input, bool do_mirror, bool accu_flag) {
  if (new_node_input.empty()) {
    return;
  }
  auto prim_anf_node = new_node_input[0]->cast<ValueNodePtr>();
  auto prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  MS_EXCEPTION_IF_NULL(prim);
  auto attrs = prim->attrs();
  attrs[DO_MIRROR] = MakeValue<bool>(do_mirror);
  attrs[ADD_ACCU] = MakeValue<bool>(accu_flag);
  prim->SetAttrs(attrs);
}
void SetAllReduceRecomputeFlag(const std::vector<AnfNodePtr> &new_node_input, const CNodePtr &node) {
  if (new_node_input.empty()) {
    return;
  }
  auto prim_anf_node = new_node_input[0]->cast<ValueNodePtr>();
  auto prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  MS_EXCEPTION_IF_NULL(prim);
  auto attrs = prim->attrs();
  auto anf_node = node->input(0)->cast<ValueNodePtr>();
  auto prim_node = GetValueNode<PrimitivePtr>(anf_node);
  MS_EXCEPTION_IF_NULL(prim_node);
  auto node_attrs = prim_node->attrs();
  if (node_attrs.find(RECOMPUTE_COMM_OP) != node_attrs.end() && !GetValue<bool>(node_attrs[RECOMPUTE_COMM_OP])) {
    attrs[RECOMPUTE] = MakeValue<bool>(false);
    prim->SetAttrs(attrs);
    MS_LOG(INFO) << "Do not recompute the forward communication operator of " << prim_node->ToString();
  }
}
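// Builds the input list for a new helper CNode: {ValueNode(prim), data input}, then
// splices the operator's constant params into their recorded positions. A sketch for a
// hypothetical op with one extra param recorded at position 2:
//   new_node_input = {ValueNode(prim), node, ValueNode(param_value)}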
std::vector<AnfNodePtr> CreateInput(const Operator &op, const AnfNodePtr &node, const std::string &instance_name) {
  MS_EXCEPTION_IF_NULL(node);
  OperatorArgs arg_forward = op.second;
  ValuePtr pyop_instance = CreateOpInstance(arg_forward.first, op.first, instance_name);
  MS_EXCEPTION_IF_NULL(pyop_instance);
  OperatorParams params = arg_forward.second;
  std::vector<AnfNodePtr> new_node_input = {NewValueNode(pyop_instance), node};
  if (!params.empty()) {
    for (auto &param : params) {
      AnfNodePtr val = NewValueNode(param.first.second);
      MS_EXCEPTION_IF_NULL(val);
      int64_t position = param.second;
      (void)new_node_input.insert(new_node_input.begin() + position, val);
    }
  }
  // if the op has a 'group' attr, set the rank list name for the op
  SetCommunicationOpGroupLabel(new_node_input);
  return new_node_input;
}
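// Scans the root graph's parameters for the cloned accumulation-gradient parameter of
// `weight_name`, identified by substring matching against the name and ACCU_GRADS;
// returns nullptr when no such clone exists.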
AnfNodePtr GetAccuGrad(const std::vector<AnfNodePtr> &parameters, const std::string &weight_name) {
  for (auto &param : parameters) {
    if (!ParameterIsCloned(param)) {
      continue;
    }
    auto param_ptr = param->cast<ParameterPtr>();
    MS_EXCEPTION_IF_NULL(param_ptr);
    if (param_ptr->name().find(weight_name) != std::string::npos &&
        param_ptr->name().find(ACCU_GRADS) != std::string::npos) {
      MS_LOG(INFO) << "Find the accumulation grad node: " << param_ptr->name();
      return param;
    }
  }
  return nullptr;
}
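// Like CreateInput, but for the mirror family of operators: under gradient accumulation
// or pipeline parallelism the matching accu_grads parameter becomes an extra input, and
// a mini-step mirror without one falls back to the plain MIRROR_OPERATOR.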
std::vector<AnfNodePtr> CreateMirrorInput(const FuncGraphPtr &root, const Operator &op, const AnfNodePtr &node,
                                          const std::string &instance_name, const std::string &weight_name) {
  MS_EXCEPTION_IF_NULL(root);
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(root->manager());
  std::string op_name = op.first;
  OperatorArgs arg_forward = op.second;
  AnfNodePtr grad_accu = nullptr;
  int64_t grad_accumulation_step = ParallelContext::GetInstance()->grad_accumulation_step();
  int64_t split_stage_num = ParallelContext::GetInstance()->pipeline_stage_split_num();
  if (grad_accumulation_step > 1 || split_stage_num > 1) {
    auto parameters = root->parameters();
    grad_accu = GetAccuGrad(parameters, weight_name);
    if (!grad_accu) {
      if (op_name == MIRROR_MINI_STEP_OPERATOR) {
        op_name = MIRROR_OPERATOR;
        arg_forward.first.pop_back();
      } else if (op_name == MINI_STEP_ALL_GATHER || op_name == MIRROR_MICRO_STEP_OPERATOR ||
                 op_name == MICRO_STEP_ALL_GATHER) {
        MS_LOG(EXCEPTION) << "You should define `accu_grads` when using " << op_name
                          << ", parameter: " << weight_name;
      }
    }
  }
  ValuePtr pyop_instance = CreateOpInstance(arg_forward.first, op_name, instance_name);
  MS_EXCEPTION_IF_NULL(pyop_instance);
  OperatorParams params = arg_forward.second;
  std::vector<AnfNodePtr> new_node_input;
  if (op_name == MIRROR_MINI_STEP_OPERATOR || op_name == MINI_STEP_ALL_GATHER ||
      op_name == MIRROR_MICRO_STEP_OPERATOR || op_name == MICRO_STEP_ALL_GATHER) {
    new_node_input = {NewValueNode(pyop_instance), node, grad_accu};
    MS_LOG(INFO) << "Insert the grad accumulation node as the mirror op's input";
  } else {
    new_node_input = {NewValueNode(pyop_instance), node};
  }
  if (!params.empty()) {
    for (auto &param : params) {
      AnfNodePtr val = NewValueNode(param.first.second);
      MS_EXCEPTION_IF_NULL(val);
      int64_t position = param.second;
      (void)new_node_input.insert(new_node_input.begin() + position, val);
    }
  }
  // if the op has a 'group' attr, set the rank list name for the op
  SetCommunicationOpGroupLabel(new_node_input);
  // gradient accumulation
  if (grad_accumulation_step > 1) {
    bool add_accu = root->has_flag(kAccumulation);
    // MiniStep needs to do mirror at each micro step since gradient accumulation sharding is used
    SetMiniStepOpDoMirrorLabel(new_node_input, !add_accu, !add_accu);
  }
  return new_node_input;
}
void InsertNode(const Operator &op, const CNodePtr &node, size_t index, const AnfNodePtr &pre_node,
                const FuncGraphPtr &func_graph, const std::string &instance_name, const std::string &param_name = "",
                const FuncGraphPtr &root = nullptr) {
  // insert new node before the node
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  ScopePtr scope = node->scope();
  MS_EXCEPTION_IF_NULL(scope);
  std::vector<AnfNodePtr> node_input;
  if (root && !param_name.empty()) {
    node_input = CreateMirrorInput(root, op, pre_node, instance_name, param_name);
  } else {
    node_input = CreateInput(op, pre_node, instance_name);
  }
  CNodePtr new_node = func_graph->NewCNode(node_input);
  MS_EXCEPTION_IF_NULL(new_node);
  if (instance_name.find(SPLIT_SENS) == std::string::npos) {
    new_node->set_in_forward_flag(true);  // mark forward flag
  }
  auto new_node_value = node_input[0]->cast<ValueNodePtr>();
  MS_EXCEPTION_IF_NULL(new_node_value);
  PrimitivePtr new_node_prim = new_node_value->value()->cast<PrimitivePtr>();
  new_node_prim->set_instance_name(instance_name);
  new_node_prim->set_attr("keep_value_node_input", MakeValue(true));
  if (instance_name.find(NOT_RECOMPUTE) != std::string::npos) {
    new_node_prim->set_attr("recompute", MakeValue(false));
  }
  new_node->set_scope(scope);
  node_input[0]->set_scope(scope);
  manager->SetEdge(node, SizeToInt(index), new_node);
  MS_LOG(INFO) << "Insert " << instance_name << " success";
}
// Replace pre_node with pre_node->op
static CNodePtr ReplaceNode(const Operator &op, const AnfNodePtr &pre_node, const FuncGraphPtr &func_graph,
                            const std::string &instance_name, const std::string &param_name = "",
                            const FuncGraphPtr &root = nullptr) {
  // insert new node before the node
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  ScopePtr scope = pre_node->scope();
  MS_EXCEPTION_IF_NULL(scope);
  std::vector<AnfNodePtr> node_input;
  if (root && !param_name.empty()) {
    node_input = CreateMirrorInput(root, op, pre_node, instance_name, param_name);
  } else {
    node_input = CreateInput(op, pre_node, instance_name);
  }
  CNodePtr new_node = func_graph->NewCNode(node_input);
  MS_EXCEPTION_IF_NULL(new_node);
  if (instance_name.find(SPLIT_SENS) == std::string::npos) {
    new_node->set_in_forward_flag(true);  // mark forward flag
  }
  auto new_node_prim = GetValueNode<PrimitivePtr>(node_input[0]);
  new_node_prim->set_instance_name(instance_name);
  new_node_prim->set_attr("keep_value_node_input", MakeValue(true));
  if (instance_name.find(NOT_RECOMPUTE) != std::string::npos) {
    new_node_prim->set_attr("recompute", MakeValue(false));
  }
  new_node->set_scope(scope);
  node_input[0]->set_scope(scope);
  manager->Replace(pre_node, new_node);
  MS_LOG(INFO) << "Insert " << instance_name << " success";
  return new_node;
}
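// Inserts the sharded operator's forward communication after `node`, re-targeting to
// the TupleGetItem user when the op has a tuple output. A sketch of the rewrite,
// assuming forward_op holds a single AllReduce:
//   before: y = ReLU(MatMul(x, w))
//   after:  y = ReLU(AllReduce(MatMul(x, w)))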
void ForwardCommunication(OperatorVector forward_op, const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  // step1: get the graph manager
  FuncGraphPtr func_graph = node->func_graph();
  MS_EXCEPTION_IF_NULL(func_graph);
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  auto uses_set = manager->node_users()[node];
  CNodePtr node_to_insert = node;
  for (auto &uses_pair : uses_set) {
    auto uses_cnode = uses_pair.first->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(uses_cnode);
    if (!IsValueNode<Primitive>(uses_cnode->input(0))) {
      break;
    }
    PrimitivePtr value_node_prim = GetValueNode<PrimitivePtr>(uses_cnode->input(0));
    MS_EXCEPTION_IF_NULL(value_node_prim);
    if (value_node_prim->name() == prim::kTupleGetItem) {
      if (uses_set.size() > 1) {
        MS_LOG(EXCEPTION) << "Now only support one output, but got " << uses_set.size();
      }
      node_to_insert = uses_cnode;
    }
  }
  MS_EXCEPTION_IF_NULL(node_to_insert);
  std::reverse(forward_op.begin(), forward_op.end());
  // step2: traverse op_list and insert node
  for (size_t index = 0; index < forward_op.size(); ++index) {
    std::string instance_name_base = FORWARD_OP;
    std::string instance_name = instance_name_base + "_" + CreateInstanceName(node, index);
    std::vector<AnfNodePtr> forward_input = CreateInput(forward_op[index], node_to_insert, instance_name);
    SetAllReduceRecomputeFlag(forward_input, node_to_insert);
    CNodePtr forward_node = func_graph->NewCNode(forward_input);  // using NewCNode to create anfnode
    MS_EXCEPTION_IF_NULL(forward_node);
    ScopePtr scope = node->scope();
    MS_EXCEPTION_IF_NULL(scope);
    forward_node->set_scope(scope);
    forward_node->set_in_forward_flag(true);
    forward_input[0]->set_scope(scope);
    (void)manager->Replace(node_to_insert, forward_node);  // using Replace function to insert node
  }
}
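// Re-packs a multi-output node: builds TupleGetItem(prev, 0..num-1), wraps them in a
// MakeTuple, and redirects prev's users to it, e.g. for num == 2:
//   prev  ==>  MakeTuple(TupleGetItem(prev, 0), TupleGetItem(prev, 1))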
CNodePtr InsertMakeTuple(const AnfNodePtr &prev, uint64_t num, const FuncGraphPtr &func_graph) {
  MS_EXCEPTION_IF_NULL(prev);
  MS_EXCEPTION_IF_NULL(func_graph);
  std::vector<AnfNodePtr> make_tuple_inputs;
  make_tuple_inputs.push_back(NewValueNode(prim::kPrimMakeTuple));
  for (uint64_t i = 0; i < num; i++) {
    std::vector<AnfNodePtr> tuple_get_item_inputs{NewValueNode(prim::kPrimTupleGetItem), prev,
                                                  CreatInt64Imm(UlongToLong(i))};
    auto tuple_get_item = func_graph->NewCNode(tuple_get_item_inputs);
    MS_EXCEPTION_IF_NULL(tuple_get_item);
    make_tuple_inputs.push_back(tuple_get_item);
  }
  auto make_tuple = func_graph->NewCNode(make_tuple_inputs);
  MS_EXCEPTION_IF_NULL(make_tuple);
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  (void)manager->Replace(prev, make_tuple);
  return make_tuple;
}
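// Materializes an inferred redistribution sequence on the edge pre_node -> node at
// input `pos`: every Operator in the list becomes a CNode inserted before `node`, and
// ops whose producer or consumer opted out of communication recompute get the
// NOT_RECOMPUTE suffix that InsertNode translates into a recompute=false attr.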
void InsertRedistribution(const RedistributionOpListPtr &redistribution_oplist_ptr, const CNodePtr &node,
                          const FuncGraphPtr &func_graph, int64_t pos, const CNodePtr &pre_node) {
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(pre_node);
  MS_EXCEPTION_IF_NULL(func_graph);
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  if ((redistribution_oplist_ptr->first).size() != (redistribution_oplist_ptr->second).size()) {
    MS_LOG(EXCEPTION) << "size of OperatorVector and OutPutInfoVector must be the same!";
  }
  for (size_t index = 0; index < (redistribution_oplist_ptr->first).size(); ++index) {
    if (pos >= SizeToLong(node->inputs().size())) {
      MS_LOG(EXCEPTION) << "InsertRedistribution: pos can't be larger than the size of node's inputs";
    }
    // Create new node
    AnfNodePtr target_node = node->input(LongToSize(pos));
    MS_EXCEPTION_IF_NULL(target_node);
    // Create instance_name
    auto op = (redistribution_oplist_ptr->first)[index];
    std::string op_name = (redistribution_oplist_ptr->first)[index].first;
    std::string instance_name_base = REDISTRIBUTION_OP;
    std::string instance_name = instance_name_base + "_" + CreateInstanceName(pre_node, index) + op_name;
    auto prim_out = GetCNodePrimitive(node);
    auto prim_in = GetCNodePrimitive(pre_node);
    if (prim_out != nullptr && prim_in != nullptr) {
      auto prim_out_attr = prim_out->attrs();
      auto prim_in_attr = prim_in->attrs();
      if (((prim_out_attr.find(RECOMPUTE_COMM_OP) != prim_out_attr.end() &&
            !GetValue<bool>(prim_out_attr[RECOMPUTE_COMM_OP])) ||
           (prim_in_attr.find(RECOMPUTE_COMM_OP) != prim_in_attr.end() &&
            !GetValue<bool>(prim_in_attr[RECOMPUTE_COMM_OP]))) &&
          COMMUNICATION_OPS.find(op_name) != COMMUNICATION_OPS.end()) {
        MS_LOG(INFO) << "The redistribution node would not be recomputed.";
        instance_name = instance_name + "_" + NOT_RECOMPUTE;
      }
    }
    InsertNode(op, node, LongToSize(pos), target_node, func_graph, instance_name);
    if ((redistribution_oplist_ptr->second)[index].first) {
      target_node = node->input(LongToSize(pos));
      MS_EXCEPTION_IF_NULL(target_node);
      (void)InsertMakeTuple(target_node, (redistribution_oplist_ptr->second)[index].second, func_graph);
    }
  }
}
void InsertGetTensorSliceOp(const Operator &op, const CNodePtr &node, const FuncGraphPtr &func_graph, int64_t pos,
                            const std::string &instance_name) {
  if (func_graph == nullptr) {
    MS_LOG(EXCEPTION) << "InsertGetTensorSliceOp: the graph is null, the instance name is " << instance_name;
  }
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  if (pos >= SizeToLong(node->inputs().size())) {
    MS_LOG(EXCEPTION) << "InsertGetTensorSliceOp: pos can't be larger than the size of node's inputs, "
                      << "the instance name is " << instance_name;
  }
  // Create new node
  AnfNodePtr pre_node = node->input(LongToSize(pos));
  MS_EXCEPTION_IF_NULL(pre_node);
  InsertNode(op, node, LongToSize(pos), pre_node, func_graph, instance_name);
}
TensorLayout GetTensorInLayout(const CNodePtr &middle_node, const PrimitivePtr &middle_prim,
                               const OperatorInfoPtr &distribute_operator) {
  TensorInfo tensorinfo_in;
  if (middle_prim->name() == prim::kTupleGetItem) {
    auto value_node = middle_node->input(2)->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(value_node);
    size_t index_s = LongToSize(GetValue<int64_t>(value_node->value()));
    if (index_s >= distribute_operator->outputs_tensor_info().size()) {
      MS_LOG(EXCEPTION) << "The index out of range, index: " << index_s
                        << ", vector size: " << distribute_operator->outputs_tensor_info().size();
    }
    tensorinfo_in = distribute_operator->outputs_tensor_info()[index_s];
  } else {
    if (distribute_operator->outputs_tensor_info().empty()) {
      MS_LOG(EXCEPTION) << "The outputs tensor info is empty";
    }
    tensorinfo_in = distribute_operator->outputs_tensor_info()[0];
  }
  return tensorinfo_in.tensor_layout();
}
OperatorInfoPtr GetDistributeOperator(const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  if (!IsParallelCareNode(node)) {
    return nullptr;
  }
  OperatorInfoPtr distribute_operator = node->user_data<OperatorInfo>();
  return distribute_operator;
}
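// Core of redistribution for one producer/consumer edge: take the producer's output
// layout and the consumer's expected input layout, let TensorRedistribution infer the
// operator sequence converting one into the other, and insert that sequence.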
void Redistribution(const std::pair<AnfNodePtr, int64_t> &node_pair, const OperatorInfoPtr &distribute_operator,
                    const CNodePtr &middle_node, int64_t index, TensorRedistribution tensor_redistribution,
                    const CNodePtr &pre_node) {
  FuncGraphPtr func_graph = middle_node->func_graph();
  if (func_graph == nullptr) {
    MS_LOG(EXCEPTION) << "Redistribution: get graph failed";
  }
  CNodePtr next_node = node_pair.first->cast<CNodePtr>();
  MS_EXCEPTION_IF_NULL(next_node);
  auto middle_value = middle_node->input(0)->cast<ValueNodePtr>();
  MS_EXCEPTION_IF_NULL(middle_value);
  PrimitivePtr middle_prim = middle_value->value()->cast<PrimitivePtr>();
  MS_EXCEPTION_IF_NULL(middle_prim);
  OperatorInfoPtr next_distribute_operator = GetDistributeOperator(next_node);
  if (next_distribute_operator == nullptr) {
    MS_LOG(EXCEPTION) << "Failure: " << next_node->ToString() << " GetDistributeOperator failed";
  }
  RankList dev_list = distribute_operator->stage_device_list();
  std::string next_prim_name = GetValueNode<PrimitivePtr>(next_node->input(0))->name();
  MS_LOG(DEBUG) << "Redistribution: middle_prim " << middle_prim->name() << " next_prim " << next_prim_name;
  MS_LOG(DEBUG) << "Redistribution: middle_node " << middle_node->ToString() << " next_node " << next_node->ToString();
  // extract tensor layout in and out
  if (distribute_operator->outputs_tensor_info().empty()) {
    MS_LOG(WARNING) << "pre_node's tensorinfo_in is empty, operator name is " << distribute_operator->name();
    return;
  }
  if (LongToSize(index - 1) >= next_distribute_operator->inputs_tensor_info().size()) {
    MS_LOG(WARNING) << "The index is out of range, the index is " << (index - 1) << ", the vector size is "
                    << next_distribute_operator->inputs_tensor_info().size() << ", next operator name is "
                    << next_distribute_operator->name();
    return;
  }
  TensorInfo tensorinfo_out = next_distribute_operator->inputs_tensor_info()[LongToSize(index - 1)];
  TensorLayout tensorlayout_out = tensorinfo_out.tensor_layout();
  TensorLayout tensorlayout_in = GetTensorInLayout(middle_node, middle_prim, distribute_operator);
  if (IsPrimitiveCNode(middle_node, prim::kPrimReceive)) {
    tensorlayout_in = *(middle_node->user_data<TensorLayout>());
  }
  if (tensor_redistribution.Init(tensorlayout_in, tensorlayout_out, dev_list) == FAILED) {
    MS_LOG(ERROR) << "Redistribution: middle_prim " << middle_prim->name() << " next_prim : " << next_prim_name;
    MS_LOG(ERROR) << "Redistribution: middle_node " << middle_node->ToString() << " next_node "
                  << next_node->ToString();
    DumpGraph(func_graph, "redistribution_error");
    MS_LOG(EXCEPTION) << "Failure: tensor_redistribution init failed";
  }
  RedistributionOpListPtr redistribution_oplist_ptr = tensor_redistribution.InferTensorRedistributionOperatorList();
  if (redistribution_oplist_ptr == nullptr) {
    MS_LOG(EXCEPTION) << "Failure: InferTensorRedistribution failed";
  }
  MS_LOG(DEBUG) << "Redistribution size " << redistribution_oplist_ptr->first.size();
  if (!redistribution_oplist_ptr->first.empty()) {
    // insert node before next node
    InsertRedistribution(redistribution_oplist_ptr, next_node, func_graph, node_pair.second, pre_node);
  }
}
bool StrategyFound(const mindspore::HashMap<std::string, ValuePtr> &attrs) {
  auto iter = attrs.find(IN_STRATEGY);
  return !((iter == attrs.end()) || (iter->second->type_name() == NONE));
}
bool AttrFound(const mindspore::HashMap<std::string, ValuePtr> &attrs, const std::string &target) {
  auto iter = attrs.find(target);
  return !((iter == attrs.end()) || (iter->second->type_name() == NONE));
}
bool HasStrategy(const FuncGraphPtr &root) {
  AnfNodePtr ret = root->get_return();
  MS_EXCEPTION_IF_NULL(ret);
  std::vector<AnfNodePtr> all_nodes = DeepScopedGraphSearch(ret);
  for (auto &node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    if ((cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    auto attrs = prim->attrs();
    if (StrategyFound(attrs)) {
      return true;
    }
  }
  return false;
}
bool IsCommunicationOp(const PrimitivePtr &prim) {
  MS_EXCEPTION_IF_NULL(prim);
  return (COMMUNICATION_OPS.find(prim->name()) != COMMUNICATION_OPS.end());
}
bool FindCommunicationOp(const std::vector<AnfNodePtr> &all_nodes) {
  for (auto &node : all_nodes) {
    MS_EXCEPTION_IF_NULL(node);
    if (!node->isa<CNode>()) {
      continue;
    }
    auto cnode = node->cast<CNodePtr>();
    if (!IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    ValueNodePtr prim_value_node = cnode->input(0)->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(prim_value_node);
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_value_node);
    MS_EXCEPTION_IF_NULL(prim);
    if (IsCommunicationOp(prim) && cnode->in_forward_flag()) {
      MS_EXCEPTION_IF_NULL(prim_value_node->scope());
      MS_LOG(INFO) << "The graph contains a communication op: " << prim->name() << ", scope name is "
                   << prim_value_node->scope()->name();
      return true;
    }
  }
  return false;
}
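// Recursively walks the users of `node`, skipping Depend/UpdateState control edges,
// and triggers Redistribution once a parallel-care consumer carrying an OperatorInfo
// is reached, so redistribution still fires across transparent in-between nodes.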
void StepRedistribution(const CNodePtr &node, const OperatorInfoPtr &distribute_operator, const CNodePtr &insert_node,
                        const TensorRedistribution &tensor_redistribution, const CNodePtr &pre_node) {
  MS_EXCEPTION_IF_NULL(node->func_graph());
  FuncGraphManagerPtr manager = node->func_graph()->manager();
  MS_EXCEPTION_IF_NULL(manager);
  AnfNodeIndexSet node_set = manager->node_users()[node];
  CNodePtr insert_node_new;
  if (IsPrimitiveCNode(node, prim::kPrimSend)) {
    return;
  }
  if (AnfNodeIsPrimitive(node, MAKE_TUPLE) || AnfNodeIsPrimitive(node, MAKE_LIST)) {
    MS_LOG(INFO) << "No need to insert redistribution op between make_tuple node and the next node";
    return;
  }
  if (IsValueNode<Primitive>(node->input(0))) {
    auto current_value = node->input(0)->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(current_value);
    PrimitivePtr current_prim = current_value->value()->cast<PrimitivePtr>();
    MS_EXCEPTION_IF_NULL(current_prim);
    insert_node_new = ((current_prim->name() == prim::kTupleGetItem) ? node : insert_node);
  } else {
    insert_node_new = insert_node;
  }
  MS_EXCEPTION_IF_NULL(insert_node_new);
  for (auto &node_pair : node_set) {
    CNodePtr use_cnode = node_pair.first->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(use_cnode);
    if (!IsValueNode<Primitive>(use_cnode->input(0))) {
      StepRedistribution(use_cnode, distribute_operator, insert_node_new, tensor_redistribution, pre_node);
    } else {
      ValueNodePtr prim_anf_node = use_cnode->input(0)->cast<ValueNodePtr>();
      MS_EXCEPTION_IF_NULL(prim_anf_node);
      PrimitivePtr node_prim = prim_anf_node->value()->cast<PrimitivePtr>();
      MS_EXCEPTION_IF_NULL(node_prim);
      if ((node_prim->name() == DEPEND && node_pair.second != 1) || node_prim->name() == UPDATESTATE) {
        continue;
      }
      if (IsParallelCareNode(use_cnode) && use_cnode->has_user_data<OperatorInfo>()) {
        Redistribution(node_pair, distribute_operator, insert_node_new, node_pair.second, tensor_redistribution,
                       pre_node);
      } else {
        StepRedistribution(use_cnode, distribute_operator, insert_node_new, tensor_redistribution, pre_node);
      }
    }
  }
}
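// Slices a constant or parameter feeding input `index` of next_node according to that
// input's layout by inserting a _GetTensorSlice op. A sketch, assuming a [8, 8] tensor
// whose first dimension is sharded over 4 devices: each rank then consumes its own
// [2, 8] slice instead of the full tensor.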
void SplitTensor(const AnfNodePtr &node, const CNodePtr &next_node, int64_t index) {
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(next_node);
  OperatorInfoPtr op_info = next_node->user_data<OperatorInfo>();
  MS_EXCEPTION_IF_NULL(op_info);
  // If the shape of tensor is [] or [1], no need to split it.
  Shapes shapes = GetNodeShape(node);
  if (shapes.size() != 1) {
    MS_LOG(EXCEPTION) << "Split tensor for " << op_info->name()
                      << ": GetNodeShape for tensor_node, output size is not 1";
  }
  Shape shape = shapes[0];
  std::string shape_str = ShapeToString(shape);
  if (shape.empty() || ((shape.size() == 1) && (shape[0] == 1))) {
    MS_LOG(INFO) << "Split tensor for " << op_info->name() << ": The shape is " << shape_str
                 << ", no need to split it.";
    return;
  }
  MS_LOG(INFO) << "Split tensor for " << op_info->name() << ": The shape of tensor is " << shape_str;
  // extract tensor layout
  if (LongToSize(index - 1) >= op_info->inputs_tensor_info().size()) {
    MS_LOG(EXCEPTION) << "The index is out of range, index is " << (index - 1) << ", vector size is "
                      << op_info->inputs_tensor_info().size();
  }
  TensorInfo tensor_info = op_info->inputs_tensor_info()[LongToSize(index - 1)];
  TensorLayout tensor_layout = tensor_info.tensor_layout();
  // Use _GetTensorSlice operator to split the tensor
  FuncGraphPtr func_graph = next_node->func_graph();  // only cnode can get the graph
  MS_EXCEPTION_IF_NULL(func_graph);
  Operator op = CreateGetTensorSliceOp(tensor_layout);
  InsertGetTensorSliceOp(op, next_node, func_graph, index, SPLIT_TENSOR);
  if (!op_info->sub_ops().empty()) {
    auto sub_ops = op_info->sub_ops();
    for (size_t i = 0; i < sub_ops.size(); i++) {
      if (!sub_ops.at(i).empty()) {
        InsertGetTensorSliceOp(sub_ops.at(i).at(0), next_node, func_graph, index, SUB);
      }
    }
  }
}
void SplitTensorList(const AnfNodePtr &node, const CNodePtr &next_node, int index) {
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(next_node);
  if (next_node->inputs().size() != 2 || index != 1) {
    MS_LOG(INFO) << next_node->fullname_with_scope() << " must have exactly one input, but got "
                 << (next_node->inputs().size() - 1) << "; the index should be 1, but got " << index;
    return;
  }
  OperatorInfoPtr op_info = next_node->user_data<OperatorInfo>();
  MS_EXCEPTION_IF_NULL(op_info);
  std::vector<ValuePtr> inputs_values;
  if (IsValueNode<ValueList>(node)) {
    inputs_values = node->cast<ValueNodePtr>()->value()->cast<ValueListPtr>()->value();
  } else {
    inputs_values = node->cast<ValueNodePtr>()->value()->cast<ValueTuplePtr>()->value();
  }
  if (inputs_values.size() != op_info->inputs_tensor_info().size()) {
    MS_LOG(EXCEPTION) << "The inputs size " << inputs_values.size() << ", is not equal to inputs shape size "
                      << op_info->inputs_tensor_info().size();
  }
  std::vector<AnfNodePtr> make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple)};
  FuncGraphPtr func_graph = next_node->func_graph();
  MS_EXCEPTION_IF_NULL(func_graph);
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  ScopePtr scope = next_node->scope();
  MS_EXCEPTION_IF_NULL(scope);
  for (size_t i = 0; i < inputs_values.size(); ++i) {
    auto value_ptr = inputs_values[i];
    auto tensor = value_ptr->cast<tensor::TensorPtr>();
    MS_EXCEPTION_IF_NULL(tensor);
    TensorInfo tensor_info = op_info->inputs_tensor_info()[i];
    TensorLayout tensor_layout = tensor_info.tensor_layout();
    auto value_node = NewValueNode(value_ptr)->cast<AnfNodePtr>();
    Operator op = CreateGetTensorSliceOp(tensor_layout);
    std::vector<AnfNodePtr> node_input = CreateInput(op, value_node, SPLIT_TENSOR);
    CNodePtr new_node = func_graph->NewCNode(node_input);
    new_node->set_in_forward_flag(true);
    auto new_node_value = node_input[0]->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(new_node_value);
    PrimitivePtr new_node_prim = new_node_value->value()->cast<PrimitivePtr>();
    new_node_prim->set_instance_name(SPLIT_TENSOR);
    new_node_prim->set_attr("keep_value_node_input", MakeValue(true));
    new_node->set_scope(scope);
    node_input[0]->set_scope(scope);
    make_tuple_inputs.push_back(new_node);
  }
  CNodePtr make_tuple = func_graph->NewCNode(make_tuple_inputs);
  manager->Replace(node, make_tuple);
}
void StepSplitTensor(const AnfNodePtr &node, const FuncGraphManagerPtr &manager) {
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(manager);
  AnfNodeIndexSet node_set = manager->node_users()[node];
  for (auto &node_pair : node_set) {
    CNodePtr use_cnode = node_pair.first->cast<CNodePtr>();
    if (use_cnode == nullptr || !IsValueNode<Primitive>(use_cnode->input(0))) {
      continue;
    }
    ValueNodePtr prim_anf_node = use_cnode->input(0)->cast<ValueNodePtr>();
    MS_EXCEPTION_IF_NULL(prim_anf_node);
    PrimitivePtr use_cnode_prim = prim_anf_node->value()->cast<PrimitivePtr>();
    MS_EXCEPTION_IF_NULL(use_cnode_prim);
    if ((use_cnode_prim->name() == DEPEND && node_pair.second != 1) ||
        NO_INPUT_TENSOR_OPS.find(use_cnode_prim->name()) != NO_INPUT_TENSOR_OPS.end()) {
      continue;
    }
    if (IsParallelCareNode(use_cnode)) {
      if (IsValueNode<ValueList>(node) || IsValueNode<ValueTuple>(node)) {
        SplitTensorList(node, use_cnode, node_pair.second);
      } else {
        SplitTensor(node, use_cnode, node_pair.second);
      }
    }
  }
}
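// Replaces a sharded op with its replace_op pipeline. For a bool-typed reshape, the
// pipeline is additionally wrapped in Cast(int32) ... Cast(bool) so that no AllGather
// ever runs on a bool tensor, per the comment inside.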
void StepReplaceOp(OperatorVector replace_op, const CNodePtr &node) {
  // step1: get the distribute_operator and graph manager
  OperatorInfoPtr distribute_operator = node->user_data<OperatorInfo>();
  if (distribute_operator == nullptr) {
    MS_LOG(EXCEPTION) << "Failure: AddNode error since distribute_operator is nullptr";
  }
  FuncGraphPtr func_graph = node->func_graph();
  MS_EXCEPTION_IF_NULL(func_graph);
  FuncGraphManagerPtr manager = func_graph->manager();
  if (manager == nullptr) {
    MS_LOG(EXCEPTION) << "Failure: AddNode error since manager is nullptr";
  }
  // When reshape(bool), insert cast in the begin and end of op_list to avoid AllGather(bool).
  auto reshape_type_str = node->abstract()->BuildType()->ToString();
  auto replace_op_info = distribute_operator->replace_op_info();
  if (reshape_type_str.find(BOOL) != std::string::npos) {
    auto cast_int = CreateCastOp(kInt32);
    auto cast_bool = CreateCastOp(kBool);
    (void)replace_op.insert(replace_op.begin(), cast_int);
    (void)replace_op.insert(replace_op.end(), cast_bool);
    (void)replace_op_info.insert(replace_op_info.begin(), {false, 1});
    (void)replace_op_info.insert(replace_op_info.end(), {false, 1});
  }
  // step2: traverse op_list and insert node
  std::reverse(replace_op.begin(), replace_op.end());
  std::reverse(replace_op_info.begin(), replace_op_info.end());
  if (!replace_op_info.empty() && replace_op_info.size() != replace_op.size()) {
    MS_LOG(EXCEPTION) << "replace_op_info is not empty and size not equal to replace_op!";
  }
  bool replace_op_info_flag = !replace_op_info.empty();
  for (size_t index = 0; index < replace_op.size(); ++index) {
    std::string instance_name = CreateInstanceName(node, index);
    std::vector<AnfNodePtr> replace_input;
    if (index != replace_op.size() - 1) {
      replace_input = CreateInput(replace_op[index], node, instance_name);
    } else {
      replace_input = ReplaceOpInput(replace_op[index], instance_name, node);
    }
    CNodePtr replace_node = func_graph->NewCNode(replace_input);
    MS_EXCEPTION_IF_NULL(replace_node);
    ScopePtr scope = node->scope();
    MS_EXCEPTION_IF_NULL(scope);
    replace_node->set_scope(scope);
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(replace_node->input(0));
    PrimitivePtr origin_prim = GetValueNode<PrimitivePtr>(node->input(0));
    SetUserAttrs(origin_prim->attrs(), prim);
    auto origin_prim_attrs = origin_prim->attrs();
    if (origin_prim_attrs.find(RECOMPUTE_COMM_OP) != origin_prim_attrs.end() &&
        !GetValue<bool>(origin_prim_attrs[RECOMPUTE_COMM_OP]) &&
        COMMUNICATION_OPS.find(prim->name()) != COMMUNICATION_OPS.end()) {
      MS_LOG(INFO) << "The redistribution node in reshape would not be recomputed.";
      prim->set_attr("recompute", MakeValue(false));
    }
    if (index == replace_op.size() - 1) {
      replace_node->set_user_data<OperatorInfo>(node->user_data<OperatorInfo>());
      replace_node->set_primal_attrs(node->primal_attrs());
    }
    replace_node->set_in_forward_flag(true);
    replace_input[0]->set_scope(scope);
    if (replace_op_info_flag && replace_op_info[index].first) {
      auto new_cnode = InsertMakeTuple(replace_node, replace_op_info[index].second, func_graph);
      new_cnode->set_primal_attrs(node->primal_attrs());
      (void)manager->Replace(node, new_cnode);  // using Replace function to insert node
    } else {
      (void)manager->Replace(node, replace_node);  // using Replace function to insert node
    }
  }
  MS_LOG(INFO) << "Insert ReplaceOp success for " << distribute_operator->name();
}
void StepReplaceGraph(const ReplaceGraphPtr &replace_graph, const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(replace_graph);
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(replace_graph->second);
  FuncGraphPtr func_graph = node->func_graph();
  MS_EXCEPTION_IF_NULL(func_graph);
  FuncGraphManagerPtr manager = func_graph->manager();
  if (manager == nullptr) {
    MS_LOG(EXCEPTION) << "Failure: AddNode error since manager is nullptr";
  }
  // Solve the input order.
  // For example, input_node: {segment_sum: 1, segment_sum: 2, gather: 2}.
  // The original code bound all operations to the first input of these operators,
  // but segment_sum needs two inputs. To solve this, we maintain a map that counts
  // how many times the same operation has appeared, and bind each input according
  // to that count.
  mindspore::HashMap<AnfNodePtr, int> input_map = {};
  static int appear_count = 0;
  for (auto &replace_input : replace_graph->first) {
    auto pre_node = node->input(LongToSize(replace_input.second));
    auto it = input_map.find(replace_input.first);
    if (it != input_map.end()) {
      appear_count = 1 + it->second;
    } else {
      appear_count = 1;
    }
    auto replace_input_cnode = replace_input.first->cast<CNodePtr>();
    size_t inputs_size = replace_input_cnode->inputs().size();
    while (IntToSize(appear_count) < inputs_size && replace_input_cnode->input(appear_count)->func_graph() != nullptr) {
      ++appear_count;
    }
    if (IntToSize(appear_count) >= inputs_size) {
      MS_LOG(EXCEPTION) << "No replaceable virtual_input_node";
    }
    input_map[replace_input.first] = appear_count;
    manager->SetEdge(replace_input.first, appear_count, pre_node);
  }
  // "(void)manager->Replace(replace_graph->first, pre_node);" cannot be called here
  auto replace_output = replace_graph->second->cast<CNodePtr>();
  MS_EXCEPTION_IF_NULL(replace_output);
  replace_output->set_primal_attrs(node->primal_attrs());
  (void)manager->Replace(node, replace_output);
}
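// Extracts the constant index of a TupleGetItem CNode, whose inputs are laid out as
// {ValueNode(TupleGetItem), tuple, ValueNode(Int64Imm)} -- hence the size-3 check.
// For TupleGetItem(t, 2) this returns 2.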
int64_t GetTupleGetItemIndex(const CNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  if (cnode->inputs().size() != 3) {
    MS_LOG(EXCEPTION) << cnode->ToString() << " size( " << cnode->inputs().size() << " ) is not 3";
  }
  if (!cnode->input(TUPLE_GETITEM_INDEX_POS)->isa<ValueNode>()) {
    MS_LOG(EXCEPTION) << "The index of tuple getitem is not a value node";
  }
  ValuePtr tuple_index_value = GetValueNode(cnode->input(TUPLE_GETITEM_INDEX_POS));
  MS_EXCEPTION_IF_NULL(tuple_index_value);
  if (!tuple_index_value->isa<Int64Imm>()) {
    MS_LOG(EXCEPTION) << "The index of tuple getitem is not int64";
  }
  return tuple_index_value->cast<Int64ImmPtr>()->value();
}
void InsertVirtualDivOp(const VirtualDivOp &virtual_div_op, const CNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  size_t node_size = node->inputs().size();
  FuncGraphPtr func_graph = node->func_graph();
  MS_EXCEPTION_IF_NULL(func_graph);
  FuncGraphManagerPtr manager = func_graph->manager();
  MS_EXCEPTION_IF_NULL(manager);
  if (IsSomePrimitive(node, DROPOUT_DO_MASK)) {
    MS_LOG(INFO) << "Handle dropout do mask, only insert the virtual div to input[0]";
    node_size = 2;
  }
  for (size_t index = 1; index < node_size; ++index) {
    AnfNodePtr input = node->input(index);
    MS_EXCEPTION_IF_NULL(input);
    // if it is not a tensor, continue
    if ((!input->isa<CNode>() && !input->isa<Parameter>()) || HasAbstractMonad(input)) {
      MS_LOG(INFO) << "insert div op: the index " << index << " is not tensor, skip";
      continue;
    }
    for (size_t pos = 0; pos < virtual_div_op.size(); ++pos) {
      std::string instance_name = CreateInstanceName(node, pos);
      InsertNode(virtual_div_op[pos], node, index, node->input(index), func_graph, instance_name);
    }
    MS_LOG(INFO) << "insert div op for input index " << index << " of node";
  }
}
void InsertRealDivOpToNodeInput(const CNodePtr &node, int64_t scale, const string &instance_name) {
  MS_EXCEPTION_IF_NULL(node);
  if (scale == 0) {
    MS_LOG(EXCEPTION) << "Find the scale value is 0, you should check the mirror operator's group size.";
  }
  size_t node_size = node->inputs().size();
  FuncGraphPtr func_graph = node->func_graph();
  MS_EXCEPTION_IF_NULL(func_graph);
  // instance the real div operator
  Operator div_op = CreateDivOp(scale);
  // Insert it as the input of the node
  for (size_t index = 1; index < node_size; ++index) {
    AnfNodePtr input = node->input(index);
    MS_EXCEPTION_IF_NULL(input);
    // if it is not a tensor, continue
    if ((!input->isa<CNode>() && !input->isa<Parameter>()) || HasAbstractMonad(input)) {
      continue;
    }
    InsertNode(div_op, node, index, node->input(index), func_graph, instance_name);
  }
}
  838. void InsertAllReduceToNodeInput(const CNodePtr &node, const std::string &group, const std::string &instance_name) {
  839. MS_EXCEPTION_IF_NULL(node);
  840. size_t node_size = node->inputs().size();
  841. FuncGraphPtr func_graph = node->func_graph();
  842. MS_EXCEPTION_IF_NULL(func_graph);
  843. // instance the real div operator
  844. CheckGlobalDeviceManager();
  845. Operator allreduce_op = CreateAllReduceOp(REDUCE_OP_SUM, group);
  846. // Insert it as the input of the node
  847. for (size_t index = 1; index < node_size; ++index) {
  848. AnfNodePtr input = node->input(index);
  849. MS_EXCEPTION_IF_NULL(input);
  850. // if it is not a tensor, continue
  851. if ((!input->isa<CNode>() && !input->isa<Parameter>()) || HasAbstractMonad(input)) {
  852. continue;
  853. }
  854. InsertNode(allreduce_op, node, index, node->input(index), func_graph, instance_name);
  855. }
  856. }
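// PynativeParallelGraph (below) scans all_nodes for a Shard primitive and, when one is
// found, returns the func graph it wraps (its second input); otherwise the root graph
// is returned unchanged.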
  857. FuncGraphPtr PynativeParallelGraph(const FuncGraphPtr &root, const std::vector<AnfNodePtr> &all_nodes) {
  858. FuncGraphPtr real_graph = root;
  859. for (auto &node : all_nodes) {
  860. if (!node->isa<CNode>()) {
  861. continue;
  862. }
  863. auto cnode = node->cast<CNodePtr>();
  864. if (!IsValueNode<Primitive>(cnode->input(0))) {
  865. continue;
  866. }
  867. auto expect_shard_prim = GetValueNode<PrimitivePtr>(cnode->input(0));
  868. if (expect_shard_prim->name() != SHARD) {
  869. continue;
  870. }
  871. real_graph = GetValueNode<FuncGraphPtr>(cnode->input(1));
  872. }
  873. return real_graph;
  874. }
  875. void InsertVirtualOutput(const FuncGraphPtr &root, const std::vector<AnfNodePtr> &all_nodes) {
  876. std::vector<std::string> last_forward_node_ids;
  877. std::vector<size_t> last_indexs;
  878. auto real_graph = PynativeParallelGraph(root, all_nodes);
  879. FindLastNodesUniqueId(real_graph, &last_forward_node_ids, &last_indexs);
  880. MS_LOG(INFO) << "there are " << last_forward_node_ids.size() << " output nodes in eval/predict";
  881. for (auto &node : all_nodes) {
  882. // here insert virtualoutput node
  883. auto cnode = node->cast<CNodePtr>();
  884. if (cnode == nullptr) {
  885. continue;
  886. }
  887. auto last_node_iter = std::find(last_forward_node_ids.begin(), last_forward_node_ids.end(), cnode->UniqueId());
  888. if (last_node_iter == last_forward_node_ids.end()) {
  889. continue;
  890. }
  891. for (size_t last_node_index = 0; last_node_index < last_forward_node_ids.size(); ++last_node_index) {
  892. if (last_forward_node_ids[last_node_index] != cnode->UniqueId()) {
  893. continue;
  894. }
  895. MS_LOG(INFO) << "find last node: " << cnode->fullname_with_scope() << ", the parallel care node is: "
  896. << cnode->input(last_indexs[last_node_index])->fullname_with_scope();
  897. if (IsPrimitiveCNode(cnode, prim::kPrimTupleGetItem)) {
  898. FuncGraphManagerPtr manager = cnode->func_graph()->manager();
  899. MS_EXCEPTION_IF_NULL(manager);
  900. auto node_pair = manager->node_users()[cnode].front();
  901. if (!node_pair.first->isa<CNode>()) {
902. MS_LOG(EXCEPTION) << "The output of tuple_getitem is not a CNode";
  903. }
  904. cnode = node_pair.first->cast<CNodePtr>();
  905. last_indexs[last_node_index] = IntToSize(node_pair.second);
  906. }
  907. auto pre_node = cnode->input(last_indexs[last_node_index]);
  908. Shapes shape_outputs = GetNodeShape(pre_node);
  909. if (shape_outputs[0].empty()) {
  910. continue;
  911. }
  912. FuncGraphPtr func_graph = node->func_graph();
  913. MS_EXCEPTION_IF_NULL(func_graph);
  914. OperatorParams params;
  915. OperatorAttrs attrs;
  916. OperatorArgs args = std::make_pair(attrs, params);
  917. Operator op = std::make_pair(VIRTUAL_OUTPUT, args);
  918. InsertNode(op, cnode, last_indexs[last_node_index], pre_node, func_graph, VIRTUAL_OUTPUT);
  919. auto virtual_output_node = cnode->input(last_indexs[last_node_index]);
  920. AbstractBasePtr virtual_output_abstract = pre_node->abstract()->Clone();
  921. std::shared_ptr<abstract::BaseShape> virtual_output_shape = std::make_shared<abstract::Shape>(shape_outputs[0]);
  922. virtual_output_abstract->set_shape(virtual_output_shape);
  923. virtual_output_node->set_abstract(virtual_output_abstract);
  924. }
  925. }
  926. }
  927. // only used for FindCNode
  928. CNodePtr SkipTrivialNodesMoveDown(const FuncGraphManagerPtr &manager, CNodePtr node) {
  929. MS_EXCEPTION_IF_NULL(node);
  930. while (IsInTrivialNodeList(node) || IsSomePrimitive(node, LOAD)) {
  931. node = manager->node_users()[node].begin()->first->cast<CNodePtr>();
  932. }
  933. return node;
  934. }
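// FindCNode (below) searches the users of `anode` for a CNode whose primitive name
// matches `name` and which consumes `anode` as its first real input (node_pair.second == 1),
// skipping trivial/Load nodes and, when the parallel optimizer is enabled, recursing
// through AllGather nodes. It reports whether a match exists in `func_graph` and returns
// the matching CNode.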
  935. std::pair<bool, CNodePtr> FindCNode(const AnfNodePtr &anode, const std::string &name, const FuncGraphPtr &func_graph,
  936. size_t max_depth) {
  937. MS_EXCEPTION_IF_NULL(anode);
  938. MS_EXCEPTION_IF_NULL(anode->func_graph());
  939. FuncGraphManagerPtr manager = anode->func_graph()->manager();
  940. MS_EXCEPTION_IF_NULL(manager);
  941. if (max_depth > MAX_RECURSIVE_DEPTH) {
942. MS_LOG(EXCEPTION) << "The recursive call depth is larger than " << MAX_RECURSIVE_DEPTH << ".";
  943. }
  944. AnfNodeIndexSet node_set = manager->node_users()[anode];
  945. bool result = false;
  946. CNodePtr cnode_return = nullptr;
  947. for (auto &node_pair : node_set) {
  948. CNodePtr use_apply = node_pair.first->cast<CNodePtr>();
  949. if (use_apply == nullptr || !IsValueNode<Primitive>(use_apply->input(0))) {
  950. continue;
  951. }
  952. use_apply = SkipTrivialNodesMoveDown(manager, use_apply);
  953. if (use_apply == nullptr || !IsValueNode<Primitive>(use_apply->input(0))) {
  954. continue;
  955. }
  956. ValueNodePtr prim_anf_node = use_apply->input(0)->cast<ValueNodePtr>();
  957. MS_EXCEPTION_IF_NULL(prim_anf_node);
  958. PrimitivePtr node_prim = prim_anf_node->value()->cast<PrimitivePtr>();
  959. MS_EXCEPTION_IF_NULL(node_prim);
  960. if (node_prim->name() == name && node_pair.second == 1) {
  961. if (use_apply->func_graph() == func_graph) {
  962. result = true;
  963. cnode_return = use_apply;
  964. MS_LOG(INFO) << "Find Primitive " << name << " in the same func_graph";
  965. continue;
  966. }
  967. MS_LOG(INFO) << "Find Primitive " << name << " in different func_graph";
  968. }
  969. if (ParallelContext::GetInstance()->enable_parallel_optimizer() && IsInAllGatherNodeList(use_apply)) {
  970. return FindCNode(node_pair.first, name, func_graph, max_depth + 1);
  971. }
  972. }
  973. return std::make_pair(result, cnode_return);
  974. }
  975. bool InsertMirrorBeforeCast(const CNodePtr &node, size_t index) {
  976. // only if gradient_fp32_sync is true, pre node is cast and type is not float32 return true
  977. if (!ParallelContext::GetInstance()->gradient_fp32_sync()) {
  978. return false;
  979. }
  980. auto pre_node = node->input(index);
  981. MS_EXCEPTION_IF_NULL(pre_node);
  982. auto cnode = pre_node->cast<CNodePtr>();
  983. if (cnode == nullptr || !IsValueNode<Primitive>(cnode->input(0))) {
  984. return false;
  985. }
  986. if (ParallelContext::GetInstance()->enable_parallel_optimizer() && IsInAllGatherNodeList(cnode)) {
  987. pre_node = cnode->input(1);
  988. }
  989. if (!IsPrimitiveCNode(pre_node, prim::kPrimCast)) {
  990. return false;
  991. }
  992. auto node_type = pre_node->Type();
  993. MS_EXCEPTION_IF_NULL(node_type);
  994. if (!node_type->isa<mindspore::TensorType>()) {
  995. MS_LOG(EXCEPTION) << "Unknown type.";
  996. }
  997. auto input_element_type = node_type->cast<mindspore::TensorTypePtr>()->element();
  998. MS_EXCEPTION_IF_NULL(input_element_type);
  999. auto type_id = input_element_type->type_id();
  1000. return (type_id != kNumberTypeFloat32);
  1001. }
  1002. static bool CheckInsertMirrorOps(const MirrorOps &mirror_ops, const CNodePtr &node, size_t node_size) {
  1003. if (IsPrimitiveCNode(node, prim::kPrimSend)) {
  1004. return true;
  1005. }
  1006. constexpr size_t kSingleArgCNodeSize = 2;
  1007. if ((node->inputs().size() == kSingleArgCNodeSize) && (IsValueNode<ValueSequence>(node->input(1)))) {
1008. MS_LOG(INFO) << "The input is a ValueSequence, skip it.";
  1009. return false;
  1010. }
  1011. if ((node->inputs().size() == kSingleArgCNodeSize) &&
  1012. (AnfNodeIsPrimitive(node->input(1), MAKE_TUPLE) || AnfNodeIsPrimitive(node->input(1), MAKE_LIST))) {
1013. MS_LOG(INFO) << "The mirror for " << GetPrimName(node) << " has been handled by the make_tuple node";
  1014. return false;
  1015. }
  1016. if (mirror_ops.size() != node_size - 1) {
1017. MS_LOG(EXCEPTION) << "The size of mirror_ops is wrong! mirror_ops size is " << mirror_ops.size() << ", node_size is "
1018. << (node_size - 1);
  1019. }
  1020. return true;
  1021. }
  1022. // only used for InsertMirrorOps
  1023. CNodePtr SkipTrivialNodesMoveUp(CNodePtr node) {
  1024. MS_EXCEPTION_IF_NULL(node);
  1025. while (!IsSomePrimitive(node, LOAD)) {
  1026. if (IsInTrivialNodeList(node) || IsInAllGatherNodeList(node)) {
  1027. node = node->input(1)->cast<CNodePtr>();
  1028. }
  1029. }
  1030. auto prev_node = node->input(1)->cast<CNodePtr>();
  1031. if (prev_node != nullptr) {
  1032. if (IsSomePrimitive(prev_node, DEPEND)) {
  1033. auto prev_prev_node = prev_node->input(1)->cast<CNodePtr>();
  1034. if (IsSomePrimitive(node, LOAD)) {
  1035. node = prev_prev_node;
  1036. MS_LOG(INFO) << "Moving to the Load node before Depend node.";
  1037. }
  1038. }
  1039. }
  1040. return node;
  1041. }
  1042. std::string MirrorOpName() {
  1043. int64_t grad_accumulation_step = ParallelContext::GetInstance()->grad_accumulation_step();
  1044. int64_t split_stage_num = ParallelContext::GetInstance()->pipeline_stage_split_num();
  1045. std::string mirror_op_name;
  1046. if (grad_accumulation_step > 1) {
  1047. mirror_op_name = MIRROR_MINI_STEP_OPERATOR;
  1048. } else if (split_stage_num > 1) {
  1049. mirror_op_name = MIRROR_MICRO_STEP_OPERATOR;
  1050. } else {
  1051. mirror_op_name = MIRROR_OPERATOR;
  1052. }
  1053. return mirror_op_name;
  1054. }
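// Selection summary, derived from the branches above:
//   grad_accumulation_step > 1    -> MIRROR_MINI_STEP_OPERATOR
//   pipeline_stage_split_num > 1  -> MIRROR_MICRO_STEP_OPERATOR
//   otherwise                     -> MIRROR_OPERATOR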
  1055. static void DoInsertMirrorOps(const FuncGraphPtr &root, const MirrorOps &mirror_ops, const CNodePtr &node,
  1056. size_t node_size) {
  1057. FuncGraphPtr func_graph = node->func_graph();
  1058. MS_EXCEPTION_IF_NULL(func_graph);
  1059. FuncGraphManagerPtr manager = func_graph->manager();
  1060. MS_EXCEPTION_IF_NULL(manager);
  1061. for (size_t index = 1; index < node_size; ++index) {
  1062. OperatorVector backward_op = mirror_ops[index - 1];
  1063. if (IsPrimitiveCNode(node, prim::kPrimSend)) {
  1064. auto param_index = GetValue<int>(node->GetPrimalAttr(PARAM_INDEX));
  1065. backward_op = mirror_ops[IntToSize(param_index)];
  1066. }
  1067. if (backward_op.empty()) {
  1068. continue;
  1069. }
  1070. std::pair<AnfNodePtr, bool> param_node_pair = FindParameter(node->input(index), func_graph);
  1071. if (!param_node_pair.first) {
  1072. continue;
  1073. }
  1074. auto param_ptr = param_node_pair.first->cast<ParameterPtr>();
  1075. std::string param_name;
  1076. bool is_shared_param = false;
  1077. if (param_ptr) {
  1078. param_name = param_ptr->name();
  1079. if (!param_ptr->param_info() || !param_ptr->param_info()->requires_grad()) {
1080. MS_LOG(INFO) << param_name << " does not need gradient. Skip inserting the mirror.";
  1081. continue;
  1082. }
  1083. std::string opt_shard_mirror_group;
  1084. if (param_ptr->user_data<TensorLayout>()) {
  1085. opt_shard_mirror_group = param_ptr->user_data<TensorLayout>()->opt_shard_mirror_group();
  1086. is_shared_param = param_ptr->user_data<TensorLayout>()->is_shared_param();
  1087. }
  1088. if (!opt_shard_mirror_group.empty()) {
1089. // the mirror op is still needed when the optimizer shard does not fully cover the parameter
  1090. backward_op = CreateMirrorOps(opt_shard_mirror_group, static_cast<size_t>(opt_shard_mirror_group[0]));
  1091. }
  1092. }
  1093. // not a RefKey
  1094. std::string mirror_op_name = MirrorOpName();
  1095. AnfNodePtr pre_node = node->input(index);
  1096. if (!param_node_pair.second) {
  1097. auto next_cnode = FindCNode(param_node_pair.first, mirror_op_name, func_graph, 0);
1098. // if there is already a MirrorOp in the same graph, use the MirrorOp CNode as an input instead
  1099. if (next_cnode.first) {
  1100. MS_EXCEPTION_IF_NULL(next_cnode.second);
  1101. // assume Load is inserted next to parameter
  1102. // skip Load moving up and insert mirror next to the parameter
  1103. if (pre_node->cast<CNodePtr>()) {
  1104. CNodePtr load_node = SkipTrivialNodesMoveUp(node->input(index)->cast<CNodePtr>());
  1105. manager->SetEdge(load_node, 1, next_cnode.second);
  1106. } else {
  1107. manager->SetEdge(node, static_cast<int>(index), next_cnode.second);
  1108. }
  1109. MS_LOG(INFO) << "Find parameter " << param_name << " for node " << GetPrimName(node->cast<CNodePtr>())
  1110. << " and share the mirror.";
  1111. continue;
  1112. }
  1113. }
  1114. // if the parameter found is a RefKey, or no MirrorOp is found in the same graph, insert a new MirrorOp
  1115. // only one MirrorOp in backward_op
  1116. if (backward_op.size() != 1) {
1117. MS_LOG(EXCEPTION) << "The backward_op size must be 1, but it is " << backward_op.size();
  1118. }
  1119. auto op = backward_op[0];
  1120. if (pre_node->cast<CNodePtr>() && (InsertMirrorBeforeCast(node, index) || is_shared_param)) {
  1121. // assume Load is inserted next to parameter
  1122. // skip Load moving up and insert mirror next to the parameter
  1123. CNodePtr load_node = SkipTrivialNodesMoveUp(pre_node->cast<CNodePtr>());
  1124. InsertNode(op, load_node, 1, load_node->input(1), func_graph, mirror_op_name, param_name, root);
  1125. auto comm_op = load_node->input(1)->cast<CNodePtr>();
  1126. // add fusion flag
  1127. AddCommOpFusionType(comm_op, param_node_pair.first);
  1128. MS_LOG(INFO) << "Find parameter " << param_name << " for node " << GetPrimName(node->cast<CNodePtr>())
  1129. << " and insert mirror before Load";
  1130. AddCommOpParamFlag(comm_op);
  1131. continue;
  1132. }
  1133. InsertNode(op, node, index, pre_node, func_graph, mirror_op_name, param_name, root);
  1134. MS_LOG(INFO) << "Find parameter " << param_name << " for node " << GetPrimName(node->cast<CNodePtr>())
  1135. << " and insert mirror before the node";
  1136. auto comm_op = node->input(index)->cast<CNodePtr>();
  1137. // add fusion flag
  1138. // pipeline mirror would not be set, which should be supported later
  1139. AddCommOpFusionType(comm_op, param_node_pair.first);
  1140. AddCommOpParamFlag(comm_op);
  1141. }
  1142. }
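// DoInsertMirrorOps (above) chooses among three placements for the mirror operator:
//   1) reuse a MirrorOp already present in the same graph and rewire the edge to it;
//   2) insert the mirror next to the parameter (skipping Load) when gradient_fp32_sync
//      applies before a Cast or the parameter is shared;
//   3) otherwise insert the mirror directly before the consuming node's input.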
  1143. void InsertMirrorOps(const FuncGraphPtr &root, const MirrorOps &mirror_ops, const CNodePtr &node) {
  1144. MS_EXCEPTION_IF_NULL(node);
  1145. size_t node_size = node->inputs().size();
  1146. for (auto input : node->inputs()) {
  1147. if (HasAbstractMonad(input)) {
  1148. node_size--;
  1149. }
  1150. }
  1151. if (!CheckInsertMirrorOps(mirror_ops, node, node_size)) {
  1152. return;
  1153. }
  1154. DoInsertMirrorOps(root, mirror_ops, node, node_size);
  1155. }
  1156. void BackwardCommunication(const FuncGraphPtr &root, const OperatorInfoPtr &distribute_operator, const CNodePtr &node,
  1157. const std::vector<std::pair<CNodePtr, LossNodeInfo>> &sens_loss_pairs) {
  1158. MS_EXCEPTION_IF_NULL(distribute_operator);
  1159. MS_EXCEPTION_IF_NULL(node);
  1160. if (IsPrimitiveCNode(node, prim::kPrimReceive)) {
  1161. return;
  1162. }
  1163. bool is_loss_cnode =
  1164. std::any_of(sens_loss_pairs.begin(), sens_loss_pairs.end(),
  1165. [node](const std::pair<CNodePtr, LossNodeInfo> &element) { return element.second.loss_node == node; });
  1166. MirrorOps mirror_ops = distribute_operator->mirror_ops();
  1167. VirtualDivOp virtual_div_op = distribute_operator->virtual_div_op();
  1168. // insert mirror op
  1169. if (!mirror_ops.empty()) {
  1170. MS_LOG(INFO) << "insert mirror op for " << distribute_operator->name();
  1171. InsertMirrorOps(root, mirror_ops, node);
  1172. }
  1173. // insert virtual div op
  1174. if (!virtual_div_op.empty() && is_loss_cnode && IsLastStage()) {
  1175. MS_LOG(INFO) << "insert virtual div op for " << distribute_operator->name();
  1176. InsertVirtualDivOp(virtual_div_op, node);
  1177. }
  1178. }
  1179. std::string GetDisOpName(const std::string &prim_name) {
  1180. std::string op_name = prim_name;
  1181. if (!prim_name.empty() && (prim_name[0] == '_')) {
  1182. op_name = prim_name.substr(1);
  1183. }
  1184. return op_name + "Info";
  1185. }
  1186. OperatorInfoPtr OperatorInstanceByName(const std::string &name, const PrimitiveAttrs &attrs,
  1187. const std::vector<Shapes> &shape_list) {
  1188. if (shape_list.size() != 2) {
  1189. MS_LOG(ERROR) << "The size of shape list is not 2";
  1190. return nullptr;
  1191. }
  1192. if (name.length() == 0) {
  1193. MS_LOG(EXCEPTION) << "Length of name is zero!";
  1194. }
  1195. std::string distribute_opname = GetDisOpName(name);
  1196. OperatorInfoPtr operator_ =
  1197. (OperatorInfoPtr)DynCreator::Instance().Create(distribute_opname, shape_list[0], shape_list[1], attrs, TOTAL_OPS);
  1198. if (operator_ == nullptr) {
  1199. MS_LOG(INFO) << "Create " << name << " failed";
  1200. return nullptr;
  1201. }
  1202. std::string origin_name = operator_->name();
  1203. operator_->set_name(origin_name + std::to_string(TOTAL_OPS));
  1204. MS_LOG(INFO) << "Successfully created operator " << origin_name;
  1205. ++TOTAL_OPS;
  1206. return operator_;
  1207. }
  1208. OperatorInfoPtr OperatorInstance(const PrimitivePtr &prim, const PrimitiveAttrs &attrs,
  1209. const std::vector<Shapes> &shape_list) {
  1210. MS_EXCEPTION_IF_NULL(prim);
  1211. OperatorInfoPtr operator_ = OperatorInstanceByName(prim->name(), attrs, shape_list);
  1212. if (operator_ == nullptr) {
  1213. if (IsInBatchParallelBlackList(prim)) {
  1214. MS_LOG(EXCEPTION) << "Operator " << prim->name() << " is not supported yet in auto parallel mode.";
  1215. }
1216. MS_LOG(INFO) << "Create " << prim->name() << " failed, using batch parallel instead";
  1217. operator_ = OperatorInstanceByName(BATCH_PARALLEL, attrs, shape_list);
  1218. MS_EXCEPTION_IF_NULL(operator_);
  1219. }
  1220. return operator_;
  1221. }
  1222. OperatorInfoPtr NewOperatorInstance(const PrimitivePtr &prim, const PrimitiveAttrs &attrs,
  1223. std::vector<Shapes> shape_list) {
  1224. OperatorInfoPtr operator_ = OperatorInstance(prim, attrs, shape_list);
  1225. for (size_t i = 0; i < shape_list[0].size(); ++i) {
1226. MS_LOG(INFO) << "Input " << i << "'s shape: " << ShapeToString(shape_list[0][i]);
  1227. }
  1228. return operator_;
  1229. }
  1230. StrategyPtr ExtractStrategy(const ValuePtr &stra) {
  1231. if (stra == nullptr) {
  1232. return nullptr;
  1233. }
  1234. auto var = stra->cast<ValueTuplePtr>();
  1235. if (var == nullptr) {
  1236. return nullptr;
  1237. }
  1238. StrategyPtr strategyPtr;
  1239. int64_t stage_id = g_device_manager->stage_id();
  1240. MS_LOG(INFO) << "Extract information: strategy " << stra->ToString();
  1241. if (var->size() > 0) {
  1242. std::vector<ValuePtr> elements = var->value();
  1243. Strategys strategy;
  1244. for (uint64_t index = 0; index < elements.size(); ++index) {
  1245. Dimensions dim;
  1246. if (elements[index]->isa<ValueSequence>()) {
  1247. auto value_tuple = elements[index]->cast<ValueTuplePtr>();
  1248. std::vector<ValuePtr> value_vector = value_tuple->value();
  1249. (void)std::transform(value_vector.begin(), value_vector.end(), std::back_inserter(dim),
  1250. [](const ValuePtr &value) { return static_cast<int64_t>(GetValue<int64_t>(value)); });
  1251. strategy.push_back(dim);
  1252. } else {
1253. MS_LOG(EXCEPTION) << "Failure: the strategy's format is wrong! A ValueSequence is required";
  1254. }
  1255. }
  1256. if (strategy.empty()) {
  1257. MS_LOG(EXCEPTION) << "ExtractStrategy: failed to extract strategy";
  1258. }
  1259. strategyPtr = NewStrategy(stage_id, strategy);
  1260. }
  1261. return strategyPtr;
  1262. }
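// Illustrative example (a sketch, not from a real model): a strategy value such as
// ((2, 4), (4, 1)), a ValueTuple of ValueSequences, is parsed into
// Strategys{{2, 4}, {4, 1}} and wrapped into a StrategyPtr for the current stage;
// any element that is not a ValueSequence raises an exception.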
  1263. Shapes GetRefKeyNodeShape(const AnfNodePtr &node, const FuncGraphPtr &func_graph) {
  1264. MS_EXCEPTION_IF_NULL(node);
  1265. MS_EXCEPTION_IF_NULL(func_graph);
  1266. std::vector<AnfNodePtr> parameters = FindParameterByRefKeyNode(node, func_graph);
  1267. if (parameters.size() != 1) {
1268. MS_LOG(EXCEPTION) << "Failed to find the parameter by the ref key node";
  1269. }
  1270. Shapes input_shapes;
  1271. input_shapes = GetNodeShape(parameters[0]);
  1272. if (input_shapes.size() != 1) {
  1273. MS_LOG(EXCEPTION) << "Get input shape failed";
  1274. }
  1275. MS_LOG(INFO) << "The parameter shape is " << ShapeToString(input_shapes[0]);
  1276. return input_shapes;
  1277. }
  1278. std::vector<Shapes> ExtractShape(const CNodePtr &node) {
  1279. MS_EXCEPTION_IF_NULL(node);
  1280. Shapes shape_inputs, shape_outputs;
  1281. std::vector<Shapes> shape_all;
  1282. std::vector<AnfNodePtr> all_inputs = node->inputs();
  1283. size_t inputs_size = all_inputs.size();
  1284. for (size_t i = 1; i < inputs_size; ++i) {
  1285. Shapes input_shapes;
  1286. AnfNodePtr input = all_inputs[i];
  1287. if (HasAbstractMonad(input)) {
  1288. continue;
  1289. }
  1290. if (IsValueNode<RefKey>(input)) {
  1291. auto func_graph = node->func_graph();
  1292. MS_EXCEPTION_IF_NULL(func_graph);
  1293. std::vector<AnfNodePtr> parameters = FindParameterByRefKeyNode(input, func_graph);
  1294. if (parameters.size() != 1) {
1295. MS_LOG(EXCEPTION) << "Failed to find the parameter by the ref key node";
  1296. }
  1297. std::pair<AnfNodePtr, int64_t> node_pair = std::make_pair(node, SizeToLong(i));
  1298. g_RefMap[parameters[0]] = node_pair;
  1299. input_shapes = GetRefKeyNodeShape(input, func_graph);
  1300. } else if (input->isa<CNode>() || IsValueNode<Tensor>(input) || input->isa<Parameter>() ||
  1301. ((IsValueNode<ValueList>(input) || IsValueNode<ValueTuple>(input)) && (inputs_size == 2))) {
  1302. input_shapes = GetNodeShape(input);
  1303. } else {
  1304. continue;
  1305. }
  1306. if (input_shapes.size() != 1) {
  1307. if (inputs_size == 2) { // like concat
  1308. shape_inputs = input_shapes;
  1309. break;
  1310. } else {
  1311. MS_LOG(EXCEPTION) << "ExtractShape: Get input shape failed";
  1312. }
  1313. }
  1314. shape_inputs.push_back(input_shapes[0]);
  1315. }
  1316. shape_all.push_back(shape_inputs);
  1317. // extract out shape
  1318. shape_outputs = GetNodeShape(node);
  1319. shape_all.push_back(shape_outputs);
  1320. return shape_all;
  1321. }
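// The returned vector always has two entries: shape_all[0] holds the shapes of the
// tensor inputs (monad inputs are skipped) and shape_all[1] holds the node's output shapes.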
  1322. std::pair<AnfNodePtr, int64_t> FindParallelCareNode(const AnfNodePtr &node, int32_t recursion_num) {
  1323. if (recursion_num >= RECURSION_LIMIT) {
  1324. return std::make_pair(nullptr, 0);
  1325. }
  1326. MS_EXCEPTION_IF_NULL(node);
  1327. FuncGraphPtr func_graph = node->func_graph();
  1328. MS_EXCEPTION_IF_NULL(func_graph);
  1329. FuncGraphManagerPtr manager = func_graph->manager();
  1330. MS_EXCEPTION_IF_NULL(manager);
  1331. AnfNodeIndexSet node_set = manager->node_users()[node];
  1332. for (auto &node_pair : node_set) {
  1333. CNodePtr cnode = node_pair.first->cast<CNodePtr>();
  1334. MS_EXCEPTION_IF_NULL(cnode);
  1335. if (!IsValueNode<Primitive>(cnode->input(0))) {
  1336. continue;
  1337. }
  1338. ValueNodePtr prim_node_anf = cnode->input(0)->cast<ValueNodePtr>();
  1339. MS_EXCEPTION_IF_NULL(prim_node_anf);
  1340. PrimitivePtr node_prim = prim_node_anf->value()->cast<PrimitivePtr>();
  1341. MS_EXCEPTION_IF_NULL(node_prim);
  1342. if ((node_prim->name() == DEPEND && node_pair.second != 1) || IsPrimitiveCNode(cnode, prim::kPrimReceive) ||
  1343. IsPrimitiveCNode(cnode, prim::kPrimSend)) {
  1344. continue;
  1345. }
  1346. if (IsParallelCareNode(cnode) && cnode->has_user_data<OperatorInfo>()) {
  1347. return node_pair;
  1348. } else {
  1349. auto tmp_pair = FindParallelCareNode(node_pair.first, recursion_num + 1);
  1350. if (tmp_pair.first != nullptr) {
  1351. return tmp_pair;
  1352. }
  1353. }
  1354. }
  1355. return std::make_pair(nullptr, 0);
  1356. }
  1357. std::pair<AnfNodePtr, int64_t> FindSubGraph(const FuncGraphPtr &graph, const AnfNodePtr &parameter) {
  1358. MS_EXCEPTION_IF_NULL(graph);
  1359. MS_EXCEPTION_IF_NULL(parameter);
  1360. FuncGraphManagerPtr manager = graph->manager();
  1361. MS_EXCEPTION_IF_NULL(manager);
  1362. std::pair<AnfNodePtr, int64_t> prim_anf_node_pair = FindParallelCareNode(parameter, 0);
  1363. if (prim_anf_node_pair.first != nullptr) {
  1364. return prim_anf_node_pair;
  1365. } else {
  1366. AnfNodeIndexSet param_sub_set = manager->node_users()[parameter];
  1367. for (auto &param_pair : param_sub_set) {
  1368. CNodePtr param_cnode = param_pair.first->cast<CNodePtr>();
  1369. AnfNodePtr graph_value_node;
  1370. if (param_cnode->input(0)->isa<CNode>()) {
  1371. graph_value_node = param_cnode->input(0)->cast<CNodePtr>()->input(1);
  1372. } else {
  1373. graph_value_node = param_cnode->input(0);
  1374. }
  1375. if (!IsValueNode<FuncGraph>(graph_value_node)) {
  1376. continue;
  1377. }
  1378. FuncGraphPtr graph_sub = GetValueNode<FuncGraphPtr>(graph_value_node);
  1379. auto parameters = graph_sub->parameters();
  1380. if (LongToSize(param_pair.second - 1) >= parameters.size()) {
  1381. MS_LOG(EXCEPTION) << "The index is out of range, index is: " << (param_pair.second - 1) << ", vector size is "
  1382. << parameters.size();
  1383. }
  1384. std::pair<AnfNodePtr, int64_t> res = FindSubGraph(graph_sub, parameters[LongToSize(param_pair.second - 1)]);
  1385. if (res.first != nullptr) {
  1386. return res;
  1387. }
  1388. }
  1389. }
  1390. return std::make_pair(nullptr, 0);
  1391. }
  1392. CNodePtr InsertAllGatherAfterCast(const CNodePtr &cnode) {
  1393. MS_EXCEPTION_IF_NULL(cnode);
  1394. auto graph = cnode->func_graph();
  1395. MS_EXCEPTION_IF_NULL(graph);
  1396. auto manager = graph->manager();
  1397. MS_EXCEPTION_IF_NULL(manager);
  1398. // skip Load moving down and assume it only has one node user
  1399. CNodePtr res = cnode;
  1400. if (IsSomePrimitive(res, LOAD)) {
  1401. res = manager->node_users()[cnode].begin()->first->cast<CNodePtr>();
  1402. }
1403. // return the Cast node only if its output type is not fp32 (e.g. a fp32 -> fp16 cast); otherwise return nullptr
  1404. if (!IsSomePrimitive(res, CAST)) {
  1405. return nullptr;
  1406. }
  1407. auto node_type = res->Type();
  1408. MS_EXCEPTION_IF_NULL(node_type);
  1409. if (!node_type->isa<mindspore::TensorType>()) {
  1410. MS_LOG(EXCEPTION) << "Unknown type.";
  1411. }
  1412. auto input_element_type = node_type->cast<mindspore::TensorTypePtr>()->element();
  1413. MS_EXCEPTION_IF_NULL(input_element_type);
  1414. auto type_id = input_element_type->type_id();
  1415. if (type_id != kNumberTypeFloat32) {
  1416. return res;
  1417. } else {
  1418. return nullptr;
  1419. }
  1420. }
  1421. static void InsertAllGatherOp(const FuncGraphPtr &root, const std::string &group, const std::pair<AnfNodePtr, int> &res,
  1422. const AnfNodePtr &node, const std::string &op_name, bool is_shared_param) {
  1423. MS_EXCEPTION_IF_NULL(res.first);
  1424. MS_EXCEPTION_IF_NULL(node);
  1425. bool grad_accumulation_shard = ParallelContext::GetInstance()->grad_accumulation_shard();
  1426. auto cnode = res.first->cast<CNodePtr>();
  1427. auto graph = cnode->func_graph();
  1428. MS_EXCEPTION_IF_NULL(graph);
  1429. auto manager = graph->manager();
  1430. MS_EXCEPTION_IF_NULL(manager);
  1431. auto cnode_prim = GetValueNode<PrimitivePtr>(cnode->input(0));
  1432. MS_EXCEPTION_IF_NULL(cnode_prim);
  1433. Operator op;
  1434. CNodePtr allgather;
  1435. auto param_name = node->cast<ParameterPtr>()->name();
  1436. if (op_name == MINI_STEP_ALL_GATHER) {
  1437. op = CreateMiniStepAllGatherOp(group);
  1438. } else if (op_name == MICRO_STEP_ALL_GATHER) {
  1439. op = CreateMicroStepAllGatherOp(group);
  1440. } else {
  1441. op = CreateAllGatherOp(group);
  1442. }
  1443. CNodePtr cast_node = InsertAllGatherAfterCast(cnode);
  1444. std::string opt_shard_mirror_group;
  1445. auto param_ptr = node->cast<ParameterPtr>();
  1446. MS_EXCEPTION_IF_NULL(param_ptr);
  1447. if (param_ptr->user_data<TensorLayout>()) {
  1448. opt_shard_mirror_group = param_ptr->user_data<TensorLayout>()->opt_shard_mirror_group();
  1449. }
  1450. if (!is_shared_param && cast_node) {
  1451. allgather = ReplaceNode(op, cast_node, graph, PARALLEL_OPTIMIZER_ALLGATHER_NOT_COMPUTE, param_name, root);
  1452. MS_LOG(INFO) << "Parallel optimizer is applied before Cast for " << param_name;
  1453. } else {
  1454. auto pre_node = node;
  1455. AnfNodePtr pre_node_ = node;
  1456. auto node_user_map = manager->node_users();
  1457. TypePtr next_node_dtype = FindChildCastWithFP32ToFP16(cnode, node_user_map);
  1458. if (next_node_dtype) {
1459. MS_LOG(INFO) << "Inserting Cast from float32 to float16 for node " << node->fullname_with_scope()
1460. << " to save communication.";
  1461. pre_node_ = CreateFP16Cast(cnode, pre_node, next_node_dtype);
  1462. }
  1463. InsertNode(op, cnode, IntToSize(res.second), pre_node_, graph, PARALLEL_OPTIMIZER_ALLGATHER_NOT_COMPUTE, param_name,
  1464. root);
  1465. allgather = cnode->input(IntToSize(res.second))->cast<CNodePtr>();
  1466. MS_LOG(INFO) << "Parallel optimizer is applied before " << GetPrimName(cnode) << " for " << param_name;
  1467. }
  1468. // add fusion flag
  1469. AddCommOpFusionType(allgather, node);
  1470. // add gradients mean
  1471. AddCommOpMeanFlag(allgather);
  1472. if (op_name == MICRO_STEP_ALL_GATHER) {
  1473. // When grad_accumulation_shard is enabled, the ReduceScatter is inserted at each micro step
  1474. // so no need to do backward for the micro_step_allgather
  1475. AddCommOpMirrorFlag(allgather, !grad_accumulation_shard);
  1476. } else if (op_name == MINI_STEP_ALL_GATHER) {
1477. // We need to manually set add_accu to false if its parent node is MirrorMiniStep
  1478. bool add_accu = root->has_flag(kAccumulation);
  1479. bool is_with_mirror = opt_shard_mirror_group.size() > 1;
  1480. AddCommOpAddAccuFlag(allgather, !add_accu && !is_with_mirror);
  1481. AddCommOpMirrorFlag(allgather, grad_accumulation_shard || !add_accu);
  1482. }
  1483. }
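// Flag summary for the inserted AllGather, derived from the branches above: the fusion
// type and gradient-mean flags are always set; for MicroStepAllGather the backward
// mirror is skipped when grad_accumulation_shard is enabled; for MiniStepAllGather the
// add_accu and mirror flags depend on the kAccumulation flag and the opt-shard mirror group.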
  1484. static void ApplyParallelOptOnParam(const FuncGraphPtr &root, const AnfNodePtr &parameter,
  1485. const std::string &opt_shard_group) {
  1486. if (opt_shard_group.empty()) {
  1487. return;
  1488. }
  1489. // set all gather type
  1490. MS_EXCEPTION_IF_NULL(parameter);
  1491. int64_t grad_accumulation_step = ParallelContext::GetInstance()->grad_accumulation_step();
  1492. int32_t split_stage_num = ParallelContext::GetInstance()->pipeline_stage_split_num();
  1493. std::string op_name;
  1494. if (grad_accumulation_step > 1) {
  1495. op_name = MINI_STEP_ALL_GATHER;
  1496. } else if (split_stage_num > 1) {
  1497. op_name = MICRO_STEP_ALL_GATHER;
  1498. } else {
  1499. op_name = ALL_GATHER;
  1500. }
  1501. // insert all gather
  1502. FuncGraphManagerPtr manager = root->manager();
  1503. MS_EXCEPTION_IF_NULL(manager);
  1504. auto param_sub_set = manager->node_users()[parameter];
  1505. bool insert_flag = false;
  1506. for (auto &param_pair : param_sub_set) {
  1507. auto cnode = param_pair.first->cast<CNodePtr>();
  1508. MS_EXCEPTION_IF_NULL(cnode);
  1509. if (cnode->in_forward_flag() && !IsPrimitiveCNode(cnode, prim::kPrimReceive) &&
  1510. !IsPrimitiveCNode(cnode, prim::kPrimDepend)) {
  1511. OperatorInfoPtr distribute_operator = cnode->user_data<OperatorInfo>();
  1512. if (distribute_operator == nullptr) {
  1513. MS_LOG(DEBUG) << "Parallel optimizer: " << GetPrimName(cnode) << " 's OperatorInfoPtr is nullptr";
  1514. } else if (IntToSize(param_pair.second - 1) >= distribute_operator->inputs_tensor_info().size()) {
  1515. MS_LOG(EXCEPTION) << "The index is out of range, index is " << (param_pair.second - 1) << ", vector size is "
  1516. << distribute_operator->inputs_tensor_info().size();
  1517. }
  1518. if (insert_flag) {
  1519. // if there are multiple node users, they share one same allgather
  1520. auto next_cnode = FindCNode(parameter, op_name, cnode->func_graph(), 0);
  1521. if (next_cnode.first) {
  1522. manager->SetEdge(cnode, param_pair.second, next_cnode.second);
  1523. MS_LOG(INFO) << "Parallel optimizer is shared between " << parameter->ToString() << " and "
  1524. << GetPrimName(cnode);
  1525. } else {
1526. MS_LOG(ERROR) << "Cannot find the shared AllGather with multiple node users.";
  1527. }
  1528. } else {
  1529. // insert allgather operator between shard parameter and cnode
  1530. auto param_ptr = parameter->cast<ParameterPtr>();
  1531. MS_EXCEPTION_IF_NULL(param_ptr);
  1532. bool is_shared_param = param_ptr->user_data<TensorLayout>()->is_shared_param();
  1533. InsertAllGatherOp(root, opt_shard_group, param_pair, parameter, op_name, is_shared_param);
  1534. insert_flag = true;
  1535. }
  1536. }
  1537. }
  1538. }
  1539. void SetSharedParameterFlag(const FuncGraphPtr &root, const AnfNodePtr &parameter) {
  1540. MS_EXCEPTION_IF_NULL(root);
  1541. MS_EXCEPTION_IF_NULL(parameter);
  1542. FuncGraphManagerPtr manager = root->manager();
  1543. MS_EXCEPTION_IF_NULL(manager);
  1544. ParameterPtr parameter_ptr = parameter->cast<ParameterPtr>();
  1545. if (parameter_ptr == nullptr) {
1546. MS_LOG(INFO) << parameter->ToString() << ": cast to ParameterPtr failed; it may not be a parameter";
  1547. return;
  1548. }
  1549. auto user_set = manager->node_users()[parameter];
  1550. int32_t user_count = 0;
  1551. for (auto &param_pair : user_set) {
  1552. CNodePtr cnode = param_pair.first->cast<CNodePtr>();
  1553. MS_EXCEPTION_IF_NULL(cnode);
  1554. if (cnode->in_forward_flag()) user_count++;
  1555. }
  1556. if (user_count > 1) {
  1557. auto tensor_layout = parameter_ptr->user_data<TensorLayout>();
  1558. tensor_layout->set_is_shared_param(true);
  1559. MS_LOG(WARNING) << "There are multiple users for " << parameter->ToString()
  1560. << ". Mixed precision optimization is not valid here.";
  1561. }
  1562. }
1563. // When this function returns a non-empty string, the parallel optimizer is applied to this parameter.
  1564. std::string SetParallelShape(const AnfNodePtr &parameter, const std::pair<AnfNodePtr, int64_t> &res,
  1565. const FuncGraphPtr &root) {
  1566. // check null for param and cnode
  1567. auto param_shape = parameter->Shape();
  1568. MS_EXCEPTION_IF_NULL(parameter);
  1569. MS_EXCEPTION_IF_NULL(param_shape);
  1570. CNodePtr cnode = res.first->cast<CNodePtr>();
  1571. MS_EXCEPTION_IF_NULL(cnode);
  1572. // get slice_shape
  1573. OperatorInfoPtr distribute_operator = cnode->user_data<OperatorInfo>();
  1574. if (distribute_operator == nullptr) {
  1575. MS_LOG(EXCEPTION) << "node " << cnode->ToString() << " 's distribute_operator is nullptr";
  1576. }
  1577. if (LongToSize(res.second - 1) >= distribute_operator->inputs_tensor_info().size()) {
  1578. MS_LOG(EXCEPTION) << "The parameter index is not in inputs_tensor_info. index = " << (res.second - 1)
  1579. << ", inputs_tensor_info size = " << distribute_operator->inputs_tensor_info().size();
  1580. }
  1581. TensorInfo tensorinfo_in = distribute_operator->inputs_tensor_info()[LongToSize(res.second - 1)];
  1582. TensorLayout tensor_layout = tensorinfo_in.tensor_layout();
  1583. Shape slice_shape = tensor_layout.slice_shape().array();
  1584. // generate shard group
  1585. std::string opt_shard_group;
  1586. MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance());
  1587. bool enable_parallel_optimizer = ParallelContext::GetInstance()->enable_parallel_optimizer();
  1588. if (enable_parallel_optimizer) {
  1589. std::unique_ptr<OptParamMgr> apOptParamMgr = createOptParamMgr(root);
  1590. opt_shard_group = apOptParamMgr->ShardOptGroup(parameter, &tensor_layout, distribute_operator);
  1591. // set the shape of parameter to sliced shape
  1592. if (!opt_shard_group.empty()) {
  1593. slice_shape = tensor_layout.opt_shard_slice_shape();
  1594. }
  1595. MS_LOG(INFO) << "the shape of " << parameter->ToString() << "(original: " << param_shape->ToString() << ")"
  1596. << " will be sliced into " << MakeValue(slice_shape)->ToString() << " in op "
  1597. << distribute_operator->name();
  1598. }
  1599. AbstractBasePtr abstract = parameter->abstract();
  1600. if (abstract == nullptr) {
  1601. MS_LOG(EXCEPTION) << "parameter " << parameter->ToString() << ": abstract is nullptr";
  1602. }
  1603. AbstractBasePtr cloned_abstract = abstract->Clone();
  1604. if (cloned_abstract == nullptr) {
  1605. MS_LOG(EXCEPTION) << "parameter " << parameter->ToString() << ": abstract clone failed";
  1606. }
  1607. cloned_abstract->set_shape(std::make_shared<abstract::Shape>(slice_shape));
  1608. parameter->set_abstract(cloned_abstract);
  1609. ParameterPtr parameter_ptr = parameter->cast<ParameterPtr>();
  1610. MS_EXCEPTION_IF_NULL(parameter_ptr);
  1611. parameter_ptr->set_user_data<TensorLayout>(std::make_shared<TensorLayout>(tensor_layout));
  1612. return opt_shard_group;
  1613. }
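// Illustrative example (a sketch, not from a real model): a parameter of shape
// [8192, 1024] consumed with strategy (4, 2) gets slice_shape [2048, 512]; if the
// optimizer-shard group is non-empty, the (typically further divided)
// opt_shard_slice_shape is used instead.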
  1614. void CoverSliceShape(const FuncGraphPtr &root) {
  1615. MS_EXCEPTION_IF_NULL(root);
  1616. auto parameters = root->parameters();
  1617. for (auto &parameter : parameters) {
  1618. MS_EXCEPTION_IF_NULL(parameter->Shape());
  1619. auto iter = g_RefMap.find(parameter);
  1620. if (iter != g_RefMap.end()) {
  1621. std::string group = SetParallelShape(parameter, g_RefMap[parameter], root);
  1622. // find all forward nodes that use parameter in graphs and insert allgather if group is not empty
  1623. SetSharedParameterFlag(root, parameter);
  1624. ApplyParallelOptOnParam(root, parameter, group);
  1625. continue;
  1626. }
  1627. std::pair<AnfNodePtr, int64_t> res = FindSubGraph(root, parameter);
  1628. if (res.first == nullptr) {
  1629. MS_LOG(INFO) << "Parameter " << parameter->ToString() << " is not in graph, thus no need to set parallel shape";
  1630. } else {
  1631. std::string group = SetParallelShape(parameter, res, root);
  1632. // find all forward nodes that use parameter in graphs and insert allgather if group is not empty
  1633. SetSharedParameterFlag(root, parameter);
  1634. ApplyParallelOptOnParam(root, parameter, group);
  1635. MS_LOG(DEBUG) << "Parameter " << parameter->ToString() << " shape " << parameter->Shape()->ToString();
  1636. }
  1637. }
  1638. g_RefMap.clear();
  1639. }
  1640. void SetVirtualDatasetStrategy(const CNodePtr &node) {
  1641. MS_EXCEPTION_IF_NULL(node);
  1642. MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance());
  1643. bool full_batch = ParallelContext::GetInstance()->full_batch();
  1644. PrimitivePtr prim = GetValueNode<PrimitivePtr>(node->input(0));
  1645. MS_EXCEPTION_IF_NULL(prim);
  1646. if (prim->name() == VIRTUAL_DATA_SET || prim->name() == VIRTUAL_OUTPUT) {
  1647. CheckGlobalDeviceManager();
  1648. auto attrs_temp = prim->attrs();
  1649. if (!ParallelContext::GetInstance()->dataset_strategy().empty() && prim->name() == VIRTUAL_DATA_SET) {
  1650. std::vector<ValuePtr> elements;
  1651. auto dataset_strategy = ParallelContext::GetInstance()->dataset_strategy();
  1652. (void)std::transform(dataset_strategy.begin(), dataset_strategy.end(), std::back_inserter(elements),
  1653. [](auto input_stra) { return MakeValue(input_stra); });
  1654. ValueTuplePtr strategy = std::make_shared<ValueTuple>(elements);
  1655. attrs_temp[IN_STRATEGY] = strategy;
  1656. (void)prim->SetAttrs(attrs_temp);
  1657. if (prim->HasAttr(REPEAT_DIM_DIRECT) && GetValue<std::string>(prim->GetAttr(REPEAT_DIM_DIRECT)) == RIGHT) {
  1658. ParallelContext::GetInstance()->set_dataset_repeat_dim_right(true);
  1659. MS_LOG(INFO) << "dataset repeat dim is right";
  1660. }
  1661. return;
  1662. }
  1663. int64_t dev_num;
  1664. if (full_batch) {
  1665. dev_num = 1;
  1666. } else {
  1667. dev_num = g_device_manager->stage_device_num();
  1668. }
  1669. if (dev_num == 0) {
  1670. MS_LOG(EXCEPTION) << "Device Num must be larger than 0, but got 0.";
  1671. }
  1672. std::vector<Shapes> shape_list = ExtractShape(node);
  1673. if (shape_list.empty()) {
  1674. MS_LOG(EXCEPTION) << "Failure:node " << node->ToString() << " failed to extract shape";
  1675. }
  1676. std::vector<ValuePtr> elements;
  1677. for (size_t i = 0; i < shape_list[0].size(); i++) {
  1678. if (shape_list[0][i].empty()) {
1679. MS_LOG(EXCEPTION) << "shape_list[0][" << i << "] is empty";
  1680. }
  1681. Dimensions input_strategy;
  1682. if (!shape_list[0][i].empty() && shape_list[0][i][0] % dev_num == 0) {
  1683. input_strategy.push_back(dev_num);
  1684. } else if (!shape_list[0][i].empty()) {
  1685. input_strategy.push_back(1);
  1686. }
  1687. for (size_t j = 1; j < shape_list[0][i].size(); j++) {
  1688. input_strategy.push_back(1);
  1689. }
  1690. elements.push_back(MakeValue(input_strategy));
  1691. }
  1692. ValueTuplePtr strategy = std::make_shared<ValueTuple>(elements);
  1693. attrs_temp[IN_STRATEGY] = strategy;
  1694. (void)prim->SetAttrs(attrs_temp);
  1695. }
  1696. }
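// Illustrative example (a sketch): with full_batch disabled and a stage device number of
// 8, an input of shape [32, 224, 224, 3] receives the strategy (8, 1, 1, 1); if the
// first dimension were not divisible by 8, it would fall back to (1, 1, 1, 1).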
  1697. // find previous parallel care node's next node.
  1698. bool FindPreNodes(const AnfNodePtr &node, std::vector<std::string> *unique_ids, std::vector<size_t> *indexes,
  1699. size_t curr_depth) {
  1700. if (curr_depth > MAX_RECURSIVE_DEPTH) {
1701. MS_LOG(WARNING) << "When finding the previous node, exceeded the maximum recursion depth: " << MAX_RECURSIVE_DEPTH;
  1702. return false;
  1703. }
  1704. MS_EXCEPTION_IF_NULL(unique_ids);
  1705. MS_EXCEPTION_IF_NULL(indexes);
  1706. if (!node->isa<CNode>()) {
  1707. return false;
  1708. }
  1709. CNodePtr pre_cnode = node->cast<CNodePtr>();
  1710. if (!IsValueNode<Primitive>(pre_cnode->input(0))) {
  1711. return false;
  1712. }
  1713. bool find = false;
  1714. for (size_t index = 1; index < pre_cnode->inputs().size(); ++index) {
  1715. auto next_node = pre_cnode->inputs()[index];
  1716. if (!next_node->isa<CNode>() || next_node->isa<Parameter>()) {
  1717. return false;
  1718. }
  1719. CNodePtr cnode = next_node->cast<CNodePtr>();
  1720. if (!IsValueNode<Primitive>(cnode->input(0))) {
  1721. return false;
  1722. }
  1723. ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  1724. PrimitivePtr prim = prim_anf_node->value()->cast<PrimitivePtr>();
  1725. if (IsParallelCareNode(cnode) && prim->name() != MAKE_TUPLE && prim->name() != MAKE_LIST) {
  1726. unique_ids->push_back(pre_cnode->UniqueId());
  1727. indexes->push_back(index);
  1728. find = true;
  1729. continue;
  1730. }
  1731. if (FindPreNodes(cnode, unique_ids, indexes, ++curr_depth)) {
  1732. find = true;
  1733. continue;
  1734. }
  1735. }
  1736. return find;
  1737. }
  1738. void FindLastNodesUniqueId(const FuncGraphPtr &root, std::vector<std::string> *unique_ids,
  1739. std::vector<size_t> *indexes) {
  1740. MS_EXCEPTION_IF_NULL(unique_ids);
  1741. CNodePtr cnode = root->get_return();
  1742. if (!FindPreNodes(cnode, unique_ids, indexes, 0)) {
1743. MS_LOG(WARNING) << "Cannot find the last parallel care node in the eval graph";
  1744. }
  1745. }
  1746. StrategyPtr GenerateBatchParallelStrategy(const OperatorInfoPtr operator_, const PrimitivePtr prim) {
  1747. MS_EXCEPTION_IF_NULL(operator_);
  1748. MS_EXCEPTION_IF_NULL(prim);
  1749. StrategyPtr strategyPtr;
  1750. std::shared_ptr<Strategys> strategy_v_ptr = operator_->GenerateBatchStrategies();
  1751. MS_EXCEPTION_IF_NULL(strategy_v_ptr);
  1752. strategyPtr = NewStrategy(0, *strategy_v_ptr);
  1753. std::vector<ValuePtr> elements;
  1754. for (size_t i = 0; i < strategy_v_ptr->size(); i++) {
  1755. elements.push_back(MakeValue((*strategy_v_ptr)[i]));
  1756. }
  1757. ValueTuplePtr strategy = std::make_shared<ValueTuple>(elements);
  1758. // display the strategy generated by batch parallel
  1759. auto attrs = prim->attrs();
  1760. attrs[GEN_STRATEGY] = strategy;
  1761. (void)prim->SetAttrs(attrs);
  1762. MS_LOG(INFO) << "prim " << prim->name() << " batch parallel strategy is " << attrs[GEN_STRATEGY]->ToString();
  1763. return strategyPtr;
  1764. }
1765. static bool CheckExtractInformation(const CNodePtr &cnode) {
  1766. if ((cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0))) {
  1767. return false;
  1768. }
  1769. ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  1770. PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  1771. if ((prim->name() == MAKE_TUPLE) || (prim->name() == MAKE_LIST) || (prim->name() == RECEIVE)) {
  1772. return false;
  1773. }
  1774. if (!IsParallelCareNode(cnode)) {
  1775. return false;
  1776. }
  1777. return true;
  1778. }
  1779. static void ExtractStrategyAndInit(const CNodePtr &cnode, const PrimitivePtr &prim, const OperatorInfoPtr &op_info) {
  1780. StrategyPtr in_strategy = nullptr, out_strategy = nullptr;
  1781. auto attrs = prim->attrs();
  1782. // load strategy map from checkpoint
  1783. StrategyMap stra_map;
  1784. if (StrategyCheckpoint::GetInstance().LoadCheckPointOn() &&
  1785. (StrategyCheckpoint::GetInstance().Load(&stra_map) != SUCCESS)) {
  1786. MS_LOG(EXCEPTION) << "Load strategy checkpoint failed";
  1787. }
  1788. std::string strategy_key_name = "";
  1789. auto param_names = NodeParameterName(cnode, -1, 0);
  1790. if (!param_names.empty()) {
  1791. strategy_key_name = prim->name() + "_" + param_names[0].first;
  1792. }
  1793. bool load_strategy_from_ckpt =
  1794. StrategyCheckpoint::GetInstance().LoadCheckPointOn() && stra_map.find(strategy_key_name) != stra_map.end();
  1795. if ((!StrategyFound(attrs) && !load_strategy_from_ckpt) && !cnode->HasPrimalAttr(IN_STRATEGY)) {
  1796. MS_LOG(INFO) << "ExtractInformation: the strategy of node " << cnode->ToString() << " prim " << prim->name()
  1797. << " is empty, using batch parallel";
  1798. in_strategy = GenerateBatchParallelStrategy(op_info, prim);
  1799. } else if (cnode->HasPrimalAttr(IN_STRATEGY)) {
  1800. in_strategy = ExtractStrategy(cnode->GetPrimalAttr(IN_STRATEGY));
  1801. out_strategy = ExtractStrategy(cnode->GetPrimalAttr(OUT_STRATEGY));
  1802. } else if (StrategyFound(attrs)) {
  1803. in_strategy = ExtractStrategy(attrs[IN_STRATEGY]);
  1804. out_strategy = ExtractStrategy(attrs[OUT_STRATEGY]);
  1805. } else {
  1806. in_strategy = stra_map[strategy_key_name];
  1807. }
  1808. MS_EXCEPTION_IF_NULL(in_strategy);
  1809. if (op_info->Init(in_strategy, out_strategy) == FAILED) {
  1810. MS_LOG(EXCEPTION) << "Failure:operator " << prim->name() << " init failed" << trace::DumpSourceLines(cnode);
  1811. }
  1812. }
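// Strategy resolution order, derived from the branches above: a primal attr IN_STRATEGY
// on the cnode wins, then a strategy found in the primitive's attrs, then one loaded
// from the strategy checkpoint; if none is present, a batch-parallel strategy is generated.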
  1813. void ExtractInformation(const std::vector<AnfNodePtr> &all_nodes) {
  1814. SetStridedSliceSplitStrategy(all_nodes);
  1815. for (auto &node : all_nodes) {
  1816. auto cnode = node->cast<CNodePtr>();
1817. if (!CheckExtractInformation(cnode) || IsPrimitiveCNode(node, prim::kPrimSend)) {
  1818. continue;
  1819. }
  1820. SetVirtualDatasetStrategy(cnode);
  1821. ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  1822. PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
  1823. auto attrs = prim->attrs();
  1824. MS_LOG(INFO) << "extract information: node: " << node->ToString() << " prim " << prim->name();
  1825. std::vector<Shapes> shape_list = ExtractShape(cnode);
  1826. if (shape_list.empty()) {
  1827. MS_LOG(EXCEPTION) << "Failure:node " << node->ToString() << " failed to extract shape";
  1828. }
  1829. OperatorInfoPtr operator_ = OperatorInstance(prim, attrs, shape_list);
  1830. MS_EXCEPTION_IF_NULL(operator_);
  1831. auto &inputs = cnode->inputs();
  1832. std::vector<ValuePtr> input_value;
  1833. for (size_t index = 1; index < inputs.size(); ++index) {
  1834. if (inputs[index]->isa<ValueNode>()) {
  1835. input_value.push_back(GetValueNode(inputs[index]));
  1836. continue;
  1837. }
  1838. input_value.emplace_back(nullptr);
  1839. }
  1840. (*operator_).set_input_value(input_value);
  1841. (*operator_).set_outputs_dtype(cnode->Type());
  1842. (*operator_).set_cnode(cnode);
  1843. if (prim->name() == RESHAPE) {
  1844. cnode->set_user_data<OperatorInfo>(operator_);
  1845. continue;
  1846. }
  1847. ExtractStrategyAndInit(cnode, prim, operator_);
  1848. cnode->set_user_data<OperatorInfo>(operator_);
  1849. }
  1850. }
  1851. TensorLayout GetInputLayoutFromCNode(const std::pair<AnfNodePtr, int64_t> &node_pair) {
  1852. CNodePtr cnode = node_pair.first->cast<CNodePtr>();
  1853. MS_EXCEPTION_IF_NULL(cnode);
  1854. OperatorInfoPtr distribute_operator = GetDistributeOperator(cnode);
  1855. MS_EXCEPTION_IF_NULL(distribute_operator);
  1856. int64_t index = node_pair.second;
  1857. if (index > SizeToLong(distribute_operator->inputs_tensor_info().size())) {
1858. MS_LOG(EXCEPTION) << "The index is out of range, the node_pair.second is " << index
1859. << ", the vector size is " << distribute_operator->inputs_tensor_info().size();
  1860. }
  1861. TensorInfo tensorinfo_in = distribute_operator->inputs_tensor_info()[LongToSize(index - 1)];
  1862. TensorLayout tensorlayout_in = tensorinfo_in.tensor_layout();
  1863. return tensorlayout_in;
  1864. }
1865. // if reshape's output connects to several primitives, return the first layout found
  1866. std::shared_ptr<TensorLayout> FindNextLayout(const CNodePtr &cnode, bool *next_is_reshape) {
  1867. MS_EXCEPTION_IF_NULL(cnode);
  1868. MS_EXCEPTION_IF_NULL(cnode->func_graph());
  1869. FuncGraphManagerPtr manager = cnode->func_graph()->manager();
  1870. MS_EXCEPTION_IF_NULL(manager);
  1871. AnfNodeIndexSet node_set = manager->node_users()[cnode];
  1872. for (auto &node_pair : node_set) {
  1873. CNodePtr use_apply = node_pair.first->cast<CNodePtr>();
  1874. if (use_apply == nullptr || !IsValueNode<Primitive>(use_apply->input(0))) {
  1875. continue;
  1876. }
  1877. if (IsPrimitiveCNode(use_apply, prim::kPrimReshape)) {
  1878. *next_is_reshape = true;
  1879. continue;
  1880. }
  1881. ValueNodePtr prim_anf_node = use_apply->input(0)->cast<ValueNodePtr>();
  1882. MS_EXCEPTION_IF_NULL(prim_anf_node);
  1883. PrimitivePtr node_prim = prim_anf_node->value()->cast<PrimitivePtr>();
  1884. MS_EXCEPTION_IF_NULL(node_prim);
  1885. MS_LOG(INFO) << "FindNextLayout prim " << node_prim->name();
  1886. if (node_prim->name() == DEPEND && node_pair.second != 1) {
  1887. continue;
  1888. }
  1889. if (IsParallelCareNode(use_apply) && use_apply->has_user_data<OperatorInfo>()) {
  1890. MS_LOG(INFO) << "FindNextLayout success prim " << node_prim->name();
  1891. *next_is_reshape = false;
  1892. auto layout = GetInputLayoutFromCNode(node_pair);
  1893. return std::make_shared<TensorLayout>(layout);
  1894. }
  1895. MS_LOG(DEBUG) << "FindNextLayout failed prim " << node_prim->name() << " " << IsParallelCareNode(use_apply)
  1896. << " " << use_apply->has_user_data<OperatorInfo>();
  1897. auto layout_ptr = FindNextLayout(use_apply, next_is_reshape);
  1898. if (layout_ptr) {
  1899. return layout_ptr;
  1900. }
  1901. }
1902. MS_LOG(WARNING) << "FindNextLayout returned nullptr; if reshape is not the last primitive, there must be an error";
  1903. return nullptr;
  1904. }
  1905. std::shared_ptr<TensorLayout> GetOutputLayoutFromCNode(const CNodePtr &cnode, size_t output_index) {
  1906. MS_EXCEPTION_IF_NULL(cnode);
  1907. OperatorInfoPtr distribute_operator = GetDistributeOperator(cnode);
  1908. MS_EXCEPTION_IF_NULL(distribute_operator);
  1909. if (distribute_operator->outputs_tensor_info().size() <= output_index) {
1910. MS_LOG(EXCEPTION) << "outputs_tensor_info size is " << distribute_operator->outputs_tensor_info().size()
1911. << ", must be greater than output_index " << output_index;
  1912. }
  1913. TensorInfo tensorinfo_out = distribute_operator->outputs_tensor_info()[output_index];
  1914. TensorLayout tensorlayout_out = tensorinfo_out.tensor_layout();
  1915. return std::make_shared<TensorLayout>(tensorlayout_out);
  1916. }
  1917. std::shared_ptr<TensorLayout> FindPrevParallelCareNodeLayout(const AnfNodePtr &node, size_t output_index) {
  1918. if (!node->isa<CNode>()) {
  1919. return nullptr;
  1920. }
  1921. CNodePtr cnode = node->cast<CNodePtr>();
  1922. if (!IsValueNode<Primitive>(cnode->input(0))) {
  1923. return nullptr;
  1924. }
  1925. if (IsParallelCareNode(cnode) && cnode->has_user_data<OperatorInfo>()) {
  1926. auto layout_ptr = GetOutputLayoutFromCNode(cnode, output_index);
  1927. if (!layout_ptr) {
  1928. MS_LOG(EXCEPTION) << "Failure:GetLayoutFromCNode failed";
  1929. }
  1930. return layout_ptr;
  1931. }
  1932. return nullptr;
  1933. }
  1934. std::shared_ptr<TensorLayout> FindParameterNextLayout(const AnfNodePtr &node, size_t curr_depth) {
  1935. if (curr_depth > MAX_RECURSIVE_DEPTH) {
  1936. MS_LOG(WARNING) << "When finding the next tensor layout for the parameter, exceeded the maximum recursion depth: "
  1937. << MAX_RECURSIVE_DEPTH;
  1938. return nullptr;
  1939. }
  1940. FuncGraphManagerPtr manager = node->func_graph()->manager();
  1941. MS_EXCEPTION_IF_NULL(manager);
  1942. AnfNodeIndexSet node_set = manager->node_users()[node];
  1943. for (auto &node_pair : node_set) {
  1944. if (IsPrimitiveCNode(node_pair.first, prim::kPrimLoad)) {
  1945. auto layout_param = FindParameterNextLayout(node_pair.first, ++curr_depth);
  1946. if (!layout_param) {
  1947. continue;
  1948. }
  1949. return layout_param;
  1950. }
  1951. CNodePtr use_apply = node_pair.first->cast<CNodePtr>();
  1952. if (use_apply == nullptr || !IsValueNode<Primitive>(use_apply->input(0))) {
  1953. continue;
  1954. }
  1955. ValueNodePtr prim_anf_node = use_apply->input(0)->cast<ValueNodePtr>();
  1956. MS_EXCEPTION_IF_NULL(prim_anf_node);
  1957. PrimitivePtr node_prim = prim_anf_node->value()->cast<PrimitivePtr>();
  1958. MS_EXCEPTION_IF_NULL(node_prim);
  1959. if ((node_prim->name() == DEPEND && node_pair.second != 1) || node_prim->name() == RESHAPE) {
  1960. continue;
  1961. }
  1962. if (IsParallelCareNode(use_apply) && use_apply->has_user_data<OperatorInfo>()) {
  1963. auto layout = GetInputLayoutFromCNode(node_pair);
  1964. return std::make_shared<TensorLayout>(layout);
  1965. }
  1966. }
  1967. return nullptr;
  1968. }
  1969. std::shared_ptr<TensorLayout> CreateParameterLayout(const AnfNodePtr &node) {
  1970. // Create DataParallel tensor layout for parameter(support WideDeep).
  1971. auto next_layout = FindParameterNextLayout(node, 0);
  1972. if (next_layout != nullptr) {
  1973. return next_layout;
  1974. }
  1975. CheckGlobalDeviceManager();
  1976. int64_t dev_num = g_device_manager->stage_device_num();
  1977. TensorLayout input_tensor_layout;
  1978. // create input_shape
  1979. Shapes inputs_shape = GetNodeShape(node);
  1980. Shape input_shape_array = inputs_shape[0];
  1981. if (input_shape_array.empty()) {
1982. MS_LOG(EXCEPTION) << "Reshaping a scalar parameter is not supported.";
  1983. }
  1984. // create tensor_map
  1985. size_t shape_size = input_shape_array.size();
  1986. TensorMap input_tensor_map_array(SizeToLong(shape_size) - 1, -1);
  1987. input_tensor_map_array.insert(input_tensor_map_array.begin(), 0);
  1988. // create dev_matrix
  1989. Shape dev_matrix_array = {dev_num};
  1990. if (input_tensor_layout.InitFromVector(dev_matrix_array, input_tensor_map_array, input_shape_array) != SUCCESS) {
  1991. MS_LOG(EXCEPTION) << "Create tensor layout for parameter failed.";
  1992. }
  1993. return std::make_shared<TensorLayout>(input_tensor_layout);
  1994. }
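// The resulting layout is plain data parallelism: dev_matrix [dev_num] and tensor map
// [0, -1, ..., -1], i.e. only the first dimension of the parameter is sharded across
// this stage's devices.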
  1995. RedistributionOpListPtr InferSensRedistribution(const AnfNodePtr &node, const TensorLayout &loss_layout) {
  1996. MS_EXCEPTION_IF_NULL(node);
  1997. TensorRedistribution tensor_redistribution;
  1998. // create stand alone layout:TensorMap:[all -1],dev_matrix:[dev_num].
  1999. CheckGlobalDeviceManager();
  2000. int64_t dev_num = g_device_manager->stage_device_num();
  2001. TensorLayout stand_alone_layout;
  2002. Shapes inputs_shape = GetNodeShape(node);
  2003. if (inputs_shape.empty()) {
2004. MS_LOG(EXCEPTION) << "InferSensRedistribution failed because the inputs shape is empty.";
  2005. }
  2006. Shape input_shape_array = inputs_shape[0];
  2007. if (input_shape_array.empty()) {
  2008. MS_LOG(INFO) << "No need to redistribution for sens.";
  2009. return nullptr;
  2010. }
  2011. // TensorMap
  2012. TensorMap stand_alone_tensor_map_array(SizeToLong(input_shape_array.size()), -1);
  2013. // Dev_matrix
  2014. Shape dev_matrix_array = {dev_num};
  2015. if (stand_alone_layout.InitFromVector(dev_matrix_array, stand_alone_tensor_map_array, input_shape_array) == FAILED) {
  2016. MS_LOG(EXCEPTION) << "Create tensor layout for Sens failed.";
  2017. }
  2018. // Infer Redistribution op list for stand alone and loss layout.
  2019. RankList dev_list = g_device_manager->GetDeviceListInThisStage();
  2020. if (tensor_redistribution.Init(stand_alone_layout, loss_layout, dev_list) == FAILED) {
  2021. MS_LOG(EXCEPTION) << "Redistribution for Sens init failed.";
  2022. }
  2023. RedistributionOpListPtr sens_redistribution_list = tensor_redistribution.InferTensorRedistributionOperatorList();
  2024. MS_EXCEPTION_IF_NULL(sens_redistribution_list);
  2025. return sens_redistribution_list;
  2026. }
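// InferSensRedistribution (above) builds a stand-alone layout for the sens node (tensor
// map of all -1 over dev_matrix [dev_num]) and infers the redistribution operator list
// that converts it to the given loss layout.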
std::shared_ptr<TensorLayout> FindPrevLayout(const AnfNodePtr &node) {
  if (node->isa<Parameter>()) {
    return CreateParameterLayout(node);
  }
  if (!node->isa<CNode>()) {
    return nullptr;
  }
  CNodePtr cnode = node->cast<CNodePtr>();
  if (!IsValueNode<Primitive>(cnode->input(0))) {
    return nullptr;
  }
  if (IsPrimitiveCNode(node, prim::kPrimReceive)) {
    return cnode->user_data<TensorLayout>();
  }
  if (IsParallelCareNode(cnode) && cnode->has_user_data<OperatorInfo>() &&
      !IsPrimitiveCNode(node, prim::kPrimReshape)) {
    auto layout_ptr = GetOutputLayoutFromCNode(cnode, 0);
    if (!layout_ptr) {
      MS_LOG(EXCEPTION) << "Failure: GetOutputLayoutFromCNode failed";
    }
    return layout_ptr;
  }
  ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
  PrimitivePtr prim = prim_anf_node->value()->cast<PrimitivePtr>();
  if (prim->name() == prim::kTupleGetItem) {
    auto tuple_index = GetTupleGetItemIndex(cnode);
    auto layout_ptr = FindPrevParallelCareNodeLayout(cnode->input(1), LongToSize(tuple_index));
    if (!layout_ptr) {
      MS_LOG(EXCEPTION) << "Failure: FindPrevLayout failed, there is a tuple_getitem before the reshape, but there "
                           "does not exist a parallel care node before the tuple_getitem!";
    }
    return layout_ptr;
  }
  for (size_t index = 0; index < cnode->inputs().size(); ++index) {
    if (prim->name() == DEPEND && index != 1) {
      continue;
    }
    auto layout_ptr = FindPrevLayout(cnode->inputs()[index]);
    if (!layout_ptr) {
      continue;
    }
    return layout_ptr;
  }
  MS_LOG(WARNING) << "FindPrevLayout returned nullptr; if reshape is not the first primitive, there must be some error";
  return nullptr;
}
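// Initialize every Reshape operator: its input layout comes from the previous
// parallel care node and its output layout from the next one. When the next node is
// also a reshape, fall back to the previous layout. Reshape accepts no user strategy.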
void ReshapeInit(const std::vector<AnfNodePtr> &all_nodes) {
  for (auto &node : all_nodes) {
    auto cnode = node->cast<CNodePtr>();
    if ((cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    ValueNodePtr prim_anf_node = cnode->input(0)->cast<ValueNodePtr>();
    if (!IsParallelCareNode(cnode) || !cnode->has_user_data<OperatorInfo>()) {
      continue;
    }
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(prim_anf_node);
    MS_EXCEPTION_IF_NULL(prim);
    OperatorInfoPtr operator_info = cnode->user_data<OperatorInfo>();
    if (operator_info == nullptr) {
      MS_LOG(EXCEPTION) << "Failure: Primitive " << prim->ToString() << " OperatorInstance is nullptr";
    }
    if (prim->name() != RESHAPE) {
      continue;
    }
    auto attrs = prim->attrs();
    if (StrategyFound(attrs)) {
      MS_LOG(EXCEPTION) << "Setting a strategy for Reshape has no effect!";
    }
    MS_ASSERT(cnode->inputs().size() == RESHAPE_INPUT_SIZE);
    auto prev_layout_ptr = FindPrevLayout(cnode->input(1));
    if (prev_layout_ptr) {
      auto reshape_info_ptr = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
      reshape_info_ptr->SetInputLayout(*prev_layout_ptr);
    }
    bool is_next_reshape = false;
    auto next_layout_ptr = FindNextLayout(cnode, &is_next_reshape);
    if (next_layout_ptr) {
      auto reshape_info_ptr = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
      reshape_info_ptr->SetOutputLayout(*next_layout_ptr);
    } else if (is_next_reshape && prev_layout_ptr != nullptr) {
      auto reshape_info_ptr = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
      reshape_info_ptr->SetOutputLayout(*prev_layout_ptr);
    }
    if (operator_info->Init(nullptr, nullptr) == FAILED) {
      MS_LOG(EXCEPTION) << "Failure: operator " << prim->ToString() << " init failed";
    }
  }
}
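// Skip over Depend nodes (and Casts without operator info) between return and the
// real loss node, so that return->depend->...->loss resolves to the loss cnode.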
CNodePtr HandleDependLoss(const CNodePtr &cnode, size_t curr_depth) {
  if (curr_depth > MAX_RECURSIVE_DEPTH) {
    MS_LOG(WARNING) << "When handling the loss node of Depend, exceeded the max recursive depth: "
                    << MAX_RECURSIVE_DEPTH;
    return nullptr;
  }
  // Handle return->depend->loss
  if (IsPrimitiveCNode(cnode, prim::kPrimDepend) ||
      (IsPrimitiveCNode(cnode, prim::kPrimCast) && !cnode->has_user_data<OperatorInfo>())) {
    auto depend_before = cnode->input(1)->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(depend_before);
    return HandleDependLoss(depend_before, ++curr_depth);
  }
  return cnode;
}
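// Locate the loss cnode of a forward graph by walking back from its return node,
// following Depend/Cast wrappers and Switch branches, and record whether the loss is
// reached through a tuple_getitem (and at which output index).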
LossNodeInfo FindLossCNode(const FuncGraphPtr &func_graph, size_t max_depth) {
  if (max_depth > MAX_RECURSIVE_DEPTH) {
    MS_LOG(EXCEPTION) << "Recursive call exceeded the max recursive depth: " << MAX_RECURSIVE_DEPTH;
  }
  LossNodeInfo loss_node_info;
  MS_EXCEPTION_IF_NULL(func_graph);
  CNodePtr return_node = func_graph->get_return();
  MS_EXCEPTION_IF_NULL(return_node);
  if (return_node->size() < 2) {
    MS_LOG(EXCEPTION) << "Failure: " << return_node->DebugString() << " size is smaller than 2";
  }
  AnfNodePtr pre_node = return_node->input(1);
  MS_EXCEPTION_IF_NULL(pre_node);
  auto pre_cnode = pre_node->cast<CNodePtr>();
  pre_cnode = HandleDependLoss(pre_cnode, 0);
  if (pre_cnode != nullptr && pre_cnode->input(0)->isa<CNode>()) {
    auto switch_cnode = pre_cnode->input(0)->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(switch_cnode);
    if (IsPrimitiveCNode(switch_cnode, prim::kPrimSwitch)) {
      auto switch_graph = GetValueNode<FuncGraphPtr>(switch_cnode->input(2));
      return FindLossCNode(switch_graph, max_depth + 1);
    }
  }
  if (pre_cnode == nullptr || !IsValueNode<Primitive>(pre_cnode->input(0))) {
    return loss_node_info;
  }
  auto current_prim = GetValueNode<PrimitivePtr>(pre_cnode->input(0));
  // Notice: the GetNext op has no input.
  if (INVALID_LOSS_OPS.find(current_prim->name()) != INVALID_LOSS_OPS.end()) {
    MS_LOG(INFO) << "The loss is: " << current_prim->name();
    loss_node_info.loss_node = pre_cnode;
    return loss_node_info;
  }
  // The size of a common cnode is larger than 1.
  if (pre_cnode->size() < 2) {
    MS_LOG(EXCEPTION) << pre_cnode->ToString() << " size (" << pre_cnode->inputs().size() << ") is smaller than 2";
  }
  // return -> tuple_getitem -> loss
  if (current_prim->name() == prim::kTupleGetItem) {
    auto tuple_index = GetTupleGetItemIndex(pre_cnode);
    AnfNodePtr pre_pre_node = pre_cnode->input(1);
    MS_EXCEPTION_IF_NULL(pre_pre_node);
    auto pre_pre_cnode = pre_pre_node->cast<CNodePtr>();
    loss_node_info.has_tuple_getitem = true;
    loss_node_info.dout_index = tuple_index;
    loss_node_info.loss_node = pre_pre_cnode;
    return loss_node_info;
  }
  // return -> make_tuple
  if (current_prim->name() == MAKE_TUPLE) {
    MS_LOG(WARNING) << "The loss has make_tuple, which is not supported";
    return loss_node_info;
  }
  // return -> loss
  loss_node_info.loss_node = pre_cnode;
  MS_LOG(DEBUG) << "The loss name is " << current_prim->name();
  return loss_node_info;
}
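// Fetch the tensor layout of the loss output that the grad sens corresponds to,
// using the dout index recorded by FindLossCNode. Returns an empty list for loss
// ops whose sens should not be split.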
TensorLayouts GetLossNodeGradOutputLayout(const LossNodeInfo &node_info) {
  TensorLayouts ret;
  auto loss_cnode = node_info.loss_node;
  MS_EXCEPTION_IF_NULL(loss_cnode);
  ValueNodePtr prim_anf_node = loss_cnode->input(0)->cast<ValueNodePtr>();
  MS_EXCEPTION_IF_NULL(prim_anf_node);
  PrimitivePtr prim = prim_anf_node->value()->cast<PrimitivePtr>();
  MS_EXCEPTION_IF_NULL(prim);
  if (INVALID_LOSS_OPS.find(prim->name()) != INVALID_LOSS_OPS.end()) {
    MS_LOG(WARNING) << "The loss name is: " << prim->name() << ", do nothing for splitting sens now";
    return ret;
  }
  OperatorInfoPtr operator_info = loss_cnode->user_data<OperatorInfo>();
  MS_EXCEPTION_IF_NULL(operator_info);
  TensorInfo loss_grad_tensor_info;
  size_t op_output_size = operator_info->outputs_tensor_info().size();
  MS_LOG(INFO) << "The loss name is " << operator_info->name() << ", the has_tuple_getitem is "
               << node_info.has_tuple_getitem << ", the output size is " << op_output_size << ", the dout_index is "
               << node_info.dout_index;
  if ((op_output_size == 0) || (op_output_size <= LongToSize(node_info.dout_index))) {
    MS_LOG(EXCEPTION) << "The index is " << node_info.dout_index << ", but the size of outputs is " << op_output_size;
  }
  if (!node_info.has_tuple_getitem && (op_output_size > 1)) {
    MS_LOG(EXCEPTION) << "Currently, it is not supported that the sens is a tuple.";
  }
  loss_grad_tensor_info = operator_info->outputs_tensor_info()[LongToSize(node_info.dout_index)];
  ret.push_back(loss_grad_tensor_info.tensor_layout());
  return ret;
}
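// Split the grad sens tensor so that each device holds only its slice of the loss
// gradient: parameters get a sliced abstract plus the loss layout, cnodes get a
// redistribution op list, and constant tensors get a _GetTensorSlice op.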
void SplitSens(const CNodePtr &grad_sens_node, const TensorLayout &loss_grad_layout) {
  MS_EXCEPTION_IF_NULL(grad_sens_node);
  if (grad_sens_node->size() <= 1) {
    MS_LOG(EXCEPTION) << "The size of grad sens node is smaller than 2";
  }
  AnfNodePtr sens_tensor_node = grad_sens_node->input(1);
  MS_EXCEPTION_IF_NULL(sens_tensor_node);
  Shapes sens_shapes = GetNodeShape(sens_tensor_node);
  if (sens_shapes.size() != 1) {
    MS_LOG(EXCEPTION) << "GetNodeShape for sens_tensor_node, output size is not 1";
  }
  // If the shape of the sens tensor is [] or [1], there is no need to split it.
  Shape sens_shape = sens_shapes[0];
  if (sens_shape.empty() || ((sens_shape.size() == 1) && (sens_shape[0] == 1))) {
    if (sens_tensor_node->isa<Parameter>()) {
      auto sens_tensor_param = sens_tensor_node->cast<ParameterPtr>();
      MS_LOG(DEBUG) << "loss layout " << loss_grad_layout.ToString();
      sens_tensor_param->set_user_data<TensorLayout>(std::make_shared<TensorLayout>(loss_grad_layout));
    }
    MS_LOG(INFO) << "The shape of sens is " << ShapeToString(sens_shape) << ", no need to split sens";
    return;
  }
  auto loss_shape = loss_grad_layout.tensor_shape().array();
  if (loss_shape != sens_shape) {
    MS_LOG(EXCEPTION) << "The shape of sens is not equal to the loss output, which is unsupported now. Sens shape is "
                      << ShapeToString(sens_shape) << ", loss shape is " << ShapeToString(loss_shape);
  }
  MS_LOG(INFO) << "The shape of sens is " << ShapeToString(sens_shape) << ", split it.";
  if (!IsValueNode<Tensor>(sens_tensor_node)) {
    if (sens_tensor_node->isa<Parameter>()) {
      MS_LOG(DEBUG) << "loss layout " << loss_grad_layout.ToString();
      AbstractBasePtr abstract = sens_tensor_node->abstract();
      MS_EXCEPTION_IF_NULL(abstract);
      auto slice_shape = loss_grad_layout.slice_shape().array();
      std::shared_ptr<abstract::BaseShape> parallel_shape = std::make_shared<abstract::Shape>(slice_shape);
      MS_EXCEPTION_IF_NULL(parallel_shape);
      auto cloned_abstract = abstract->Clone();
      MS_EXCEPTION_IF_NULL(cloned_abstract);
      cloned_abstract->set_shape(parallel_shape);
      sens_tensor_node->set_abstract(cloned_abstract);
      auto sens_tensor_param = sens_tensor_node->cast<ParameterPtr>();
      sens_tensor_param->set_user_data<TensorLayout>(std::make_shared<TensorLayout>(loss_grad_layout));
      return;
    }
    if (sens_tensor_node->isa<CNode>()) {
      auto op_list_ptr = InferSensRedistribution(sens_tensor_node, loss_grad_layout);
      if (op_list_ptr == nullptr) {
        return;
      }
      auto sens_tensor_cnode = sens_tensor_node->cast<CNodePtr>();
      auto func_graph = grad_sens_node->func_graph();
      MS_EXCEPTION_IF_NULL(func_graph);
      InsertRedistribution(op_list_ptr, grad_sens_node, func_graph, 1, sens_tensor_cnode);
      return;
    }
    MS_LOG(EXCEPTION) << "The type of the sens node is not Tensor, Parameter or CNode; it is unsupported now.";
  }
  // Use the _GetTensorSlice operator to split the sens tensor.
  FuncGraphPtr func_graph = grad_sens_node->func_graph();  // only a cnode can get the graph
  MS_EXCEPTION_IF_NULL(func_graph);
  Operator op = CreateGetTensorSliceOp(loss_grad_layout);
  InsertGetTensorSliceOp(op, grad_sens_node, func_graph, 1, SPLIT_SENS);
}
void InsertForwardOps(const OperatorInfoPtr &distribute_operator, const CNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(distribute_operator);
  MS_EXCEPTION_IF_NULL(cnode);
  if (IsPrimitiveCNode(cnode, prim::kPrimReceive)) {
    return;
  }
  OperatorVector forward_op = distribute_operator->forward_op();
  if (!forward_op.empty()) {
    MS_LOG(INFO) << "Insert forward op for " << distribute_operator->name();
    ForwardCommunication(forward_op, cnode);
  }
}
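// Apply an operator's replacement: either a replace_op list that substitutes the
// primitive in place, or a replace_graph that rewrites the cnode's subgraph. The
// two are mutually exclusive.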
void StepReplace(const OperatorInfoPtr &distribute_operator, const CNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(distribute_operator);
  MS_EXCEPTION_IF_NULL(cnode);
  // StepReplaceOp
  OperatorVector replace_op = distribute_operator->replace_op();
  if (!replace_op.empty()) {
    MS_LOG(INFO) << "StepReplaceOp " << cnode->ToString();
    StepReplaceOp(replace_op, cnode);
  }
  // StepReplaceGraph: after calling StepReplaceGraph, the cnode can not be used anymore.
  ReplaceGraphPtr replace_graph = distribute_operator->replace_graph(cnode);
  if (!replace_op.empty() && replace_graph) {
    MS_LOG(EXCEPTION) << "Only one of replace_op or replace_graph can be used";
  }
  if (replace_graph) {
    MS_LOG(INFO) << "StepReplaceGraph " << cnode->ToString();
    StepReplaceGraph(replace_graph, cnode);
  }
}
std::set<FuncGraphPtr> FindForwardGraphByRootNodes(const AnfNodeSet &root_all_nodes) {
  // J->CNode->Graph
  std::set<FuncGraphPtr> graph_set;
  for (auto &node : root_all_nodes) {
    MS_EXCEPTION_IF_NULL(node);
    if (!node->isa<CNode>()) {
      continue;
    }
    auto cnode = node->cast<CNodePtr>();
    if ((cnode->size() < 2) || !IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    auto expect_prim = GetValueNode<PrimitivePtr>(cnode->input(0));
    if (expect_prim->name() != J && expect_prim->name() != SHARD) {
      continue;
    }
    if (IsValueNode<FuncGraph>(cnode->input(1))) {
      auto graph = GetValueNode<FuncGraphPtr>(cnode->input(1));
      MS_LOG(DEBUG) << "Find the forward graph success";
      graph_set.insert(graph);
      auto manager = graph->manager();
      MS_EXCEPTION_IF_NULL(manager);
      auto graph_used = manager->func_graphs_used_total(graph);
      for (auto &sub_graph : graph_used) {
        graph_set.insert(sub_graph);
      }
    }
  }
  return graph_set;
}
void StepSplitSens(const std::pair<CNodePtr, LossNodeInfo> &sens_loss_pair) {
  CNodePtr sens_node = sens_loss_pair.first;
  auto loss_node = sens_loss_pair.second;
  auto loss_grad_layout = GetLossNodeGradOutputLayout(loss_node);
  if (!loss_grad_layout.empty()) {
    SplitSens(sens_node, loss_grad_layout[0]);
  }
}

bool IsPynativeParallel() {
  auto parallel_mode = ParallelContext::GetInstance()->parallel_mode();
  auto execution_mode = MsContext::GetInstance()->get_param<int>(MS_CTX_EXECUTION_MODE);
  return (execution_mode == kPynativeMode) && (parallel_mode == kSemiAutoParallel || parallel_mode == kAutoParallel);
}
// The sens node satisfies the following pattern: cnode(sens)-->cnode(tuple_getitem)-->cnode-->cnode(J)
std::vector<std::pair<CNodePtr, LossNodeInfo>> GetSensLossPairs(const FuncGraphPtr &root) {
  MS_EXCEPTION_IF_NULL(root);
  std::vector<std::pair<CNodePtr, LossNodeInfo>> sens_loss_pairs;
  for (auto &node : root->nodes()) {
    if (!node->isa<CNode>()) {
      continue;
    }
    // cnode(sens)-->cnode(tuple_getitem)
    auto sens_cnode = node->cast<CNodePtr>();
    AnfNodePtr expect_tuple_getitem = sens_cnode->input(0);
    MS_EXCEPTION_IF_NULL(expect_tuple_getitem);
    if (!expect_tuple_getitem->isa<CNode>()) {
      continue;
    }
    auto expect_tuple_getitem_cnode = expect_tuple_getitem->cast<CNodePtr>();
    if (!IsSomePrimitive(expect_tuple_getitem_cnode, prim::kTupleGetItem)) {
      continue;
    }
    // cnode(sens)-->cnode(tuple_getitem)-->cnode
    AnfNodePtr expect_anonymous = expect_tuple_getitem_cnode->input(1);
    MS_EXCEPTION_IF_NULL(expect_anonymous);
    if (!expect_anonymous->isa<CNode>()) {
      continue;
    }
    // cnode(sens)-->cnode(tuple_getitem)-->cnode-->cnode(J)
    auto expect_anonymous_cnode = expect_anonymous->cast<CNodePtr>();
    AnfNodePtr expect_j = expect_anonymous_cnode->input(0);
    MS_EXCEPTION_IF_NULL(expect_j);
    if (!expect_j->isa<CNode>()) {
      continue;
    }
    auto expect_j_cnode = expect_j->cast<CNodePtr>();
    if (!IsSomePrimitive(expect_j_cnode, J)) {
      continue;
    }
    if (!IsValueNode<FuncGraph>(expect_j_cnode->input(1))) {
      MS_LOG(EXCEPTION) << "Sens can't find the corresponding graph.";
    }
    auto func_graph = GetValueNode<FuncGraphPtr>(expect_j_cnode->input(1));
    auto loss_node_info = FindLossCNode(func_graph, 0);
    if (loss_node_info.loss_node == nullptr) {
      MS_LOG(WARNING) << "Cannot find the loss cnode";
      continue;
    }
    std::pair<CNodePtr, LossNodeInfo> sens_loss_pair = std::make_pair(sens_cnode, loss_node_info);
    sens_loss_pairs.push_back(sens_loss_pair);
  }
  return sens_loss_pairs;
}
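// Main insertion pass: split sens for each sens/loss pair (on the last stage), then
// for every parallel care cnode insert forward communication, redistribution and
// backward (mirror) operators, split constant tensors, and finally apply any
// operator replacements.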
void ParallelCommunication(const FuncGraphPtr &root, const std::vector<AnfNodePtr> &all_nodes,
                           const FuncGraphManagerPtr &manager) {
  MS_EXCEPTION_IF_NULL(root);
  MS_EXCEPTION_IF_NULL(manager);
  TensorRedistribution tensor_redistribution;
  std::vector<std::pair<CNodePtr, LossNodeInfo>> sens_loss_pairs = GetSensLossPairs(root);
  bool has_backward = !sens_loss_pairs.empty();
  // Splitting sens must happen before inserting the operators.
  for (auto &pair : sens_loss_pairs) {
    // If the shape of the grad-sens tensor is not [] or [1], use get tensor slice to handle it.
    // If the type of the sens node is not Tensor, it is unsupported for now; do nothing by default.
    if (IsLastStage()) {
      StepSplitSens(pair);
    }
  }
  for (auto &node : all_nodes) {
    MS_EXCEPTION_IF_NULL(node);
    if (node->isa<CNode>()) {
      auto cnode = node->cast<CNodePtr>();
      // make_tuple is a parallel care node, but it may not have operator info
      if (!IsParallelCareNode(cnode) || !cnode->has_user_data<OperatorInfo>()) {
        continue;
      }
      OperatorInfoPtr distribute_operator = GetDistributeOperator(cnode);
      MS_EXCEPTION_IF_NULL(distribute_operator);
      // skip Send and Receive
      if (!cnode->HasPrimalAttr(PIPELINE_PARAM)) {
        // insert forward ops
        InsertForwardOps(distribute_operator, cnode);
        // insert redistribution ops
        StepRedistribution(cnode, distribute_operator, cnode, tensor_redistribution, cnode);
      }
      // insert backward ops
      if (has_backward || IsPynativeParallel()) {
        BackwardCommunication(root, distribute_operator, cnode, sens_loss_pairs);
      }
      distribute_operator->ReplaceNodeInputOrAttrs();
    } else if (IsValueNode<Tensor>(node) || IsValueNode<ValueList>(node) || IsValueNode<ValueTuple>(node)) {
      StepSplitTensor(node, manager);
    }
  }
  for (auto &node : all_nodes) {
    MS_EXCEPTION_IF_NULL(node);
    if (node->isa<CNode>()) {
      auto cnode = node->cast<CNodePtr>();
      if (!IsParallelCareNode(cnode) || !cnode->has_user_data<OperatorInfo>() || IsSomePrimitive(cnode, RECEIVE) ||
          IsSomePrimitive(cnode, SEND)) {
        continue;
      }
      OperatorInfoPtr distribute_operator = GetDistributeOperator(cnode);
      MS_EXCEPTION_IF_NULL(distribute_operator);
      // StepReplace
      StepReplace(distribute_operator, cnode);
    }
  }
}
bool IsCohesiveNode(const CNodePtr &cnode) {
  return IsPrimitiveCNode(cnode, prim::kPrimCast) || IsPrimitiveCNode(cnode, prim::kPrimLoad) ||
         IsPrimitiveCNode(cnode, prim::kPrimAllGather) || IsPrimitiveCNode(cnode, prim::kPrimMiniStepAllGather) ||
         IsPrimitiveCNode(cnode, prim::kPrimMicroStepAllGather);
}
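// Collect the trainable parameters that feed an operator, looking through cohesive
// wrappers (Cast/Load/AllGather variants) recursively up to MAX_RECURSIVE_DEPTH.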
ParameterMap NodeParameterName(const CNodePtr &node, int64_t index, size_t curr_depth) {
  if (curr_depth > MAX_RECURSIVE_DEPTH) {
    MS_LOG(WARNING) << "When finding the parameter names of an operator, exceeded the maximum depth: "
                    << MAX_RECURSIVE_DEPTH;
    return {};
  }
  std::vector<AnfNodePtr> node_inputs{node->inputs()};
  ParameterMap param_names;
  for (int64_t i = 0; i < UlongToLong(node_inputs.size()); ++i) {
    int64_t idx = index > i ? index : i;
    auto input = node_inputs[LongToSize(i)];
    if (input->isa<Parameter>()) {
      auto input_parameter = input->cast<ParameterPtr>();
      if (input_parameter->has_default() && ParameterRequireGrad(input_parameter)) {
        (void)param_names.emplace_back(std::make_pair(input_parameter->name(), input_parameter));
      }
    } else if (input->isa<CNode>()) {
      CNodePtr cnode = input->cast<CNodePtr>();
      if (!IsValueNode<Primitive>(cnode->input(0))) {
        continue;
      }
      if (IsCohesiveNode(cnode) && cnode->inputs().size() >= 1) {
        auto input_param_names = NodeParameterName(cnode, idx, 0);
        param_names.insert(param_names.end(), input_param_names.begin(), input_param_names.end());
      }
    }
  }
  return param_names;
}
bool IsGatherInfo(const std::string &name) {
  std::vector<std::string> gather_info_names = {"GatherInfo", "SparseGatherV2Info", "EmbeddingLookupInfo"};
  for (const std::string &info_name : gather_info_names) {
    if (name.find(info_name) != std::string::npos) {
      return true;
    }
  }
  return false;
}
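// Persist the parallel strategies, parameter tensor layouts and manual-split shapes
// (for gather-like operators) into the strategy checkpoint, keyed by primitive name
// plus the first parameter name. Cloned parameters save their layouts as well.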
void CheckpointStrategy(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphPtr &root) {
  StrategyMap stra_map;
  TensorInfoMap tensor_info_map;
  ManualShapeMap manual_shape_map;
  for (auto &node : all_nodes) {
    MS_EXCEPTION_IF_NULL(node);
    auto cnode = node->cast<CNodePtr>();
    if ((cnode == nullptr) || !IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    auto param_names = NodeParameterName(cnode, -1, 0);
    if (param_names.empty()) {
      continue;
    }
    string param_name = param_names[0].first;
    PrimitivePtr prim = GetValueNode<PrimitivePtr>(cnode->input(0));
    MS_EXCEPTION_IF_NULL(prim);
    OperatorInfoPtr operator_info = cnode->user_data<OperatorInfo>();
    if (operator_info) {
      if (operator_info->name().find(RESHAPEINFO) != std::string::npos) {
        continue;
      }
      std::string strategy_key_name = prim->name() + "_" + param_name;
      stra_map[strategy_key_name] = operator_info->strategy();
      for (auto param_name_pair : param_names) {
        tensor_info_map[param_name_pair.first] = param_name_pair.second->user_data<TensorLayout>();
      }
      if (IsGatherInfo(operator_info->name())) {
        auto gather_info = std::dynamic_pointer_cast<GatherInfo>(operator_info);
        auto param_split_shapes = gather_info->param_split_shapes();
        auto index_offsets = gather_info->index_offsets();
        if (param_split_shapes.size() != index_offsets.size()) {
          MS_LOG(EXCEPTION) << "In manual split, the lengths of param_split_shapes and index_offsets should be the same.";
        }
        std::vector<std::pair<int64_t, int64_t>> manual_shape;
        for (int64_t i = 0; i < UlongToLong(param_split_shapes.size()); ++i) {
          (void)manual_shape.emplace_back(
            std::make_pair(param_split_shapes[LongToSize(i)], index_offsets[LongToSize(i)]));
        }
        manual_shape_map[param_name] = manual_shape;
      }
    }
  }
  for (auto &cloned_parameter_node : root->parameters()) {
    MS_EXCEPTION_IF_NULL(cloned_parameter_node);
    auto cloned_parameter = cloned_parameter_node->cast<ParameterPtr>();
    MS_EXCEPTION_IF_NULL(cloned_parameter);
    if (!ParameterIsCloned(cloned_parameter_node)) {
      continue;
    }
    std::string cloned_param_name = cloned_parameter->name();
    auto cloned_param_layout = cloned_parameter_node->user_data<TensorLayout>();
    if (cloned_param_layout == nullptr) {
      continue;
    }
    tensor_info_map[cloned_param_name] = cloned_param_layout;
  }
  if (StrategyCheckpoint::GetInstance().Save(stra_map, tensor_info_map, &manual_shape_map) != SUCCESS) {
    MS_LOG(EXCEPTION) << "Save strategy checkpoint failed";
  }
}
void SetForwardFlag(const std::vector<AnfNodePtr> &all_nodes) {
  for (auto &node : all_nodes) {
    MS_EXCEPTION_IF_NULL(node);
    if (!node->isa<CNode>()) {
      continue;
    }
    auto cnode = node->cast<CNodePtr>();
    if (!IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    // CNode is globally unique.
    MS_LOG(DEBUG) << "Set forward flag " << cnode->DebugString() << ".";
    cnode->set_in_forward_flag(true);
  }
}

void SetForwardFlag(const AnfNodeSet &all_nodes) {
  for (auto &node : all_nodes) {
    MS_EXCEPTION_IF_NULL(node);
    if (!node->isa<CNode>()) {
      continue;
    }
    auto cnode = node->cast<CNodePtr>();
    if (!IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    // CNode is globally unique.
    cnode->set_in_forward_flag(true);
  }
}
std::set<FuncGraphPtr> ForwardGraph(const FuncGraphPtr &root) {
  MS_EXCEPTION_IF_NULL(root);
  const auto &all_nodes = root->nodes();
  std::set<FuncGraphPtr> graph_set = FindForwardGraphByRootNodes(all_nodes);
  return graph_set;
}

std::vector<AnfNodePtr> FindRootForwardCNode(const FuncGraphPtr &graph, const AnfNodeSet &all_nodes) {
  MS_EXCEPTION_IF_NULL(graph);
  std::vector<AnfNodePtr> root_forward_nodes;
  auto loss_cnode = FindLossCNode(graph, 0).loss_node;
  if (loss_cnode == nullptr) {
    MS_LOG(WARNING) << "Cannot find the loss cnode";
    return root_forward_nodes;
  }
  auto loss_cnode_id = loss_cnode->UniqueIdThroughCopy();
  for (auto &node : all_nodes) {
    MS_EXCEPTION_IF_NULL(node);
    if (!node->isa<CNode>()) {
      continue;
    }
    auto cnode = node->cast<CNodePtr>();
    auto root_node_id = node->UniqueIdThroughCopy();
    if (loss_cnode_id == root_node_id) {
      root_forward_nodes = DeepLinkedGraphSearch(cnode);
      break;
    }
  }
  return root_forward_nodes;
}
void InsertShapeOp(const CNodePtr &node, const AnfNodePtr &pre_node, const FuncGraphPtr &root) {
  // The shape op doesn't have params or attrs.
  OperatorParams params;
  OperatorAttrs attrs;
  auto shape_value = GetValueNode(node->input(2))->cast<ValueSequencePtr>();
  MS_EXCEPTION_IF_NULL(shape_value);
  auto shape = shape_value->value();
  if (shape.empty()) {
    return;
  }
  OperatorArgs args = std::make_pair(attrs, params);
  Operator op = std::make_pair(SHAPE_OP, args);
  InsertNode(op, node, 2, pre_node, root, "shape");
}
static AnfNodePtr FindGrad(const CNodePtr &cnode, size_t curr_depth) {
  if (curr_depth > MAX_RECURSIVE_DEPTH) {
    MS_LOG(WARNING) << "When finding Grad nodes, exceeded the maximum recursion depth: " << MAX_RECURSIVE_DEPTH;
    return nullptr;
  }
  for (auto &node : cnode->inputs()) {
    if (!node->isa<CNode>()) {
      continue;
    }
    if (!IsPrimitiveCNode(node, prim::kPrimEnvironGet)) {
      return FindGrad(node->cast<CNodePtr>(), ++curr_depth);
    } else {
      return node;
    }
  }
  return nullptr;
}
void HandleRootReshapeAndSaveStrategy(const std::vector<AnfNodePtr> &all_nodes) {
  // If the root graph has a reshape op, find the corresponding parameter;
  // the reshape's shape is the shape of that parameter.
  auto executor = pipeline::GraphExecutorPy::GetInstance();
  for (auto &node : all_nodes) {
    if (!node->isa<CNode>()) {
      continue;
    }
    auto cnode = node->cast<CNodePtr>();
    if (cnode == nullptr || !IsValueNode<Primitive>(cnode->input(0))) {
      continue;
    }
    if (cnode->in_forward_flag()) {
      // Save the strategy in the executor.
      OperatorInfoPtr op_info = cnode->user_data<OperatorInfo>();
      if (op_info) {
        auto stra_ptr = op_info->strategy();
        if (stra_ptr) {
          auto strategy = stra_ptr->GetInputDim();
          // The fullname with scope should be found in the step-parallel-end IR.
          executor->SetCNodeStrategy(cnode->fullname_with_scope(), strategy);
        }
      }
      continue;
    }
    auto prim = GetValueNode<PrimitivePtr>(cnode->input(0));
    if (prim->name() != RESHAPE) {
      continue;
    }
    Shape origin_dst_shape = GetValue<std::vector<int64_t>>(cnode->input(2)->cast<ValueNodePtr>()->value());
    if (origin_dst_shape.size() == 1 && origin_dst_shape[0] == -1) {
      continue;
    }
    auto root = node->func_graph();
    auto grad_node = FindGrad(cnode, 0);
    if (grad_node) {
      InsertShapeOp(cnode, grad_node, root);
    }
  }
}
void MarkForwardCNode(const FuncGraphPtr &root) {
  MS_EXCEPTION_IF_NULL(root);
  auto all_nodes = root->nodes();
  auto graph_set = FindForwardGraphByRootNodes(all_nodes);
  if (graph_set.empty()) {
    MS_LOG(INFO) << "Cannot find the forward graph, so mark the ops in the root graph";
    SetForwardFlag(all_nodes);
  } else {
    for (auto &func_graph : graph_set) {
      MS_LOG(INFO) << "The sub graph size of root is " << root->func_graphs_used().size();
      auto return_node = func_graph->get_return();
      MS_EXCEPTION_IF_NULL(return_node);
      auto all_dfs_nodes = DeepLinkedGraphSearch(return_node);
      SetForwardFlag(all_dfs_nodes);
      auto root_forward_nodes = FindRootForwardCNode(func_graph, all_nodes);
      if (root_forward_nodes.empty()) {
        continue;
      }
      // Mark the forward flag for the nodes in the root graph.
      SetForwardFlag(root_forward_nodes);
    }
  }
}
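// Resolve the communication settings (device number, global rank, world group and
// backend) from the parallel context, falling back to the communication manager when
// device_num or global_rank was not set explicitly.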
CommInfo GetCommInfo() {
  int64_t device_num = ParallelContext::GetInstance()->device_num();
  int64_t global_rank = ParallelContext::GetInstance()->global_rank();
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  std::string backend = ms_context->get_param<std::string>(MS_CTX_DEVICE_TARGET);
  std::string world_group;
  std::string communication_backend;
  if (backend == kAscendDevice || backend == kDavinciDevice) {
    world_group = HCCL_WORLD_GROUP;
    communication_backend = HCCL_BACKEND;
  } else if (backend == kGPUDevice) {
    world_group = NCCL_WORLD_GROUP;
    communication_backend = NCCL_BACKEND;
  } else {
    MS_LOG(EXCEPTION) << "Invalid communication backend: " << backend;
  }
  uint32_t world_rank_size = 0;
  if (!CommManager::GetInstance().GetRankSize(world_group, &world_rank_size)) {
    MS_LOG(EXCEPTION) << "Get rank size failed";
  }
  if (!ParallelContext::GetInstance()->device_num_is_set()) {
    device_num = UintToInt(world_rank_size);
    MS_LOG(INFO) << "Get device num from the communication module, the device num is " << device_num;
  }
#if ENABLE_D || ENABLE_GPU
  if (ParallelContext::GetInstance()->device_num_is_set() && world_rank_size != device_num &&
      !ParallelContext::GetInstance()->hccl_test_available()) {
    // hccl_test_available is used when we compile graphs in a real Ascend card environment, but with hccl_test.
    MS_LOG(EXCEPTION) << "The device_num " << device_num << " set in the context is not consistent with the "
                      << world_rank_size << " devices you have"
                      << ". Please check your rank_table file (for Ascend) or host file (for GPU).";
  }
#endif
  uint32_t rank_id = 0;
  if (!ParallelContext::GetInstance()->global_rank_is_set()) {
    if (!CommManager::GetInstance().GetRankID(world_group, &rank_id)) {
      MS_LOG(EXCEPTION) << "Get rank id failed";
    }
    global_rank = UintToInt(rank_id);
    MS_LOG(INFO) << "Get global rank from the communication module, the global rank is " << global_rank;
  }
  CommInfo comm_info{device_num, global_rank, world_group, communication_backend};
  return comm_info;
}
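// Validate the communication configuration (device_num, global_rank, pipeline
// stages) and initialize the device manager with one device list per stage.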
Status ParallelInit() {
  MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance());
  int32_t split_stage_num = ParallelContext::GetInstance()->pipeline_stage_split_num();
  std::string parallel_mode = ParallelContext::GetInstance()->parallel_mode();
  if (split_stage_num <= 0) {
    MS_LOG(ERROR) << "The parameter 'split_stage_num' must be a positive number, but got the value : "
                  << split_stage_num;
    return FAILED;
  }
  auto comm_info = GetCommInfo();
  int64_t device_num = comm_info.device_num;
  int64_t global_rank = comm_info.global_rank;
  if ((device_num <= 0) || (device_num > MAX_DEVICE_NUM)) {
    MS_LOG(ERROR) << "The context configuration parameter 'device_num' must be positive, "
                     "but got the value of device_num: "
                  << device_num;
    return FAILED;
  }
  // the device_num may be obtained from the communication interface
  if (device_num % split_stage_num != 0) {
    MS_LOG(ERROR) << "The parameter 'device_num' must be divisible by 'split_stage_num', but got the device_num : "
                  << device_num << " and the split_stage_num : " << split_stage_num;
    return FAILED;
  }
  if ((global_rank < 0) || (global_rank >= device_num)) {
    MS_LOG(ERROR) << "The parameter 'global_rank' must be in the range [0, device_num), "
                     "but got the global_rank : "
                  << global_rank << " and the device_num : " << device_num;
    return FAILED;
  }
  std::vector<int64_t> stages;
  for (int i = 0; i < split_stage_num; i++) {
    stages.push_back(device_num / split_stage_num);
  }
  if ((split_stage_num > 1) && (parallel_mode != kSemiAutoParallel)) {
    MS_LOG(ERROR) << "To enable pipeline parallel, please set the parallel mode to " << kSemiAutoParallel;
    return FAILED;
  }
  if (!InitDevice(device_num, global_rank, comm_info.communication_backend, stages)) {
    MS_LOG(ERROR) << "Init device failed";
    return FAILED;
  }
  MS_LOG(INFO) << "The parallel context: device_num: " << device_num << ", global_rank: " << global_rank
               << ", communication_backend: " << comm_info.communication_backend
               << ", gradients_mean: " << ParallelContext::GetInstance()->gradients_mean()
               << ", gradient_fp32_sync: " << ParallelContext::GetInstance()->gradient_fp32_sync();
  return SUCCESS;
}
void HandleForwardMakeTupleAndMakeList(const std::vector<AnfNodePtr> &all_nodes) {
  for (auto &node : all_nodes) {
    if (!AnfNodeIsPrimitive(node, MAKE_TUPLE) && !AnfNodeIsPrimitive(node, MAKE_LIST)) {
      continue;
    }
    auto cnode = node->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(cnode);
    if (!cnode->in_forward_flag()) {
      continue;
    }
    FuncGraphManagerPtr manager = cnode->func_graph()->manager();
    MS_EXCEPTION_IF_NULL(manager);
    // A MakeTuple has multiple users; each user's TensorInfo must be the same.
    auto make_tuple_list_next_node = CheckMakeTupleSplit(node, manager);
    if (make_tuple_list_next_node == nullptr) {
      continue;
    }
    auto make_tuple_list_next_cnode = make_tuple_list_next_node->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(make_tuple_list_next_cnode);
    OperatorInfoPtr op_info = GetDistributeOperator(make_tuple_list_next_cnode);
    MS_EXCEPTION_IF_NULL(op_info);
    cnode->set_user_data<OperatorInfo>(op_info);
  }
}
bool CreateGroupsByCkptFile(const std::string &file) {
  GroupInfoMap group_info_map;
  if (StrategyCheckpoint::GetInstance().LoadGroupInfo(file, &group_info_map) != SUCCESS) {
    return false;
  }
  if (CreateGroups(group_info_map) != SUCCESS) {
    return false;
  }
  MS_LOG(INFO) << "Create groups by checkpoint file success";
  return true;
}
void ReorderForPipelineSplit(const FuncGraphPtr &root, const FuncGraphManagerPtr &manager, int64_t pipeline_stages) {
  if (!root->has_flag(BACKWARD) && pipeline_stages > 1) {
    root->set_flag(BACKWARD, true);
    if (root->has_flag(kTraining)) {
      Reorder(root);
    } else {
      ReorderForPredict(root, manager);
    }
  }
}
bool IsInsertVirtualOutput(const FuncGraphPtr &root) {
  MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance());
  auto comm_info = GetCommInfo();
  int64_t split_stage_num = ParallelContext::GetInstance()->pipeline_stage_split_num();
  int64_t per_stage_device_num = comm_info.device_num / split_stage_num;
  int64_t current_stage = comm_info.global_rank / per_stage_device_num;
  MS_LOG(INFO) << "The current stage is: " << current_stage;
  if (!root->has_flag(kTraining) && !ParallelContext::GetInstance()->dataset_strategy().empty()) {
    MS_LOG(WARNING) << "In an eval/predict net, the output parallel strategy would not follow "
                       "the input parallel strategy when using context.set_auto_parallel_context(dataset_strategy)"
                       " to configure the input strategy.";
  }
  return ((!root->has_flag(kTraining) && ParallelContext::GetInstance()->dataset_strategy().empty() &&
           current_stage == split_stage_num - 1) ||
          IsPynativeParallel());
}
static void HandleGroupInfo(const FuncGraphPtr &root) {
  auto group_info = g_device_manager->group_info();
  auto group_info_save_path = common::GetEnv("GROUP_INFO_FILE");
  if (!group_info_save_path.empty()) {
    ParallelContext::GetInstance()->set_group_ckpt_save_file(group_info_save_path);
  }
  if (StrategyCheckpoint::GetInstance().group_info_save_on()) {
    RankList comm_group = FindCommonMirrorGroup(root);
    if (StrategyCheckpoint::GetInstance().SaveGroupInfo(group_info, comm_group) != SUCCESS) {
      MS_LOG(EXCEPTION) << "Save group info failed";
    }
  }
}

static void HandleDataParallel() {
  std::string parallel_mode = ParallelContext::GetInstance()->parallel_mode();
  if (parallel_mode == kDataParallel) {
    auto group_info_save_path = common::GetEnv("GROUP_INFO_FILE");
    if (!group_info_save_path.empty()) {
      std::vector<std::pair<std::string, std::vector<uint32_t>>> group_info;
      int64_t device_num = GetCommInfo().device_num;
      RankList comm_group;
      for (size_t i = 0; i < size_t(device_num); ++i) {
        comm_group.push_back(i);
      }
      ParallelContext::GetInstance()->set_group_ckpt_save_file(group_info_save_path);
      if (StrategyCheckpoint::GetInstance().SaveGroupInfo(group_info, comm_group) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Save group info failed";
      }
    }
  }
}
static void PipelinePreProcess(const FuncGraphPtr &root, const FuncGraphManagerPtr &manager,
                               const std::vector<AnfNodePtr> &all_nodes) {
  auto pipeline_stages = ParallelContext::GetInstance()->pipeline_stage_split_num();
  if (pipeline_stages > 1) {
    HandleMicroBatch(all_nodes, manager);
    ParameterStartNode(all_nodes, manager);
    LastStageEndNode(all_nodes, manager, root);
  }
}

static void PipelinePostProcess(const FuncGraphPtr &root, const std::vector<AnfNodePtr> &all_nodes) {
  auto pipeline_stages = ParallelContext::GetInstance()->pipeline_stage_split_num();
  if (pipeline_stages > 1) {
    AddVirtualAssignAdd(root);
    HandleReceiveParam(root, all_nodes);
    LabelGenMaskMicro(root);
  }
}
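// Insert the AllReduce that sums the squared-norm values: once across the devices of
// the current stage, and, when pipeline parallelism is enabled, once more across the
// corresponding ranks of the other stages.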
static void InsertAllReduceForNormValue(const AnfNodePtr &res_node) {
  auto cnode = res_node->cast<CNodePtr>();
  auto graphs = res_node->func_graph();
  MS_EXCEPTION_IF_NULL(graphs);
  auto manager = graphs->manager();
  MS_EXCEPTION_IF_NULL(manager);
  auto node_user_map = manager->node_users();
  if (!IsSomePrimitive(cnode, EXPAND_DIMS)) {
    MS_LOG(ERROR) << "Expected the operator expand_dims, but found " << GetPrimName(cnode)
                  << ". This may make the calculation of the global norm incorrect";
    return;
  }
  auto pipeline_stages = ParallelContext::GetInstance()->pipeline_stage_split_num();
  auto expand_dims_node = node_user_map.at(res_node).front().first;
  auto sqrt_node = MatchPattern(expand_dims_node, node_user_map, REDUCE_SUM_MATCH_PATTERN);
  if (!sqrt_node) {
    return;
  }
  auto cur_stage_rank_list = g_device_manager->GetDeviceListInThisStage();
  Group cur_stage_device_list = g_device_manager->CreateGroup(cur_stage_rank_list);
  InsertAllReduceToNodeInput(sqrt_node->cast<CNodePtr>(), cur_stage_device_list.name(), PARALLEL_GLOBALNORM);
  MS_LOG(INFO) << "Inserting the AllReduce for the global norm value within the stage succeeded.";
  if (pipeline_stages > 1) {
    MS_LOG(INFO) << "Inserting the AllReduce for the global norm value between stages succeeded.";
    auto ranks_between_stages = g_device_manager->GetDeviceListBetweenStage();
    Group group_between_stages = g_device_manager->CreateGroup(ranks_between_stages);
    InsertAllReduceToNodeInput(sqrt_node->cast<CNodePtr>(), group_between_stages.name(), PARALLEL_GLOBALNORM_BETWEEN);
  }
}
AnfNodePtr FindExpandDimsWithGradScale(const AnfNodePtr &node_ptr, const NodeUsersMap &node_users_map,
                                       uint32_t limits) {
  std::queue<AnfNodePtr> visited;
  AnfNodePtr queue_node = nullptr;
  CNodePtr cnode = nullptr;
  AnfNodePtr last_node = nullptr;
  uint32_t depth = 0;
  if (!node_ptr) {
    return nullptr;
  }
  visited.push(node_ptr);
  while (!visited.empty()) {
    queue_node = visited.front();
    visited.pop();
    cnode = queue_node->cast<CNodePtr>();
    // MAKE_TUPLE will not appear after the load in the forward graph
    if (IsSomePrimitive(cnode, EXPAND_DIMS)) {
      auto value = GetAttrsFromAnfNode(queue_node, GRAD_SCALE);
      if (!value || !GetValue<bool>(value)) {
        continue;
      }
      return queue_node;
    }
    if (!IsSomePrimitiveList(cnode, {ENVIRONGET, MUL, SQUARE, REDUCE_SUM, EXPAND_DIMS, DEPEND, CAST, REF_TO_EMBED})) {
      continue;
    }
    auto node_set = node_users_map.at(queue_node);
    for (auto &node_user : node_set) {
      visited.push(node_user.first);
    }
    if (!last_node || last_node == queue_node) {
      if (++depth == limits) {
        break;
      }
      last_node = visited.back();
    }
  }
  return nullptr;
}
static void InsertDivAndAllReduceForNorm(const NodeUsersMap &node_user_map, const AnfNodePtr &parameter,
                                         uint32_t dev_num) {
  AnfNodePtr expand_dims_node = nullptr;
  auto params_user_set = node_user_map.at(parameter);
  for (auto &param_pair : params_user_set) {
    expand_dims_node = nullptr;
    auto cnode = param_pair.first->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(cnode);
    if (cnode->in_forward_flag()) {
      continue;
    }
    expand_dims_node = FindExpandDimsWithGradScale(cnode, node_user_map, MAX_BFS_DEPTH);
    if (!expand_dims_node) {
      continue;
    }
    auto value = GetAttrsFromAnfNode(expand_dims_node, GRAD_SCALE);
    if (!value || !GetValue<bool>(value)) {
      continue;
    }
    InsertRealDivOpToNodeInput(expand_dims_node->cast<CNodePtr>(), dev_num, PARALLEL_GLOBALNORM_DIV);
    MS_LOG(INFO) << "Inserting the realdiv with " << dev_num << " for the parameter " << parameter->DebugString()
                 << " succeeded!";
    // If an allreduce has already been inserted, the pattern will not match and no further allreduce is inserted.
    InsertAllReduceForNormValue(expand_dims_node);
  }
}
static AnfNodePtr GetMirrorOp(const NodeUsersMap &node_user_map, const AnfNodePtr &parameter) {
  auto params_user_set = node_user_map.at(parameter);
  for (auto &param_pair : params_user_set) {
    auto cnode = param_pair.first->cast<CNodePtr>();
    std::vector<AnfNodePtr> candidate = {cnode};
    if (!cnode->in_forward_flag()) {
      continue;
    }
    if (IsInTrivialNodeList(cnode) || IsSomePrimitive(cnode, LOAD)) {
      auto load_users = node_user_map.at(param_pair.first);
      std::transform(load_users.begin(), load_users.end(), std::back_inserter(candidate),
                     [](const auto &v) { return v.first; });
    }
    for (auto &node : candidate) {
      auto local_cnode = node->cast<CNodePtr>();
      if (!IsPrimitiveCNode(local_cnode, prim::kPrimMirror) &&
          !IsPrimitiveCNode(local_cnode, prim::kPrimMirrorMicroStep) &&
          !IsPrimitiveCNode(local_cnode, prim::kPrimMirrorMiniStep)) {
        continue;
      }
      return node;
    }
  }
  return nullptr;
}
static void HandleGlobalNormScale(const FuncGraphPtr &root, const std::vector<AnfNodePtr> &all_nodes,
                                  const FuncGraphManagerPtr &manager) {
  auto parameters = root->parameters();
  auto node_user_map = manager->node_users();
  MS_LOG(INFO) << "Start to process the global norm";
  for (auto &parameter : parameters) {
    if (!ParameterRequireGrad(parameter)) {
      continue;
    }
    auto mirror_node = GetMirrorOp(node_user_map, parameter);
    if (!mirror_node) {
      continue;
    }
    auto device_num_ptr = GetAttrsFromAnfNode(mirror_node, DEV_NUM);
    if (!device_num_ptr) {
      MS_LOG(ERROR) << "The mirror operator is expected to have a device number attribute, but found none. This "
                       "will cause the global norm to be calculated with wrong precision.";
      continue;
    }
    if (!device_num_ptr->isa<Int64Imm>()) {
      MS_LOG(ERROR) << "The type of the device number attribute of the mirror operator is not int64.";
      continue;
    }
    auto dev_num = device_num_ptr->cast<Int64ImmPtr>()->value();
    if (dev_num == 0) {
      continue;
    }
    InsertDivAndAllReduceForNorm(node_user_map, parameter, dev_num);
  }
}
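// Entry point of the step-parallel pass: it initializes the parallel context, marks
// the forward graph, extracts shapes and strategies, and inserts all communication
// and redistribution operators, then dumps the transformed graph. It runs only once
// per graph in (semi-)auto parallel mode.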
bool StepParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &optimizer) {
#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
  if (ps::PSContext::instance()->is_server() || ps::PSContext::instance()->is_scheduler()) {
    return false;
  }
#endif
  MS_EXCEPTION_IF_NULL(root);
  MS_EXCEPTION_IF_NULL(optimizer);
  MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance());
  std::string parallel_mode = ParallelContext::GetInstance()->parallel_mode();
  HandleDataParallel();
  pipeline::ResourceBasePtr res = optimizer->resource();
  MS_EXCEPTION_IF_NULL(res);
  FuncGraphManagerPtr manager = res->manager();
  MS_EXCEPTION_IF_NULL(manager);
  auto pipeline_stages = ParallelContext::GetInstance()->pipeline_stage_split_num();
  // assume no change to the graph
  bool changes = false;
  // control whether to use model_parallel mode
  if (!root->has_flag(kAutoParallel) || ((parallel_mode != kAutoParallel) && (parallel_mode != kSemiAutoParallel)) ||
      (root->has_flag(SEMI_AUTO_PARALLEL_RUN_ONCE_ONLY))) {
    if (!root->has_flag(CHECK_SET_STRATEGY_VALID_ONCE_ONLY)) {
      MS_LOG(WARNING) << "Strategies would be ignored in " << parallel_mode
                      << ", shard() is only valid in [semi_]auto_parallel.";
      root->set_flag(CHECK_SET_STRATEGY_VALID_ONCE_ONLY, true);
    }
    ReorderForPipelineSplit(root, manager, pipeline_stages);
    return changes;
  }
  struct timeval start_time, end_time;
  (void)gettimeofday(&start_time, nullptr);
  MS_LOG(INFO) << "Now entering step parallel";
  DumpGraph(root, std::string(STEP_PARALLEL_BEGIN));
  AnfNodePtr ret = root->get_return();
  MS_EXCEPTION_IF_NULL(ret);
  std::vector<AnfNodePtr> all_nodes = DeepScopedGraphSearch(ret);
  std::reverse(all_nodes.begin(), all_nodes.end());
  if (parallel_mode != kAutoParallel) {
    TOTAL_OPS = 0;
    if (pipeline_stages <= 1 && ParallelInit() != SUCCESS) {
      MS_LOG(EXCEPTION) << "Parallel init failed";
    }
    PipelinePreProcess(root, manager, all_nodes);
    // mark the forward cnodes; parallel only cares about these nodes
    MarkForwardCNode(root);
    if (FindCommunicationOp(all_nodes)) {
      MS_LOG(EXCEPTION) << "The graph contains a communication op";
    }
    if (IsInsertVirtualOutput(root)) {
      InsertVirtualOutput(root, all_nodes);
      AnfNodePtr ret_after = root->get_return();
      MS_EXCEPTION_IF_NULL(ret_after);
      all_nodes = DeepScopedGraphSearch(ret_after);
      std::reverse(all_nodes.begin(), all_nodes.end());
    }
    // extract shapes and strategies, set operator_info
    ExtractInformation(all_nodes);
    ReshapeInit(all_nodes);
  }
  SetCastForParamNotRecompute(all_nodes);
  HandleRootReshapeAndSaveStrategy(all_nodes);
  HandleForwardMakeTupleAndMakeList(all_nodes);
  // if an input or parameter has multiple users, check whether its split strategies are consistent
  CheckParameterSplit(all_nodes);
  HandleSymbolicKeyInstance(root, all_nodes);
  // cover the parallel shape
  CoverSliceShape(root);
  // handle inputs that are not used
  HandleNoUsedParameter(root);
  // set the shape for the optimizer's cloned tensors
  SetClonedTensorShapeForOptimizer(root);
  HandleAdaFactorOpt(root);
  auto adasum_param_tensor_layout_map = AdaSumParamTensorLayout(root);
  bool is_apply_adasum = HandleAdaSum(root, all_nodes, &adasum_param_tensor_layout_map);
  // save the strategy as a checkpoint for multi-train
  if (StrategyCheckpoint::GetInstance().SaveCheckPointOn()) {
    CheckpointStrategy(all_nodes, root);
  }
  // ForwardCommunication, BackwardCommunication, TensorRedistribution
  ParallelCommunication(root, all_nodes, manager);
  if (is_apply_adasum) {
    HandleMirrorInAdaSum(root, &adasum_param_tensor_layout_map);
  }
  PipelinePostProcess(root, all_nodes);
  HandleGroupInfo(root);
  // handle fully split parameters in grad accumulation; does not cover optimizer-sharded parameters
  HandleFullySplitParameters(root);
  HandleGlobalNormScale(root, all_nodes, manager);
  DumpGraph(root, std::string(STEP_PARALLEL_END));
  // step parallel only runs once
  root->set_flag(SEMI_AUTO_PARALLEL_RUN_ONCE_ONLY, true);
  res->SetResult(pipeline::kStepParallelGraph, root);
  // in auto parallel mode, there is no need to check whether strategies are set
  root->set_flag(CHECK_SET_STRATEGY_VALID_ONCE_ONLY, true);
  (void)gettimeofday(&end_time, nullptr);
  uint64_t time = kUSecondInSecond * static_cast<uint64_t>(end_time.tv_sec - start_time.tv_sec);
  time += static_cast<uint64_t>(end_time.tv_usec - start_time.tv_usec);
  MS_LOG(INFO) << "Now leaving step parallel, used time: " << time << " us";
  return changes;
}
// Needed by rec_parser
std::vector<std::string> ExtractInputsTensorName(const CNodePtr &node) {
  std::vector<std::string> name_inputs;
  std::vector<AnfNodePtr> all_inputs = node->inputs();
  std::vector<AnfNodePtr> node_inputs{all_inputs.begin() + 1, all_inputs.end()};
  std::string node_id = node->UniqueId();
  name_inputs.push_back(node_id);
  for (auto &input : node_inputs) {
    std::string name = input->UniqueId();
    name_inputs.push_back(name);
  }
  return name_inputs;
}
}  // namespace parallel
}  // namespace mindspore