
parameter_manager.cc 24 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "frontend/parallel/parameter_manager.h"
#include <inttypes.h>
#include <sys/time.h>
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include "base/core_ops.h"
#include "frontend/operator/ops.h"
#include "frontend/optimizer/optimizer.h"
#include "frontend/parallel/context.h"
#include "frontend/parallel/device_manager.h"
#include "frontend/parallel/graph_util/generate_graph.h"
#include "frontend/parallel/graph_util/graph_info.h"
#include "frontend/parallel/graph_util/node_info.h"
#include "frontend/parallel/graph_util/pipeline_split_utils.h"
#include "frontend/parallel/node_check.h"
#include "ir/param_info.h"
#include "ir/tensor.h"
#include "utils/trace_base.h"
#include "utils/comm_manager.h"
#include "utils/ms_context.h"
#include "utils/symbolic.h"
#include "mindspore/core/utils/parallel_node_check.h"
#include "frontend/parallel/step_parallel_utils.h"
namespace mindspore {
namespace parallel {
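// Collect the users of a parameter that is referenced through a RefKey value node: record every "care" CNode that
// consumes either the RefKey itself or the Parameter that the RefKey resolves to.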
static ParameterUsersInfo FindRefKeyNodeUsers(const RefKeyPair &ref_key_pair, bool (*IsCareNode)(const CNodePtr &)) {
  // Dealing with the RefKey case
  ParameterUsersInfo parameter_user_info;
  auto refkeys = ref_key_pair.second;
  auto cnode = ref_key_pair.first;

  auto cnode_ptr = cnode->cast<CNodePtr>();
  if ((cnode_ptr == nullptr) || !IsValueNode<Primitive>(cnode_ptr->input(0)) || !IsCareNode(cnode_ptr)) {
    return parameter_user_info;
  }

  if (refkeys.size() > 1) {
    MS_LOG(EXCEPTION) << "CNode: " << cnode->fullname_with_scope() << "'s inputs have more than one RefKey";
  }
  MS_EXCEPTION_IF_NULL(cnode->func_graph());
  auto cnode_func_graph = cnode->func_graph();
  MS_EXCEPTION_IF_NULL(cnode->func_graph()->manager());

  // Find the RefKey being used
  auto candidate_set_by_refkey = cnode_func_graph->manager()->node_users()[refkeys[0]];
  for (auto &candidate : candidate_set_by_refkey) {
    auto candidate_node = candidate.first;
    auto c = candidate_node->cast<CNodePtr>();
    if ((c == nullptr) || !IsValueNode<Primitive>(c->input(0)) || !IsCareNode(c)) {
      continue;
    }
    parameter_user_info.second.second.add(candidate);
  }

  // Find the corresponding Parameter being used
  std::vector<AnfNodePtr> parameters = FindParameterByRefKeyNode(refkeys[0], cnode_func_graph);
  if (parameters.size() != 1) {
    MS_LOG(EXCEPTION) << "Find parameter by ref key node failed";
  }
  parameter_user_info.first = parameters[0]->cast<ParameterPtr>()->name();
  parameter_user_info.second.first = parameters[0];
  auto candidate_set_by_para = cnode_func_graph->manager()->node_users()[parameters[0]];
  for (auto &candidate : candidate_set_by_para) {
    auto candidate_node = candidate.first;
    auto c = candidate_node->cast<CNodePtr>();
    if ((c == nullptr) || !IsValueNode<Primitive>(c->input(0)) || !IsCareNode(c)) {
      continue;
    }
    (void)parameter_user_info.second.second.insert(candidate);
  }
  return parameter_user_info;
}
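// Collect the users of a plain Parameter node. Load nodes are looked through: the users of a kPrimLoad CNode on the
// parameter are recorded instead of the Load itself. Users without OperatorInfo and Receive nodes are skipped.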
static ParameterUsersInfo FindParameterNodeUsers(const AnfNodePtr &node, bool (*IsCareNode)(const CNodePtr &)) {
  // In this case, node is a Parameter
  ParameterUsersInfo parameter_user_info;
  MS_EXCEPTION_IF_NULL(node->func_graph());
  MS_EXCEPTION_IF_NULL(node->func_graph()->manager());
  auto candidate_set = node->func_graph()->manager()->node_users()[node];
  for (auto &candidate : candidate_set) {
    auto candidate_node = candidate.first;
    if (IsPrimitiveCNode(candidate_node, prim::kPrimLoad)) {
      if (candidate.second != 1) {
        continue;
      }
      auto load_node_users = node->func_graph()->manager()->node_users()[candidate_node];
      for (auto &node_user : load_node_users) {
        auto cnode = node_user.first->cast<CNodePtr>();
        if (cnode == nullptr || !cnode->has_user_data<OperatorInfo>() || IsSomePrimitive(cnode, RECEIVE)) {
          continue;
        }
        (void)parameter_user_info.second.second.insert(node_user);
      }
    } else {
      auto c = candidate_node->cast<CNodePtr>();
      if (c == nullptr || !c->has_user_data<OperatorInfo>() || IsSomePrimitive(c, RECEIVE)) {
        continue;
      }
      (void)parameter_user_info.second.second.insert(candidate);
    }
  }
  parameter_user_info.first = node->cast<ParameterPtr>()->name();
  parameter_user_info.second.first = node;
  return parameter_user_info;
}
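// If the given node is a CNode with one or more RefKey inputs, return the pair (cnode, refkeys); otherwise return
// {nullptr, {}}.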
static RefKeyPair CNodeWithRefKeys(const AnfNodePtr &cnode) {
  MS_EXCEPTION_IF_NULL(cnode);
  std::vector<AnfNodePtr> refkeys;
  if (cnode->isa<CNode>()) {
    auto cnode_ptr = cnode->cast<CNodePtr>();
    auto inputs = cnode_ptr->inputs();
    for (auto &one_input : inputs) {
      if (IsValueNode<RefKey>(one_input)) {
        refkeys.push_back(one_input);
      }
    }
    if (refkeys.size() >= 1) {
      return std::make_pair(cnode, refkeys);
    }
  }
  return {nullptr, refkeys};
}
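// Entry point: dispatch to the RefKey handling or the plain Parameter handling depending on the kind of node.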
ParameterUsersInfo FindParameterUsers(const AnfNodePtr &node, bool (*IsCareNode)(const CNodePtr &)) {
  ParameterUsersInfo parameter_users_info;
  auto cnode_with_refkeys = CNodeWithRefKeys(node);
  if (cnode_with_refkeys.first != nullptr) {
    // the node is a ref key node
    return FindRefKeyNodeUsers(cnode_with_refkeys, IsCareNode);
  } else if (node->isa<Parameter>()) {
    // the node is a parameter node
    return FindParameterNodeUsers(node, IsCareNode);
  }
  return parameter_users_info;
}
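// Recursively decide whether a parameter is really used. When the first user is a call to a sub-graph, or a
// J-wrapped sub-graph, follow the matching formal parameter of that sub-graph instead.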
static bool IsUsedParameter(const FuncGraphPtr &graph, const AnfNodePtr &parameter) {
  MS_EXCEPTION_IF_NULL(graph);
  MS_EXCEPTION_IF_NULL(parameter);
  auto manager = graph->manager();
  auto node_users = manager->node_users()[parameter];
  if (node_users.empty()) {
    return false;
  }
  for (auto node_user : node_users) {
    auto use_node = node_user.first->cast<CNodePtr>();
    if (IsValueNode<FuncGraph>(use_node->input(0))) {
      auto graph_sub = GetValueNode<FuncGraphPtr>(use_node->input(0));
      auto parameters = graph_sub->parameters();
      auto parameter_sub = parameters[node_user.second - 1];
      return IsUsedParameter(graph_sub, parameter_sub);
    }
    if (use_node->input(0)->isa<CNode>()) {
      auto cnode = use_node->input(0)->cast<CNodePtr>();
      if (!IsSomePrimitive(cnode, J) || !IsValueNode<FuncGraph>(cnode->input(1))) {
        return true;
      }
      auto graph_sub = GetValueNode<FuncGraphPtr>(cnode->input(1));
      auto parameters = graph_sub->parameters();
      auto parameter_sub = parameters[node_user.second - 1];
      return IsUsedParameter(graph_sub, parameter_sub);
    }
    return true;
  }
  return true;
}
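// Compute the sorted list of ranks that hold the same slice of a tensor, derived from the device matrix and tensor
// map of its layout.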
static RankList GetGroupByTensorInfo(const TensorInfo &tensor_info) {
  CheckGlobalDeviceManager();
  int64_t rank = g_device_manager->global_rank();
  RankList stage_device_list = g_device_manager->GetDeviceListInThisStage();
  Shape dev_matrix_shape = tensor_info.tensor_layout().device_arrangement().array();
  Shape tensor_map = tensor_info.tensor_layout().tensor_map().array();

  DeviceMatrix dev_matrix(rank, stage_device_list, dev_matrix_shape);
  RankList group_devices;
  if (dev_matrix.GetDevicesByTensorMap(tensor_map, &group_devices) != SUCCESS) {
    MS_LOG(EXCEPTION) << "Get devices by tensor map failed";
  }

  std::sort(group_devices.begin(), group_devices.end());
  return group_devices;
}
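// For one user (cnode, input index) of a parameter, look up the operator's input tensor info to obtain the slice
// shape that user expects and the rank group sharing the slice. Send nodes carry the parameter index in the
// PARAM_INDEX primal attribute instead of using the input index.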
static ParameterSliceInfo GetParameterSliceInfo(const std::pair<AnfNodePtr, int64_t> &param_info) {
  auto user_cnode = param_info.first->cast<CNodePtr>();
  MS_EXCEPTION_IF_NULL(user_cnode);
  auto user_input_index = param_info.second;
  OperatorInfoPtr op_info = user_cnode->user_data<OperatorInfo>();
  MS_EXCEPTION_IF_NULL(op_info);

  TensorInfo tensor_info;
  if (IsPrimitiveCNode(user_cnode, prim::kPrimSend)) {
    auto param_index = IntToSize(GetValue<int>(user_cnode->GetPrimalAttr(PARAM_INDEX)));
    tensor_info = op_info->inputs_tensor_info()[param_index];
  } else {
    size_t input_tensor_info_size = op_info->inputs_tensor_info().size();
    if (SizeToLong(input_tensor_info_size) <= user_input_index - 1) {
      MS_LOG(EXCEPTION) << op_info->name() << ": the size of inputs tensor info is " << input_tensor_info_size
                        << ", but the index is " << user_input_index - 1;
    }
    tensor_info = op_info->inputs_tensor_info()[user_input_index - 1];
  }

  ParameterSliceInfo parameter_slice_info;
  parameter_slice_info.slice_shape = tensor_info.slice_shape();
  parameter_slice_info.group_ranks = GetGroupByTensorInfo(tensor_info);
  MS_LOG(DEBUG) << "The op name is " << op_info->name() << ", the parameter index is " << user_input_index - 1
                << ", the slice shape is " << tensor_info.slice_shape() << ", the origin shape is "
                << tensor_info.shape() << ", the group rank list is " << parameter_slice_info.group_ranks;
  return parameter_slice_info;
}
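// Validate that a parameter shared by several operators is split consistently: every user must expect the same slice
// shape, and, unless pipeline parallelism is enabled, the same group rank list.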
void CheckParameterSplit(const std::vector<AnfNodePtr> &all_nodes) {
  for (auto &node : all_nodes) {
    ParameterUsersInfo parameter_users_info = FindParameterUsers(node, IsParallelCareNode);
    auto users_set = parameter_users_info.second.second;
    if (users_set.size() <= 1) {
      continue;
    }

    auto parameter_name = parameter_users_info.first;
    MS_LOG(INFO) << "The parameter: " << parameter_name << " has " << users_set.size() << " users";
    auto first_user = users_set.pop();
    ParameterSliceInfo parameter_slice_info = GetParameterSliceInfo(first_user);
    Shape first_user_slice_shape = parameter_slice_info.slice_shape;
    RankList first_user_group_list = parameter_slice_info.group_ranks;

    for (auto &user : users_set) {
      ParameterSliceInfo user_slice_info = GetParameterSliceInfo(user);
      Shape user_slice_shape = user_slice_info.slice_shape;
      RankList user_group_list = user_slice_info.group_ranks;
      if (first_user_slice_shape != user_slice_shape) {
        MS_LOG(EXCEPTION) << "The parameter: " << parameter_name
                          << " has multiple users, but the slice shapes are different";
      }
      if (ParallelContext::GetInstance()->pipeline_stage_split_num() == 1 && first_user_group_list != user_group_list) {
        MS_LOG(EXCEPTION) << "The parameter: " << parameter_name
                          << " has multiple users, but the group rank lists are different, "
                          << "the group rank list for the first user is " << first_user_group_list
                          << ", and the group rank list for this user is " << user_group_list;
      }
    }
  }
}
namespace {
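// Replace a SymbolicKeyInstance value node that refers to one of the root graph's parameters with an
// embed(parameter) CNode.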
void RevertSymbolicKeyInstance(const FuncGraphPtr &root, const AnfNodePtr &node) {
  MS_EXCEPTION_IF_NULL(root);
  MS_EXCEPTION_IF_NULL(node);
  auto symbolic_key = GetValueNode<SymbolicKeyInstancePtr>(node);
  MS_EXCEPTION_IF_NULL(symbolic_key);
  auto all_upstream_node = root->manager()->node_users()[node];
  for (auto &upstream_node : all_upstream_node) {
    FuncGraphPtr fg = upstream_node.first->func_graph();
    if (symbolic_key->node()->isa<Parameter>()) {
      for (auto &param : root->parameters()) {
        if (*param == *symbolic_key->node()) {
          AnfNodePtr reverted_node = root->NewCNode({NewValueNode(prim::kPrimEmbed), param});
          MS_EXCEPTION_IF_NULL(reverted_node);
          MS_LOG(DEBUG) << "before replace " << node->ToString() << " to node " << reverted_node->DebugString();
          (void)fg->manager()->Replace(node, reverted_node);
          MS_LOG(DEBUG) << "revert node " << node->ToString() << " to node " << reverted_node->DebugString();
        }
      }
    }
  }
}
}  // namespace
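// Scan all nodes and revert every SymbolicKeyInstance value node back to an embed() primitive.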
void HandleSymbolicKeyInstance(const FuncGraphPtr &root, const std::vector<AnfNodePtr> &all_nodes) {
  MS_EXCEPTION_IF_NULL(root);
  for (auto &node : all_nodes) {
    // revert back SymbolicKeyInstance to embed() primitive
    if (IsValueNode<SymbolicKeyInstance>(node)) {
      RevertSymbolicKeyInstance(root, node);
      continue;
    }
  }
}
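// A parameter counts as cloned if it has a default value and its param_info carries the cloned flag.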
bool ParameterIsCloned(const AnfNodePtr &parameter_node) {
  MS_EXCEPTION_IF_NULL(parameter_node);
  auto cloned_parameter = parameter_node->cast<ParameterPtr>();
  MS_EXCEPTION_IF_NULL(cloned_parameter);

  // check whether the parameter is a clone
  if (!cloned_parameter->has_default()) {
    return false;
  }
  auto param_value = cloned_parameter->param_info();
  if (param_value == nullptr) {
    return false;
  }
  bool cloned = param_value->cloned();
  if (!cloned) {
    return false;
  }

  MS_LOG(INFO) << "The parameter: " << cloned_parameter->name() << " is cloned";
  return true;
}
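// Shrink the first dimension of every parameter that is not used anywhere in the graph by the stage device number,
// so unused parameters do not keep their full shape. Skipped in full-batch mode and in grad accumulation mode.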
void HandleNoUsedParameter(const FuncGraphPtr &root) {
  MS_EXCEPTION_IF_NULL(root);
  bool full_batch = ParallelContext::GetInstance()->full_batch();
  if (full_batch) {
    return;
  }

  // In grad accumulation mode with a dynamic learning rate, the optimizer holds some parameters (such as
  // global_step) that are unused in the first graph but used in the second one, so their shapes must not be changed.
  int64_t grad_accumulation_step = ParallelContext::GetInstance()->grad_accumulation_step();
  if (grad_accumulation_step > 1) {
    MS_LOG(INFO) << "In grad accumulation mode, do not handle unused parameters";
    return;
  }

  auto dev_num = g_device_manager->stage_device_num();
  auto parameters = root->parameters();
  for (auto &parameter : parameters) {
    if (IsUsedParameter(root, parameter)) {
      continue;
    }
    auto parameter_shape = GetNodeShape(parameter);
    if (parameter_shape.empty()) {
      continue;
    }
    Shape slice_shape = parameter_shape[0];
    if (slice_shape.empty()) {
      continue;
    }
    slice_shape[0] = slice_shape[0] / dev_num;
    auto slice_shape_ptr = std::make_shared<abstract::Shape>(slice_shape);
    auto abstract = parameter->abstract();
    MS_EXCEPTION_IF_NULL(abstract);
    auto abstract_cloned = abstract->Clone();
    MS_EXCEPTION_IF_NULL(abstract_cloned);
    abstract_cloned->set_shape(slice_shape_ptr);
    parameter->set_abstract(abstract_cloned);
  }
}
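// A parameter is fully split if the device group that shares its slice, derived from the tensor layout, contains
// exactly one rank.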
static bool IsFullySplitParameter(const ParameterPtr &param_ptr) {
  auto tensor_layout = param_ptr->user_data<parallel::TensorLayout>();
  if (tensor_layout == nullptr) {
    return false;
  }

  auto dev_mat_shape = tensor_layout->device_arrangement().array();
  auto tensor_map = tensor_layout->tensor_map().array();
  int64_t rank = g_device_manager->global_rank();
  RankList rank_list = g_device_manager->GetDeviceListInThisStage();
  DeviceMatrix dev_matrix(rank, rank_list, dev_mat_shape);
  RankList group_devices;
  if (dev_matrix.GetDevicesByTensorMap(tensor_map, &group_devices) != SUCCESS) {
    MS_LOG(WARNING) << "Get devices by tensor map failed, invalid tensor layout";
    return false;
  }

  if (group_devices.size() == 1) {
    MS_LOG(INFO) << "The parameter: " << param_ptr->name() << " is fully split";
    return true;
  }
  return false;
}
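// Rewire one user of a fully split parameter: build a _VirtualAdd CNode taking the user's original input and the
// gradient-accumulation parameter, and set it as the user's new input edge.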
static void InsertFullySplitParamGradAccu(const std::pair<AnfNodePtr, int> &node_user,
                                          const FuncGraphManagerPtr &manager, const AnfNodePtr &accu_parameter) {
  auto cnode = node_user.first->cast<CNodePtr>();
  auto prim = GetCNodePrimitive(cnode);
  if (prim == nullptr) {
    MS_LOG(WARNING) << cnode->DebugString() << " can not insert fully split param grad accumulation node";
    return;
  }
  OperatorAttrs attrs;
  auto py_instance = CreatOpInstance(attrs, "_VirtualAdd", "grad_accu");
  auto value_node = NewValueNode(py_instance);
  std::vector<AnfNodePtr> virtual_node_input = {value_node, cnode->input(node_user.second), accu_parameter};
  auto graph = cnode->func_graph();
  auto virtual_node = graph->NewCNode(virtual_node_input);
  manager->SetEdge(cnode, node_user.second, virtual_node);
}
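// In gradient accumulation mode, attach the accumulated gradient of every fully split parameter to the parameter's
// first forward user through a _VirtualAdd node.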
void HandleFullySplitParameters(const FuncGraphPtr &root) {
  int64_t grad_accumulation_step = ParallelContext::GetInstance()->grad_accumulation_step();
  if ((grad_accumulation_step <= 1) || root->has_flag(ACCUMULATION)) {
    return;
  }

  auto parameters = root->parameters();
  auto node_users_map = root->manager()->node_users();
  for (auto &parameter : parameters) {
    auto param_ptr = parameter->cast<ParameterPtr>();
    MS_EXCEPTION_IF_NULL(param_ptr);

    if (!IsFullySplitParameter(param_ptr)) {
      continue;
    }

    auto accu_parameter = FindGradAccuParameter(parameters, param_ptr->name());
    if (!accu_parameter) {
      continue;  // some parameters need no handling, such as the accumulation parameter itself or the learning rate
    }

    auto node_users = node_users_map[parameter];
    for (auto &user : node_users) {
      auto node = user.first;
      auto cnode = node->cast<CNodePtr>();
      MS_EXCEPTION_IF_NULL(cnode);
      if (!cnode->in_forward_flag()) {
        continue;
      }
      InsertFullySplitParamGradAccu(user, root->manager(), accu_parameter);
      MS_LOG(INFO) << "Insert full split assign add node for " << param_ptr->name();
      break;  // only insert once, even if the parameter has many users
    }
  }
}
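// For every cloned parameter (such as optimizer state), locate the source parameter it was cloned from via the
// clone index, then copy that parameter's tensor layout and sliced shape onto the clone. accu_grads clones take the
// slice shape directly, and under the parallel optimizer their layout drops the opt-shard group.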
void SetClonedTensorShapeForOptimizer(const FuncGraphPtr &root) {
  MS_EXCEPTION_IF_NULL(root);
  for (auto &cloned_parameter_node : root->parameters()) {
    MS_EXCEPTION_IF_NULL(cloned_parameter_node);
    auto cloned_parameter = cloned_parameter_node->cast<ParameterPtr>();
    MS_EXCEPTION_IF_NULL(cloned_parameter);

    if (!ParameterIsCloned(cloned_parameter_node)) {
      continue;
    }
    auto param_value = cloned_parameter->param_info();
    if (param_value == nullptr) {
      continue;
    }
    // get the cloned index
    int64_t cloned_index = param_value->cloned_index();

    // find the parameter this one was cloned from
    bool found_be_cloned_parameter = false;
    ParameterPtr cloned_from_parameter = nullptr;
    AnfNodePtr cloned_from_node = nullptr;
    for (auto &be_cloned_parameter_node : root->parameters()) {
      MS_EXCEPTION_IF_NULL(be_cloned_parameter_node);
      auto be_cloned_parameter = be_cloned_parameter_node->cast<ParameterPtr>();
      MS_EXCEPTION_IF_NULL(be_cloned_parameter);
      if (!be_cloned_parameter->has_default()) {
        continue;
      }

      auto param_value_in = be_cloned_parameter->param_info();
      if (param_value_in == nullptr) {
        continue;
      }
      if (!param_value_in->be_cloned()) {
        continue;
      }

      // get the indices of the clones made from this parameter
      auto &be_cloned_index = param_value_in->be_cloned_index();
      if (std::find(be_cloned_index.begin(), be_cloned_index.end(), cloned_index) != be_cloned_index.end()) {
        found_be_cloned_parameter = true;
        cloned_from_parameter = be_cloned_parameter;
        cloned_from_node = be_cloned_parameter_node;
      }
    }

    if (found_be_cloned_parameter) {
      // set the shape and tensor layout for the cloned parameter
      std::string param_name = cloned_parameter_node->cast<ParameterPtr>()->name();
      if (cloned_from_parameter->user_data<TensorLayout>() == nullptr) {
        MS_LOG(WARNING) << "The parameter " << param_name << " has no tensor layout, skip it";
        continue;
      }
      auto tensor_layout = cloned_from_parameter->user_data<TensorLayout>();
      MS_EXCEPTION_IF_NULL(cloned_parameter_node->abstract());
      MS_EXCEPTION_IF_NULL(cloned_from_node->abstract());
      auto cloned_abstract = cloned_parameter_node->abstract()->Clone();
      MS_EXCEPTION_IF_NULL(cloned_abstract);
      // from pipeline or grad accumulation
      if (param_name.find(ACCU_GRADS) != std::string::npos) {
        auto slice_shape = cloned_from_parameter->user_data<TensorLayout>()->slice_shape().array();
        std::shared_ptr<abstract::BaseShape> parallel_shape = std::make_shared<abstract::Shape>(slice_shape);
        MS_EXCEPTION_IF_NULL(parallel_shape);
        cloned_abstract->set_shape(parallel_shape);
        // in opt shard, accu_grad's shape is different from the original param's shape
        if (ParallelContext::GetInstance()->enable_parallel_optimizer()) {
          TensorLayout new_layout = *tensor_layout;
          new_layout.set_opt_shard_group("");
          tensor_layout = std::make_shared<TensorLayout>(new_layout);
        }
      } else {
        cloned_abstract->set_shape(cloned_from_node->abstract()->GetShapeTrack());
      }
      cloned_parameter->set_user_data<TensorLayout>(tensor_layout);
      cloned_parameter_node->set_abstract(cloned_abstract);
      MS_LOG(INFO) << "The parameter: " << cloned_parameter->name()
                   << " is cloned, the source parameter is: " << cloned_from_parameter->name()
                   << ", clone index is: " << cloned_index;
    } else {
      MS_LOG(EXCEPTION) << "The parameter: " << cloned_parameter->name() << " is cloned, cloned index is "
                        << cloned_index << ", but the source parameter was not found";
    }
  }
}
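// For the AdaFactor optimizer, derive the layouts of the auxiliary states exp_avg_sq_row, exp_avg_sq_col and
// exp_avg_sq from the layout of the corresponding parameter: the row/col states (kept only for parameters whose
// slice rank is >= 2) drop the last or the second-to-last dimension, while exp_avg_sq (kept only for rank-1
// parameters) reuses the layout unchanged.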
void HandleAdaFactorOpt(const FuncGraphPtr &root) {
  MS_EXCEPTION_IF_NULL(root);
  for (auto &param_node : root->parameters()) {
    MS_EXCEPTION_IF_NULL(param_node);
    auto param = param_node->cast<ParameterPtr>();
    MS_EXCEPTION_IF_NULL(param);

    std::string param_name = param->name();
    if (param_name.find(EXP_AVG) != std::string::npos) {
      continue;
    }

    auto tensor_layout = param->user_data<TensorLayout>();
    if (tensor_layout == nullptr) {
      continue;
    }

    int64_t row_col_count = 0;
    int64_t exp_avg_sq_count = 0;
    for (auto &row_col_node : root->parameters()) {
      MS_EXCEPTION_IF_NULL(row_col_node);
      auto row_col_param = row_col_node->cast<ParameterPtr>();
      MS_EXCEPTION_IF_NULL(row_col_param);
      std::string row_col_param_name = row_col_param->name();
      std::string exp_row_name = EXP_AVG_SQ_ROW + param_name;
      std::string exp_col_name = EXP_AVG_SQ_COL + param_name;
      std::string exp_avg_name = EXP_AVG_SQ + param_name;

      if ((row_col_param_name != exp_row_name) && (row_col_param_name != exp_col_name) &&
          (row_col_param_name != exp_avg_name)) {
        continue;
      }

      auto slice_shape = tensor_layout->slice_shape().array();
      auto shape_size = slice_shape.size();
      bool is_row_or_col_param = (row_col_param_name == exp_row_name) || (row_col_param_name == exp_col_name);
      if (is_row_or_col_param && shape_size <= 1) {
        continue;
      }
      if (row_col_param_name == exp_avg_name && shape_size != 1) {
        continue;
      }

      auto origin_shape = tensor_layout->tensor_shape().array();
      auto dev_mat = tensor_layout->device_arrangement().array();
      auto tensor_map = tensor_layout->tensor_map().array();

      if (row_col_param_name == exp_row_name) {
        slice_shape.pop_back();
        origin_shape.pop_back();
        tensor_map.pop_back();
        row_col_count++;
      } else if (row_col_param_name == exp_col_name) {
        (void)slice_shape.erase(slice_shape.begin() + static_cast<different_type>(SECOND_FROM_END(shape_size)));
        (void)origin_shape.erase(origin_shape.begin() + static_cast<different_type>(SECOND_FROM_END(shape_size)));
        (void)tensor_map.erase(tensor_map.begin() + static_cast<different_type>(SECOND_FROM_END(shape_size)));
        row_col_count++;
      } else {
        exp_avg_sq_count++;
      }

      TensorLayout new_tensor_layout;
      if (new_tensor_layout.InitFromVector(dev_mat, tensor_map, origin_shape) != SUCCESS) {
        MS_LOG(EXCEPTION) << "Init tensor layout failed";
      }

      auto cloned_abstract = row_col_node->abstract()->Clone();
      MS_EXCEPTION_IF_NULL(cloned_abstract);
      std::shared_ptr<abstract::BaseShape> parallel_shape = std::make_shared<abstract::Shape>(slice_shape);
      MS_EXCEPTION_IF_NULL(parallel_shape);
      cloned_abstract->set_shape(parallel_shape);
      row_col_param->set_user_data<TensorLayout>(std::make_shared<TensorLayout>(new_tensor_layout));
      row_col_node->set_abstract(cloned_abstract);
      MS_LOG(INFO) << "Set the slice shape for " << row_col_param_name << ", origin shape is " << origin_shape
                   << ", new slice shape is " << slice_shape;

      if (row_col_count == 2 || exp_avg_sq_count == 1) {
        break;
      }
    }
  }
}
}  // namespace parallel
}  // namespace mindspore