
context.cc (9.3 kB)

/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "frontend/parallel/context.h"

#include <algorithm>
#include <cstdint>
#include <functional>
#include <map>
#include <memory>
#include <utility>

#include "frontend/parallel/device_manager.h"

namespace mindspore {
namespace parallel {
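// Maps each parameter name to its original shape, so the shape can be restored
// when the graph is recompiled for evaluation/prediction or grad accumulation.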
static std::map<std::string, Shape> param_shapes;

std::vector<std::string> PARALLEL_MODE_LIST = {STAND_ALONE, DATA_PARALLEL, HYBRID_PARALLEL, SEMI_AUTO_PARALLEL,
                                               AUTO_PARALLEL};
std::vector<std::string> STRATEGY_SEARCH_MODE_LIST = {DYNAMIC_PROGRAMMING, RECURSIVE_PROGRAMMING};
std::vector<std::string> COMMUNI_PARALLEL_MODE_LIST = {ALL_GROUP_PARALLEL, SAME_SERVER_GROUP_PARALLEL,
                                                       NO_GROUP_PARALLEL};

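// Process-wide singleton storage; populated lazily by GetInstance().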
std::shared_ptr<ParallelContext> ParallelContext::inst_context_ = nullptr;

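// Constructs the singleton on first call. There is no locking here, so the
// first call is expected to happen before any concurrent access.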
std::shared_ptr<ParallelContext> ParallelContext::GetInstance() {
  if (inst_context_ == nullptr) {
    inst_context_.reset(new (std::nothrow) ParallelContext());
  }
  return inst_context_;
}

ParallelContext::ParallelContext() { Reset(); }

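// Restores every option to its default value (also invoked by the constructor).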
void ParallelContext::Reset() {
  gradients_mean_ = false;
  full_batch_ = false;
  gradient_fp32_sync_ = true;
  loss_repeated_mean_ = true;
  device_num_ = 1;
  global_rank_ = 0;
  device_num_is_set_ = false;
  global_rank_is_set_ = false;
  parallel_mode_ = STAND_ALONE;
  parameter_broadcast_ = false;
  parameter_broadcast_is_set_ = false;
  enable_all_reduce_fusion_ = false;
  strategy_ckpt_load_file_ = "";
  strategy_ckpt_save_file_ = "";
  enable_parallel_optimizer_ = false;
  all_reduce_fusion_split_indices_.clear();
  all_reduce_fusion_split_sizes_.clear();
  strategy_search_mode_ = DYNAMIC_PROGRAMMING;
  pipeline_stage_split_num_ = 1;
  grad_accumulation_step_ = 1;
  communi_parallel_mode_ = ALL_GROUP_PARALLEL;
  optimizer_weight_shard_size_ = -1;
  optimizer_weight_shard_integrated_save_ = false;
}

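// Plain setters. Some (device_num, global_rank, parameter_broadcast) also
// record that the value was explicitly set, so later logic can distinguish
// user-provided values from defaults.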
void ParallelContext::set_device_num(int64_t device_num) {
  device_num_ = device_num;
  device_num_is_set_ = true;
}

void ParallelContext::set_global_rank(int64_t global_rank) {
  global_rank_ = global_rank;
  global_rank_is_set_ = true;
}

void ParallelContext::set_gradients_mean(bool gradients_mean) { gradients_mean_ = gradients_mean; }

void ParallelContext::set_full_batch(bool full_batch) { full_batch_ = full_batch; }

void ParallelContext::set_grad_accumulation_step(int64_t grad_accumulation_step) {
  grad_accumulation_step_ = grad_accumulation_step;
}

void ParallelContext::set_gradient_fp32_sync(bool gradient_fp32_sync) { gradient_fp32_sync_ = gradient_fp32_sync; }

void ParallelContext::set_loss_repeated_mean(bool loss_repeated_mean) { loss_repeated_mean_ = loss_repeated_mean; }

void ParallelContext::set_pipeline_stage_split_num(const int64_t stage_num) { pipeline_stage_split_num_ = stage_num; }

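// Validating setters: values outside the corresponding *_LIST are rejected and
// reported through the boolean return value.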
bool ParallelContext::set_parallel_mode(const std::string &parallel_mode) {
  auto iter = std::find(PARALLEL_MODE_LIST.begin(), PARALLEL_MODE_LIST.end(), parallel_mode);
  if (iter == PARALLEL_MODE_LIST.end()) {
    MS_LOG(INFO) << "Invalid parallel mode: " << parallel_mode;
    return false;
  }
  parallel_mode_ = parallel_mode;
  return true;
}

bool ParallelContext::set_strategy_search_mode(const std::string &strategy_search_mode) {
  auto iter = std::find(STRATEGY_SEARCH_MODE_LIST.begin(), STRATEGY_SEARCH_MODE_LIST.end(), strategy_search_mode);
  if (iter == STRATEGY_SEARCH_MODE_LIST.end()) {
    MS_LOG(INFO) << "Invalid strategy search mode: " << strategy_search_mode;
    return false;
  }
  strategy_search_mode_ = strategy_search_mode;
  return true;
}

void ParallelContext::set_parameter_broadcast(bool parameter_broadcast) {
  parameter_broadcast_ = parameter_broadcast;
  parameter_broadcast_is_set_ = true;
}

void ParallelContext::set_strategy_ckpt_load_file(const std::string &strategy_ckpt_load_file) {
  strategy_ckpt_load_file_ = strategy_ckpt_load_file;
}

void ParallelContext::set_strategy_ckpt_save_file(const std::string &strategy_ckpt_save_file) {
  strategy_ckpt_save_file_ = strategy_ckpt_save_file;
}

void ParallelContext::set_group_ckpt_save_file(const std::string &group_ckpt_save_file) {
  group_ckpt_save_file_ = group_ckpt_save_file;
}

void ParallelContext::set_optimizer_weight_shard_size(int64_t optimizer_weight_shard_size) {
  optimizer_weight_shard_size_ = optimizer_weight_shard_size;
}

void ParallelContext::set_optimizer_weight_shard_integrated_save(bool optimizer_weight_shard_integrated_save) {
  optimizer_weight_shard_integrated_save_ = optimizer_weight_shard_integrated_save;
}

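// Per-communication-group AllReduce fusion configuration. The getters fall
// back to an empty vector when a group has no entry.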
void ParallelContext::SetAllReduceFusionSplitIndices(const std::vector<uint32_t> indices, const std::string &group) {
  all_reduce_fusion_split_indices_[group] = indices;
}

const std::vector<uint32_t> ParallelContext::GetAllReduceFusionSplitIndices(const std::string &group) const {
  auto iter = all_reduce_fusion_split_indices_.find(group);
  if (iter != all_reduce_fusion_split_indices_.end()) {
    return iter->second;
  }
  return {};
}

void ParallelContext::SetAllReduceFusionSplitSizes(const std::vector<uint32_t> sizes, const std::string &group) {
  all_reduce_fusion_split_sizes_[group] = sizes;
}

const std::vector<uint32_t> ParallelContext::GetAllReduceFusionSplitSizes(const std::string &group) const {
  auto iter = all_reduce_fusion_split_sizes_.find(group);
  if (iter != all_reduce_fusion_split_sizes_.end()) {
    return iter->second;
  }
  return {};
}

bool ParallelContext::set_communi_parallel_mode(const std::string &communi_parallel_mode) {
  auto iter = std::find(COMMUNI_PARALLEL_MODE_LIST.begin(), COMMUNI_PARALLEL_MODE_LIST.end(), communi_parallel_mode);
  if (iter == COMMUNI_PARALLEL_MODE_LIST.end()) {
    MS_LOG(INFO) << "Invalid communication parallel mode: " << communi_parallel_mode;
    return false;
  }
  communi_parallel_mode_ = communi_parallel_mode;
  return true;
}

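// The three ParallelParameterContext* helpers below cooperate through the
// file-level param_shapes map: InitShape decides whether shapes are recorded
// or restored for the current compilation, CkptShape records them during
// training, and RestoreShape re-applies them afterwards.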
// Clear param_shapes before training in auto-parallel or semi-auto-parallel mode
void ParallelContext::ParallelParameterContextInitShape(const FuncGraphPtr &func_graph) {
  MS_EXCEPTION_IF_NULL(func_graph);
  if (!func_graph->has_flag(AUTO_PARALLEL)) {
    return;
  }
  if (!func_graph->has_flag(TRAINING)) {
    init_param_shape_ = false;
    MS_LOG(INFO) << "In parallel evaluation or prediction, the parameter shape may need to be restored";
    return;
  }
  if ((ParallelContext::GetInstance()->grad_accumulation_step() > 1) && !func_graph->has_flag(ACCUMULATION)) {
    init_param_shape_ = false;
    MS_LOG(INFO) << "In the second graph of parallel grad accumulation, the parameter shape needs to be restored";
  } else {
    param_shapes.clear();
    init_param_shape_ = true;
    MS_LOG(INFO) << "Init the parameter shape dict";
  }
}

// Restore the parameters' shape for evaluation/prediction in auto-parallel or semi-auto-parallel mode
void ParallelContext::ParallelParameterContextRestoreShape(const FuncGraphPtr &func_graph,
                                                           const ParameterPtr &param_node, AbstractBasePtr ptr) {
  MS_EXCEPTION_IF_NULL(func_graph);
  MS_EXCEPTION_IF_NULL(param_node);
  MS_EXCEPTION_IF_NULL(ptr);
  if (!func_graph->has_flag(AUTO_PARALLEL)) {
    return;
  }
  if (init_param_shape_) {
    return;
  }
  auto iter = param_shapes.find(param_node->name());
  if (iter == param_shapes.end()) {
    MS_LOG(WARNING) << "Cannot find the shape for parameter " << param_node->name();
    return;
  }
  Shape shape = iter->second;
  std::shared_ptr<abstract::BaseShape> base_shape = std::make_shared<abstract::Shape>(shape);
  ptr->set_shape(base_shape);
  MS_LOG(INFO) << "The parameter name is " << param_node->name() << ", the shape is " << shape;
}

// Checkpoint the parameters' shape for training in auto-parallel or semi-auto-parallel mode
void ParallelContext::ParallelParameterContextCkptShape(const FuncGraphPtr &func_graph, const ParameterPtr &param_node,
                                                        const AbstractBasePtr &ptr) {
  MS_EXCEPTION_IF_NULL(func_graph);
  MS_EXCEPTION_IF_NULL(param_node);
  MS_EXCEPTION_IF_NULL(ptr);
  if (!func_graph->has_flag(AUTO_PARALLEL)) {
    return;
  }
  if (!init_param_shape_) {
    return;
  }
  std::vector<int64_t> shape = dyn_cast<abstract::Shape>(ptr->GetShapeTrack())->shape();
  auto ret = param_shapes.try_emplace(param_node->name(), shape);
  if (!ret.second) {
    MS_LOG(EXCEPTION) << "The shape for parameter name " << param_node->name() << " already exists";
    return;
  }
  MS_LOG(DEBUG) << "The parameter name is " << param_node->name() << ", the shape is " << shape;
}

}  // namespace parallel
}  // namespace mindspore
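
For orientation, here is a minimal sketch of how this context might be configured from C++. The member functions and mode constants are the ones defined or referenced above; the surrounding function name and the chosen values (8 devices, rank 0) are illustrative only, not taken from this file.

#include "frontend/parallel/context.h"

using mindspore::parallel::ParallelContext;

void ConfigureDataParallel() {
  // GetInstance() lazily creates the shared singleton.
  auto context = ParallelContext::GetInstance();
  context->set_device_num(8);   // hypothetical 8-device setup
  context->set_global_rank(0);  // hypothetical rank of this process
  // set_parallel_mode validates against PARALLEL_MODE_LIST and returns false
  // (after logging) when given an unknown mode string.
  if (!context->set_parallel_mode(mindspore::parallel::DATA_PARALLEL)) {
    // handle the invalid-mode case here
  }
  context->set_gradients_mean(true);
}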