You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

adam_fusion.cc 7.3 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167
  1. /**
  2. * Copyright 2020-2021 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "backend/optimizer/gpu/adam_fusion.h"
  17. #include <memory>
  18. #include <vector>
  19. #include <string>
  20. #include "backend/session/anf_runtime_algorithm.h"
  21. #include "ir/primitive.h"
  22. #include "utils/utils.h"
  23. #include "backend/optimizer/common/helper.h"
  24. namespace mindspore {
  25. namespace opt {
  26. namespace {
  27. kernel::KernelBuildInfoPtr GenerateKernelBuildInfo(CNodePtr node) {
  28. std::vector<std::string> inputs_format;
  29. std::vector<std::string> outputs_format;
  30. std::vector<TypeId> inputs_type;
  31. std::vector<TypeId> outputs_type;
  32. kernel::KernelBuildInfo::KernelBuildInfoBuilder builder;
  33. size_t input_num = AnfAlgo::GetInputTensorNum(node);
  34. for (size_t input_index = 0; input_index < input_num; ++input_index) {
  35. inputs_type.push_back(AnfAlgo::GetPrevNodeOutputInferDataType(node, input_index));
  36. inputs_format.push_back(kOpFormat_DEFAULT);
  37. }
  38. size_t output_num = AnfAlgo::GetOutputTensorNum(node);
  39. for (size_t output_index = 0; output_index < output_num; ++output_index) {
  40. outputs_type.push_back(AnfAlgo::GetOutputInferDataType(node, output_index));
  41. outputs_format.push_back(kOpFormat_DEFAULT);
  42. }
  43. builder.SetInputsDeviceType(inputs_type);
  44. builder.SetInputsFormat(inputs_format);
  45. builder.SetOutputsDeviceType(outputs_type);
  46. builder.SetOutputsFormat(outputs_format);
  47. return builder.Build();
  48. }
  49. AnfNodePtr RelpaceOutputEdge(const AnfNodePtr &node, CNodePtr adam, AnfNodePtr u_input) {
  50. // Replace the parameters of the last UpdateState to maintain
  51. // the execution order of FusedAdam and the following operators.
  52. // n represents the operator assign_v in {prim::kPrimDepend, next_param, assign_v}
  53. const auto &n = node->cast<CNodePtr>()->input(2);
  54. MS_EXCEPTION_IF_NULL(n);
  55. const auto &fg = n->func_graph();
  56. MS_EXCEPTION_IF_NULL(fg);
  57. auto mgr = fg->manager();
  58. MS_EXCEPTION_IF_NULL(mgr);
  59. auto &node_users = mgr->node_users();
  60. auto iter = node_users.find(n);
  61. if (iter == node_users.end()) {
  62. MS_LOG(EXCEPTION) << "Can not find node : " << n->DebugString();
  63. }
  64. auto &users = iter->second;
  65. for (auto &user : users) {
  66. if (IsPrimitiveCNode(user.first, prim::kPrimUpdateState)) {
  67. (user.first)->cast<CNodePtr>()->set_input(1, u_input);
  68. (user.first)->cast<CNodePtr>()->set_input(2, adam);
  69. break;
  70. }
  71. }
  72. return adam;
  73. }
  74. } // namespace
  75. const BaseRef AdamFusion::DefinePattern() const {
  76. VectorRef load_param = VectorRef({prim::kPrimLoad, param_, u_});
  77. VectorRef load_m = VectorRef({prim::kPrimLoad, m_, u_});
  78. VectorRef load_v = VectorRef({prim::kPrimLoad, v_, u_});
  79. VectorRef next_m = VectorRef({prim::kPrimAdd, VectorRef({prim::kPrimMul, beta1_, load_m}),
  80. VectorRef({prim::kPrimMul, one_sub_beta1_, gradient_})});
  81. VectorRef next_v =
  82. VectorRef({prim::kPrimAdd, VectorRef({prim::kPrimMul, beta2_, load_v}),
  83. VectorRef({prim::kPrimMul, one_sub_beta2_, VectorRef({prim::kPrimSquare, gradient_})})});
  84. VectorRef update =
  85. VectorRef({prim::kPrimRealDiv, next_m, VectorRef({prim::kPrimAdd, eps_, VectorRef({prim::kPrimSqrt, next_v})})});
  86. VectorRef update_with_lr = VectorRef({prim::kPrimMul, lr_, update});
  87. VectorRef next_param = VectorRef({prim::kPrimSub, load_param, update_with_lr});
  88. VectorRef tuple_load = VectorRef({prim::kPrimMakeTuple, load_param, load_m, load_v});
  89. VectorRef next_state = VectorRef({prim::kPrimUpdateState, u_, tuple_load});
  90. VectorRef assign_param = VectorRef({prim::kPrimAssign, param_, next_param, next_state});
  91. next_state = VectorRef({prim::kPrimUpdateState, next_state, assign_param});
  92. next_param = VectorRef({prim::kPrimDepend, next_param, assign_param});
  93. VectorRef assign_m = VectorRef({prim::kPrimAssign, m_, next_m, next_state});
  94. next_state = VectorRef({prim::kPrimUpdateState, next_state, assign_m});
  95. next_param = VectorRef({prim::kPrimDepend, next_param, assign_m});
  96. VectorRef assign_v = VectorRef({prim::kPrimAssign, v_, next_v, next_state});
  97. next_param = VectorRef({prim::kPrimDepend, next_param, assign_v});
  98. return next_param;
  99. }
// Replace the matched unfused Adam subgraph with a single FusedAdam node.
// @param graph  the graph being optimized.
// @param node   the matched root node (the final Depend of the pattern).
// @param equiv  pattern-variable bindings produced by the matcher.
// @return the fused node after its output edge has been rewired.
const AnfNodePtr AdamFusion::Process(const FuncGraphPtr &graph, const AnfNodePtr &node, const EquivPtr &equiv) const {
  MS_EXCEPTION_IF_NULL(graph);
  MS_EXCEPTION_IF_NULL(node);
  MS_EXCEPTION_IF_NULL(equiv);
  // Pull each captured pattern variable out of the match result.
  auto beta1_input = utils::cast<AnfNodePtr>((*equiv)[beta1_]);
  auto one_sub_beta1_input = utils::cast<AnfNodePtr>((*equiv)[one_sub_beta1_]);
  auto beta2_input = utils::cast<AnfNodePtr>((*equiv)[beta2_]);
  auto one_sub_beta2_input = utils::cast<AnfNodePtr>((*equiv)[one_sub_beta2_]);
  auto eps_input = utils::cast<AnfNodePtr>((*equiv)[eps_]);
  auto lr_input = utils::cast<AnfNodePtr>((*equiv)[lr_]);
  auto param_input = utils::cast<AnfNodePtr>((*equiv)[param_]);
  auto m_input = utils::cast<AnfNodePtr>((*equiv)[m_]);
  auto v_input = utils::cast<AnfNodePtr>((*equiv)[v_]);
  auto gradient_input = utils::cast<AnfNodePtr>((*equiv)[gradient_]);
  auto u_input = utils::cast<AnfNodePtr>((*equiv)[u_]);
  MS_EXCEPTION_IF_NULL(beta1_input);
  MS_EXCEPTION_IF_NULL(one_sub_beta1_input);
  MS_EXCEPTION_IF_NULL(beta2_input);
  MS_EXCEPTION_IF_NULL(one_sub_beta2_input);
  MS_EXCEPTION_IF_NULL(eps_input);
  MS_EXCEPTION_IF_NULL(lr_input);
  MS_EXCEPTION_IF_NULL(param_input);
  MS_EXCEPTION_IF_NULL(m_input);
  MS_EXCEPTION_IF_NULL(v_input);
  MS_EXCEPTION_IF_NULL(gradient_input);
  MS_EXCEPTION_IF_NULL(u_input);
  // Use depend(param, u) to maintain the execution order of FusedAdam and the previous operators.
  auto prim_depend = std::make_shared<Primitive>(prim::kPrimDepend->name());
  MS_EXCEPTION_IF_NULL(prim_depend);
  std::vector<AnfNodePtr> param_inputs = {NewValueNode(prim_depend), param_input, u_input};
  auto param = graph->NewCNode(param_inputs);
  MS_EXCEPTION_IF_NULL(param);
  // The Depend forwards the parameter's value, so it keeps the same abstract.
  param->set_abstract(param_input->abstract());
  // Fused into a FusedAdam operator.
  // NOTE: input order here defines the FusedAdam kernel's expected signature.
  auto prim = std::make_shared<Primitive>(kFusedAdamName);
  MS_EXCEPTION_IF_NULL(prim);
  auto prim_value = NewValueNode(prim);
  std::vector<AnfNodePtr> inputs = {
    prim_value, beta1_input, one_sub_beta1_input, beta2_input, one_sub_beta2_input, eps_input, lr_input, param,
    m_input, v_input, gradient_input};
  auto adam = graph->NewCNode(inputs);
  MS_EXCEPTION_IF_NULL(adam);
  // The fused node keeps the inferred type/shape of the matched root's first output.
  auto types = {AnfAlgo::GetOutputInferDataType(node, 0)};
  auto shapes = {AnfAlgo::GetOutputInferShape(node, 0)};
  AnfAlgo::SetOutputInferTypeAndShape(types, shapes, adam.get());
  adam->set_scope(node->scope());
  // Attach kernel build info so kernel selection can pick an implementation.
  auto build_info = GenerateKernelBuildInfo(adam);
  AnfAlgo::SetSelectKernelBuildInfo(build_info, adam.get());
  return RelpaceOutputEdge(node, adam, u_input);
}
  150. } // namespace opt
  151. } // namespace mindspore