You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

adam_fusion.cc 7.1 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "backend/optimizer/gpu/adam_fusion.h"
  17. #include <memory>
  18. #include <vector>
  19. #include <string>
  20. #include "backend/session/anf_runtime_algorithm.h"
  21. #include "ir/primitive.h"
  22. #include "utils/utils.h"
  23. #include "backend/optimizer/common/helper.h"
  24. namespace mindspore {
  25. namespace opt {
  26. namespace {
  27. kernel::KernelBuildInfoPtr GenerateKernelBuildInfo(CNodePtr node) {
  28. std::vector<std::string> inputs_format;
  29. std::vector<std::string> outputs_format;
  30. std::vector<TypeId> inputs_type;
  31. std::vector<TypeId> outputs_type;
  32. kernel::KernelBuildInfo::KernelBuildInfoBuilder builder;
  33. size_t input_num = AnfAlgo::GetInputTensorNum(node);
  34. for (size_t input_index = 0; input_index < input_num; ++input_index) {
  35. inputs_type.push_back(AnfAlgo::GetPrevNodeOutputInferDataType(node, input_index));
  36. inputs_format.push_back(kOpFormat_DEFAULT);
  37. }
  38. size_t output_num = AnfAlgo::GetOutputTensorNum(node);
  39. for (size_t output_index = 0; output_index < output_num; ++output_index) {
  40. outputs_type.push_back(AnfAlgo::GetOutputInferDataType(node, output_index));
  41. outputs_format.push_back(kOpFormat_DEFAULT);
  42. }
  43. builder.SetInputsDeviceType(inputs_type);
  44. builder.SetInputsFormat(inputs_format);
  45. builder.SetOutputsDeviceType(outputs_type);
  46. builder.SetOutputsFormat(outputs_format);
  47. return builder.Build();
  48. }
  49. } // namespace
  50. const BaseRef AdamFusion::DefinePattern() const {
  51. VectorRef load_m = VectorRef({prim::kPrimLoad, m_, u_});
  52. VectorRef next_m = VectorRef({prim::kPrimAdd, VectorRef({prim::kPrimMul, beta1_, load_m}),
  53. VectorRef({prim::kPrimMul, one_sub_beta1_, gradient_})});
  54. VectorRef load_v = VectorRef({prim::kPrimLoad, v_, u_});
  55. VectorRef next_v =
  56. VectorRef({prim::kPrimAdd, VectorRef({prim::kPrimMul, beta2_, load_v}),
  57. VectorRef({prim::kPrimMul, one_sub_beta2_, VectorRef({prim::kPrimSquare, gradient_})})});
  58. VectorRef update =
  59. VectorRef({prim::kPrimRealDiv, next_m, VectorRef({prim::kPrimAdd, eps_, VectorRef({prim::kPrimSqrt, next_v})})});
  60. VectorRef update_with_lr = VectorRef({prim::kPrimMul, lr_, update});
  61. VectorRef next_param = VectorRef({prim::kPrimSub, param_, update_with_lr});
  62. VectorRef assign_param = VectorRef({prim::kPrimAssign, param_, next_param, u2_});
  63. VectorRef next_state = VectorRef({prim::kPrimUpdateState, u2_, assign_param});
  64. next_param = VectorRef({prim::kPrimDepend, next_param, assign_param});
  65. VectorRef assign_m = VectorRef({prim::kPrimAssign, m_, next_m, next_state});
  66. next_state = VectorRef({prim::kPrimUpdateState, next_state, assign_m});
  67. next_param = VectorRef({prim::kPrimDepend, next_param, assign_m});
  68. VectorRef assign_v = VectorRef({prim::kPrimAssign, v_, next_v, next_state});
  69. next_param = VectorRef({prim::kPrimDepend, next_param, assign_v});
  70. return next_param;
  71. }
  72. const AnfNodePtr AdamFusion::Process(const FuncGraphPtr &graph, const AnfNodePtr &node, const EquivPtr &equiv) const {
  73. MS_EXCEPTION_IF_NULL(graph);
  74. MS_EXCEPTION_IF_NULL(node);
  75. MS_EXCEPTION_IF_NULL(equiv);
  76. auto beta1_input = utils::cast<AnfNodePtr>((*equiv)[beta1_]);
  77. auto one_sub_beta1_input = utils::cast<AnfNodePtr>((*equiv)[one_sub_beta1_]);
  78. auto beta2_input = utils::cast<AnfNodePtr>((*equiv)[beta2_]);
  79. auto one_sub_beta2_input = utils::cast<AnfNodePtr>((*equiv)[one_sub_beta2_]);
  80. auto eps_input = utils::cast<AnfNodePtr>((*equiv)[eps_]);
  81. auto lr_input = utils::cast<AnfNodePtr>((*equiv)[lr_]);
  82. auto param_input = utils::cast<AnfNodePtr>((*equiv)[param_]);
  83. auto m_input = utils::cast<AnfNodePtr>((*equiv)[m_]);
  84. auto v_input = utils::cast<AnfNodePtr>((*equiv)[v_]);
  85. auto gradient_input = utils::cast<AnfNodePtr>((*equiv)[gradient_]);
  86. auto u_input = utils::cast<AnfNodePtr>((*equiv)[u_]);
  87. MS_EXCEPTION_IF_NULL(beta1_input);
  88. MS_EXCEPTION_IF_NULL(one_sub_beta1_input);
  89. MS_EXCEPTION_IF_NULL(beta2_input);
  90. MS_EXCEPTION_IF_NULL(one_sub_beta2_input);
  91. MS_EXCEPTION_IF_NULL(eps_input);
  92. MS_EXCEPTION_IF_NULL(lr_input);
  93. MS_EXCEPTION_IF_NULL(param_input);
  94. MS_EXCEPTION_IF_NULL(m_input);
  95. MS_EXCEPTION_IF_NULL(v_input);
  96. MS_EXCEPTION_IF_NULL(gradient_input);
  97. MS_EXCEPTION_IF_NULL(u_input);
  98. // Use depend(param, u) to maintain the execution order of FusedAdam and the previous operators.
  99. auto prim_depend = std::make_shared<Primitive>(prim::kPrimDepend->name());
  100. MS_EXCEPTION_IF_NULL(prim_depend);
  101. std::vector<AnfNodePtr> param_inputs = {NewValueNode(prim_depend), param_input, u_input};
  102. auto param = graph->NewCNode(param_inputs);
  103. MS_EXCEPTION_IF_NULL(param);
  104. param->set_abstract(param_input->abstract());
  105. // Fused into a FusedAdam operator.
  106. auto prim = std::make_shared<Primitive>(kFusedAdamName);
  107. MS_EXCEPTION_IF_NULL(prim);
  108. std::vector<AnfNodePtr> inputs = {NewValueNode(prim),
  109. beta1_input,
  110. one_sub_beta1_input,
  111. beta2_input,
  112. one_sub_beta2_input,
  113. eps_input,
  114. lr_input,
  115. param,
  116. m_input,
  117. v_input,
  118. gradient_input};
  119. auto adam = graph->NewCNode(inputs);
  120. MS_EXCEPTION_IF_NULL(adam);
  121. auto types = {AnfAlgo::GetOutputInferDataType(node, 0)};
  122. auto shapes = {AnfAlgo::GetOutputInferShape(node, 0)};
  123. AnfAlgo::SetOutputInferTypeAndShape(types, shapes, adam.get());
  124. adam->set_scope(node->scope());
  125. auto build_info = GenerateKernelBuildInfo(adam);
  126. AnfAlgo::SetSelectKernelBuildInfo(build_info, adam.get());
  127. // Replace the parameters of the last UpdateState to maintain
  128. // the execution order of FusedAdam and the following operators.
  129. // n represents the operator assign_v in {prim::kPrimDepend, next_param, assign_v}
  130. auto n = node->cast<CNodePtr>()->input(2);
  131. auto fg = n->func_graph();
  132. MS_EXCEPTION_IF_NULL(fg);
  133. auto mgr = fg->manager();
  134. MS_EXCEPTION_IF_NULL(mgr);
  135. auto &node_users = mgr->node_users();
  136. auto iter = node_users.find(n);
  137. if (iter == node_users.end()) {
  138. MS_LOG(EXCEPTION) << "Can not find node : " << n->DebugString();
  139. }
  140. auto &users = iter->second;
  141. for (auto &user : users) {
  142. if (IsPrimitiveCNode(user.first, prim::kPrimUpdateState)) {
  143. (user.first)->cast<CNodePtr>()->set_input(1, u_input);
  144. (user.first)->cast<CNodePtr>()->set_input(2, adam);
  145. break;
  146. }
  147. }
  148. return adam;
  149. }
  150. } // namespace opt
  151. } // namespace mindspore