You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

gpu_stream_assign.cc 8.6 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <set>
  17. #include <string>
  18. #include <memory>
  19. #include <algorithm>
  20. #include "device/gpu/gpu_common.h"
  21. #include "device/gpu/kernel_info_setter.h"
  22. #include "device/gpu/gpu_device_manager.h"
  23. #include "device/gpu/gpu_stream_assign.h"
  24. namespace mindspore {
  25. namespace device {
  26. namespace gpu {
  27. void AssignGpuStream(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
  28. MS_EXCEPTION_IF_NULL(kernel_graph);
  29. std::vector<CNodePtr> allreduce_kernels;
  30. auto execution_kernels = kernel_graph->execution_order();
  31. for (auto kernel_node : execution_kernels) {
  32. std::string kernel_name = AnfAlgo::GetCNodeName(kernel_node);
  33. if (kernel_name == kAllReduceOpName) {
  34. allreduce_kernels.emplace_back(kernel_node);
  35. } else {
  36. DeviceStream compute_stream = GPUDeviceManager::GetInstance().default_stream();
  37. AnfAlgo::SetNodeAttr("stream_id", MakeValue(reinterpret_cast<uintptr_t>(compute_stream)), kernel_node);
  38. }
  39. }
  40. if (allreduce_kernels.size() > 1) {
  41. // Assign multiple streams only when there's Recv node for AllReduce.
  42. std::vector<SendRecvPair> send_recv_pairs;
  43. if (FindAllReduceStreamSwitchPos(kernel_graph, &send_recv_pairs)) {
  44. DeviceStream comm_stream = nullptr;
  45. GPUDeviceManager::GetInstance().CreateStream(&comm_stream);
  46. std::transform(
  47. allreduce_kernels.begin(), allreduce_kernels.end(), allreduce_kernels.begin(), [&](CNodePtr allreduce_kernel) {
  48. AnfAlgo::SetNodeAttr("stream_id", MakeValue(reinterpret_cast<uintptr_t>(comm_stream)), allreduce_kernel);
  49. return allreduce_kernel;
  50. });
  51. InsertStreamSwitchNode(kernel_graph, send_recv_pairs);
  52. } else {
  53. return;
  54. }
  55. }
  56. }
// Scans the execution order for AllReduce kernels and records, for each one,
// two Send/Recv pairs describing where stream-switch nodes must be inserted:
//   pair1: last preceding dependency -> AllReduce (compute stream signals comm stream)
//   pair2: AllReduce -> first consumer            (comm stream signals compute stream)
// The offsets stored in each pair are insertion indices into the execution
// order (position + 1 for the Send so it lands right after its node; the
// plain position for the Recv so it lands right before its node) — see
// InsertStreamSwitchNode.
// Returns false when an AllReduce has no downstream consumer (the pairs
// collected so far may be partially filled in that case); a missing upstream
// position is only warned about and that AllReduce is skipped.
bool FindAllReduceStreamSwitchPos(const std::shared_ptr<session::KernelGraph> &kernel_graph,
                                  std::vector<SendRecvPair> *send_recv_pairs) {
  auto execution_kernels = kernel_graph->execution_order();
  std::vector<CNodePtr>::iterator iter, iter_begin;
  iter = iter_begin = execution_kernels.begin();
  std::vector<CNodePtr>::iterator iter_end = execution_kernels.end();
  for (; iter != execution_kernels.end(); ++iter) {
    std::string kernel_name = AnfAlgo::GetCNodeName(*iter);
    if (kernel_name == kAllReduceOpName) {
      // Find AllReduce node's last input node.
      // Search range ends at iter + 1 so the AllReduce itself is the sentinel.
      std::vector<CNodePtr>::iterator mock_send_node_iter =
        FindSendNodePos(iter_begin, iter + 1, *iter, kAllReduceStreamSwitch);
      if (mock_send_node_iter == iter + 1) {
        MS_LOG(WARNING) << "Can't find send node place before AllReduce node.";
        continue;
      }
      SendRecvPair pair1 = {kAllReduceStreamSwitch, *mock_send_node_iter, *iter,
                            IntToSize(mock_send_node_iter - iter_begin + 1), IntToSize(iter - iter_begin)};
      send_recv_pairs->push_back(pair1);
      // Find node which uses AllReduce as input[0].
      std::vector<CNodePtr>::iterator mock_recv_node_iter =
        FindRecvNodePos(iter, iter_end, *iter, kAllReduceStreamSwitch);
      if (mock_recv_node_iter == iter_end) {
        MS_LOG(WARNING) << "Can't find recv node place after AllReduce node.";
        return false;
      }
      SendRecvPair pair2 = {kAllReduceStreamSwitch, *iter, *mock_recv_node_iter, IntToSize(iter - iter_begin + 1),
                            IntToSize(mock_recv_node_iter - iter_begin)};
      send_recv_pairs->push_back(pair2);
    }
  }
  return true;
}
  90. std::vector<CNodePtr>::iterator FindSendNodePos(std::vector<CNodePtr>::iterator begin,
  91. std::vector<CNodePtr>::iterator end, const CNodePtr mock_recv_node,
  92. StreamSwitchType stream_switch_type) {
  93. MS_EXCEPTION_IF_NULL(mock_recv_node);
  94. if (stream_switch_type == kAllReduceStreamSwitch) {
  95. for (auto iter = begin; iter != end; iter++) {
  96. if (*(iter + 1) == mock_recv_node) {
  97. return iter;
  98. }
  99. }
  100. }
  101. return end;
  102. }
  103. std::vector<CNodePtr>::iterator FindRecvNodePos(std::vector<CNodePtr>::iterator begin,
  104. std::vector<CNodePtr>::iterator end, const CNodePtr mock_send_node,
  105. StreamSwitchType stream_switch_type) {
  106. MS_EXCEPTION_IF_NULL(mock_send_node);
  107. for (auto iter = begin; iter != end; iter++) {
  108. auto node = *iter;
  109. if (stream_switch_type == kAllReduceStreamSwitch) {
  110. for (auto input : node->inputs()) {
  111. if (mock_send_node == AnfAlgo::VisitKernel(input, 0).first) {
  112. return iter;
  113. }
  114. }
  115. }
  116. }
  117. return end;
  118. }
  119. void InsertStreamSwitchNode(const std::shared_ptr<session::KernelGraph> &kernel_graph,
  120. const std::vector<SendRecvPair> &send_recv_pairs) {
  121. std::set<StreamSwitchNode> ordered_stream_switch_nodes;
  122. for (SendRecvPair pair : send_recv_pairs) {
  123. StreamSwitchType stream_switch_type = pair.stream_switch_type;
  124. CNodePtr mock_send_node = pair.mock_send_node;
  125. CNodePtr mock_recv_node = pair.mock_recv_node;
  126. size_t send_node_offset = pair.send_node_offset;
  127. size_t recv_node_offset = pair.recv_node_offset;
  128. CNodePtr send_node = nullptr;
  129. CNodePtr recv_node = nullptr;
  130. // Step 1: generate Send and Recv CNodes.
  131. if (stream_switch_type == kAllReduceStreamSwitch) {
  132. if (!GenSendRecvCNodesForAllReduce(kernel_graph, mock_send_node, mock_recv_node, &send_node, &recv_node)) {
  133. MS_LOG(EXCEPTION) << "Generating CNodes for send and recv failed. Stream switch type: kAllReduceStreamSwitch";
  134. }
  135. }
  136. // Step 2: sort send and recv CNodes by offset.
  137. ordered_stream_switch_nodes.insert({send_node_offset, send_node});
  138. ordered_stream_switch_nodes.insert({recv_node_offset, recv_node});
  139. }
  140. // Step 3: insert stream switch CNodes into execution kernel list.
  141. auto execution_kernels = kernel_graph->execution_order();
  142. for (auto node = ordered_stream_switch_nodes.rbegin(); node != ordered_stream_switch_nodes.rend(); node++) {
  143. execution_kernels.insert(execution_kernels.begin() + node->offset, node->cnode);
  144. }
  145. kernel_graph->set_execution_order(execution_kernels);
  146. }
// Creates a Send/Recv CNode pair that synchronizes two streams around an
// AllReduce: Send records a shared CUDA event on the sender's stream, Recv
// waits on that event on the receiver's stream. The event and stream handles
// are passed to the nodes as integer attributes.
// NOTE(review): the cudaEvent_t created here is never destroyed in this
// function — presumably ownership transfers to the Send/Recv kernels via the
// "record_event"/"wait_event" attrs; confirm it is released at runtime.
// Always returns true; failures raise via the CHECK/EXCEPTION macros.
bool GenSendRecvCNodesForAllReduce(const std::shared_ptr<session::KernelGraph> &kernel_graph,
                                   const CNodePtr &mock_send_node, const CNodePtr &mock_recv_node, CNodePtr *send_node,
                                   CNodePtr *recv_node) {
  *send_node = CreateStreamSwitchNode(kernel_graph, kSendOpName);
  MS_EXCEPTION_IF_NULL(*send_node);
  *recv_node = CreateStreamSwitchNode(kernel_graph, kRecvOpName);
  MS_EXCEPTION_IF_NULL(*recv_node);
  // One event is shared by the pair; timing is disabled since it is used
  // purely for ordering.
  cudaEvent_t event = nullptr;
  CHECK_CUDA_RET_WITH_EXCEPT(cudaEventCreate(&event, cudaEventDisableTiming), "Creating cuda event failed.");
  AnfAlgo::SetNodeAttr("record_event", MakeValue(reinterpret_cast<uintptr_t>(event)), *send_node);
  AnfAlgo::SetNodeAttr("wait_event", MakeValue(reinterpret_cast<uintptr_t>(event)), *recv_node);
  // Each switch node inherits the stream of the node it synchronizes with;
  // "stream_id" was set earlier in AssignGpuStream.
  uintptr_t send_stream = AnfAlgo::GetNodeAttr<uintptr_t>(mock_send_node, "stream_id");
  AnfAlgo::SetNodeAttr("record_event_stream", MakeValue(send_stream), *send_node);
  uintptr_t recv_stream = AnfAlgo::GetNodeAttr<uintptr_t>(mock_recv_node, "stream_id");
  AnfAlgo::SetNodeAttr("wait_event_stream", MakeValue(recv_stream), *recv_node);
  return true;
}
  164. CNodePtr CreateStreamSwitchNode(const std::shared_ptr<session::KernelGraph> &kernel_graph, const std::string &name) {
  165. auto op = std::make_shared<Primitive>(name);
  166. auto apply = std::make_shared<ValueNode>(op);
  167. std::vector<AnfNodePtr> input_list = {apply};
  168. CNodePtr node = kernel_graph->NewCNode(input_list);
  169. MS_EXCEPTION_IF_NULL(node);
  170. kernel::KernelBuildInfo::KernelBuildInfoBuilder selected_kernel_builder;
  171. AnfAlgo::SetSelectKernelBuildInfo(selected_kernel_builder.Build(), node.get());
  172. auto abstract_none = std::make_shared<abstract::AbstractNone>();
  173. node->set_abstract(abstract_none);
  174. SetKernelInfo(node);
  175. return node;
  176. }
  177. } // namespace gpu
  178. } // namespace device
  179. } // namespace mindspore