You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

gpu_stream_assign.cc 8.5 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <set>
  17. #include <string>
  18. #include <memory>
  19. #include <algorithm>
  20. #include "device/gpu/gpu_common.h"
  21. #include "device/gpu/kernel_info_setter.h"
  22. #include "device/gpu/gpu_device_manager.h"
  23. #include "device/gpu/gpu_stream_assign.h"
  24. namespace mindspore {
  25. namespace device {
  26. namespace gpu {
  27. void AssignGpuStream(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
  28. MS_EXCEPTION_IF_NULL(kernel_graph);
  29. std::vector<CNodePtr> allreduce_kernels;
  30. auto execution_kernels = kernel_graph->execution_order();
  31. for (auto kernel_node : execution_kernels) {
  32. std::string kernel_name = AnfAlgo::GetCNodeName(kernel_node);
  33. if (kernel_name == kAllReduceOpName) {
  34. allreduce_kernels.emplace_back(kernel_node);
  35. } else {
  36. DeviceStream compute_stream = GPUDeviceManager::GetInstance().default_stream();
  37. AnfAlgo::SetNodeAttr("stream_id", MakeValue(reinterpret_cast<uintptr_t>(compute_stream)), kernel_node);
  38. }
  39. }
  40. if (allreduce_kernels.size() > 1) {
  41. DeviceStream comm_stream = nullptr;
  42. GPUDeviceManager::GetInstance().CreateStream(&comm_stream);
  43. std::transform(
  44. allreduce_kernels.begin(), allreduce_kernels.end(), allreduce_kernels.begin(), [&](CNodePtr allreduce_kernel) {
  45. AnfAlgo::SetNodeAttr("stream_id", MakeValue(reinterpret_cast<uintptr_t>(comm_stream)), allreduce_kernel);
  46. return allreduce_kernel;
  47. });
  48. std::vector<SendRecvPair> send_recv_pairs;
  49. FindAllReduceStreamSwitchPos(kernel_graph, &send_recv_pairs);
  50. InsertStreamSwitchNode(kernel_graph, send_recv_pairs);
  51. }
  52. }
  53. void FindAllReduceStreamSwitchPos(const std::shared_ptr<session::KernelGraph> &kernel_graph,
  54. std::vector<SendRecvPair> *send_recv_pairs) {
  55. auto execution_kernels = kernel_graph->execution_order();
  56. std::vector<CNodePtr>::iterator iter, iter_begin;
  57. iter = iter_begin = execution_kernels.begin();
  58. std::vector<CNodePtr>::iterator iter_end = execution_kernels.end();
  59. for (; iter != execution_kernels.end(); ++iter) {
  60. std::string kernel_name = AnfAlgo::GetCNodeName(*iter);
  61. if (kernel_name == kAllReduceOpName) {
  62. // Find AllReduce node's last input node.
  63. std::vector<CNodePtr>::iterator mock_send_node_iter =
  64. FindSendNodePos(iter_begin, iter + 1, *iter, kAllReduceStreamSwitch);
  65. if (mock_send_node_iter == iter + 1) {
  66. MS_LOG(WARNING) << "Can't find send node place before AllReduce node.";
  67. continue;
  68. }
  69. SendRecvPair pair1 = {kAllReduceStreamSwitch, *mock_send_node_iter, *iter,
  70. IntToSize(mock_send_node_iter - iter_begin + 1), IntToSize(iter - iter_begin)};
  71. send_recv_pairs->push_back(pair1);
  72. // Find node which uses AllReduce as input[0].
  73. std::vector<CNodePtr>::iterator mock_recv_node_iter =
  74. FindRecvNodePos(iter, iter_end, *iter, kAllReduceStreamSwitch);
  75. if (mock_recv_node_iter == iter_end) {
  76. MS_LOG(WARNING) << "Can't find send node place before AllReduce node.";
  77. continue;
  78. }
  79. SendRecvPair pair2 = {kAllReduceStreamSwitch, *iter, *mock_recv_node_iter, IntToSize(iter - iter_begin + 1),
  80. IntToSize(mock_recv_node_iter - iter_begin)};
  81. send_recv_pairs->push_back(pair2);
  82. }
  83. }
  84. }
  85. std::vector<CNodePtr>::iterator FindSendNodePos(std::vector<CNodePtr>::iterator begin,
  86. std::vector<CNodePtr>::iterator end, const CNodePtr mock_recv_node,
  87. StreamSwitchType stream_switch_type) {
  88. MS_EXCEPTION_IF_NULL(mock_recv_node);
  89. if (stream_switch_type == kAllReduceStreamSwitch) {
  90. for (auto iter = begin; iter != end; iter++) {
  91. if (*(iter + 1) == mock_recv_node) {
  92. return iter;
  93. }
  94. }
  95. }
  96. return end;
  97. }
  98. std::vector<CNodePtr>::iterator FindRecvNodePos(std::vector<CNodePtr>::iterator begin,
  99. std::vector<CNodePtr>::iterator end, const CNodePtr mock_send_node,
  100. StreamSwitchType stream_switch_type) {
  101. MS_EXCEPTION_IF_NULL(mock_send_node);
  102. for (auto iter = begin; iter != end; iter++) {
  103. auto node = *iter;
  104. if (stream_switch_type == kAllReduceStreamSwitch) {
  105. for (auto input : node->inputs()) {
  106. if (mock_send_node == AnfAlgo::VisitKernel(input, 0).first) {
  107. return iter;
  108. }
  109. }
  110. }
  111. }
  112. return end;
  113. }
  114. void InsertStreamSwitchNode(const std::shared_ptr<session::KernelGraph> &kernel_graph,
  115. const std::vector<SendRecvPair> &send_recv_pairs) {
  116. std::set<StreamSwitchNode> ordered_stream_switch_nodes;
  117. for (SendRecvPair pair : send_recv_pairs) {
  118. StreamSwitchType stream_switch_type = pair.stream_switch_type;
  119. CNodePtr mock_send_node = pair.mock_send_node;
  120. CNodePtr mock_recv_node = pair.mock_recv_node;
  121. size_t send_node_offset = pair.send_node_offset;
  122. size_t recv_node_offset = pair.recv_node_offset;
  123. CNodePtr send_node = nullptr;
  124. CNodePtr recv_node = nullptr;
  125. // Step 1: generate Send and Recv CNodes.
  126. if (stream_switch_type == kAllReduceStreamSwitch) {
  127. if (!GenSendRecvCNodesForAllReduce(kernel_graph, mock_send_node, mock_recv_node, &send_node, &recv_node)) {
  128. MS_LOG(EXCEPTION) << "Generating CNodes for send and recv failed. Stream switch type: kAllReduceStreamSwitch";
  129. }
  130. }
  131. // Step 2: sort send and recv CNodes by offset.
  132. ordered_stream_switch_nodes.insert({send_node_offset, send_node});
  133. ordered_stream_switch_nodes.insert({recv_node_offset, recv_node});
  134. }
  135. // Step 3: insert stream switch CNodes into execution kernel list.
  136. auto execution_kernels = kernel_graph->execution_order();
  137. for (auto node = ordered_stream_switch_nodes.rbegin(); node != ordered_stream_switch_nodes.rend(); node++) {
  138. execution_kernels.insert(execution_kernels.begin() + node->offset, node->cnode);
  139. }
  140. kernel_graph->set_execution_order(execution_kernels);
  141. }
// Create a Send/Recv CNode pair synchronizing an AllReduce with its producer
// and consumer streams: the Send records a CUDA event on the sender's stream,
// the Recv waits on that same event from the receiver's stream. The event and
// stream handles are smuggled to the kernels as uintptr_t node attributes.
// Always returns true (null results trigger an exception instead).
// NOTE(review): the cudaEvent_t created here is never destroyed in this
// function — presumably ownership passes to whoever consumes the
// "record_event"/"wait_event" attrs; confirm it is released downstream.
bool GenSendRecvCNodesForAllReduce(const std::shared_ptr<session::KernelGraph> &kernel_graph,
                                   const CNodePtr &mock_send_node, const CNodePtr &mock_recv_node, CNodePtr *send_node,
                                   CNodePtr *recv_node) {
  *send_node = CreateStreamSwitchNode(kernel_graph, kSendOpName);
  MS_EXCEPTION_IF_NULL(*send_node);
  *recv_node = CreateStreamSwitchNode(kernel_graph, kRecvOpName);
  MS_EXCEPTION_IF_NULL(*recv_node);
  // One shared event: recorded by the Send kernel, waited on by the Recv.
  // cudaEventDisableTiming makes record/wait cheaper (no timing bookkeeping).
  cudaEvent_t event = nullptr;
  CHECK_CUDA_RET_WITH_EXCEPT(cudaEventCreate(&event, cudaEventDisableTiming), "Creating cuda event failed.");
  AnfAlgo::SetNodeAttr("record_event", MakeValue(reinterpret_cast<uintptr_t>(event)), *send_node);
  AnfAlgo::SetNodeAttr("wait_event", MakeValue(reinterpret_cast<uintptr_t>(event)), *recv_node);
  // Propagate the stream handles previously stored on the mock nodes by
  // AssignGpuStream, so the kernels know which streams to record/wait on.
  uintptr_t send_stream = AnfAlgo::GetNodeAttr<uintptr_t>(mock_send_node, "stream_id");
  AnfAlgo::SetNodeAttr("record_event_stream", MakeValue(send_stream), *send_node);
  uintptr_t recv_stream = AnfAlgo::GetNodeAttr<uintptr_t>(mock_recv_node, "stream_id");
  AnfAlgo::SetNodeAttr("wait_event_stream", MakeValue(recv_stream), *recv_node);
  return true;
}
  159. CNodePtr CreateStreamSwitchNode(const std::shared_ptr<session::KernelGraph> &kernel_graph, const std::string &name) {
  160. auto op = std::make_shared<Primitive>(name);
  161. auto apply = std::make_shared<ValueNode>(op);
  162. std::vector<AnfNodePtr> input_list = {apply};
  163. CNodePtr node = kernel_graph->NewCNode(input_list);
  164. MS_EXCEPTION_IF_NULL(node);
  165. kernel::KernelBuildInfo::KernelBuildInfoBuilder selected_kernel_builder;
  166. AnfAlgo::SetSelectKernelBuildInfo(selected_kernel_builder.Build(), node.get());
  167. auto abstract_none = std::make_shared<abstract::AbstractNone>();
  168. node->set_abstract(abstract_none);
  169. SetKernelInfo(node);
  170. return node;
  171. }
  172. } // namespace gpu
  173. } // namespace device
  174. } // namespace mindspore