gpu_session.cc

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "session/gpu_session.h"
#include "device/gpu/kernel_info_setter.h"
#include "device/gpu/gpu_kernel_build.h"
#include "device/gpu/gpu_kernel_runtime.h"
#include "device/gpu/gpu_stream_assign.h"
#include "pre_activate/common/optimizer.h"
#include "pre_activate/common/pass_manager.h"
#include "pre_activate/common/helper.h"
#include "pre_activate/pass/communication_op_fusion.h"
#include "pre_activate/pass/getitem_tuple.h"
#include "pre_activate/gpu/adam_weight_decay_fusion.h"
#include "pre_activate/gpu/adam_fusion.h"
#include "device/kernel_runtime_manager.h"
#include "predict/predict.h"
#include "common/utils.h"
#include "common/trans.h"
#include "utils/context/ms_context.h"
#include "utils/base_ref_extends.h"

namespace mindspore {
namespace session {
namespace gpu {
using AnfAlgo = mindspore::session::AnfRuntimeAlgorithm;
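
// Select a kernel build info (implementation, data type and format) for every node in the
// graph's execution order.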
void GPUSession::SelectKernel(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  for (const auto &kernel_node : kernel_graph->execution_order()) {
    MS_EXCEPTION_IF_NULL(kernel_node);
    device::gpu::SetKernelInfo(kernel_node);
  }
}
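
// Fetch the kernel runtime bound to this GPU device and initialize it; raise an exception if the
// device cannot be brought up.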
void GPUSession::StartKernelRT() const {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  if (!runtime_instance->Init()) {
    MS_LOG(EXCEPTION) << "GPU start kernel runtime failed";
  }
}
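
// Graph-level fusion: merge the AdamWeightDecay and Adam update patterns into fused kernels, then
// rebuild the execution order to reflect the rewritten graph.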
void GPUSession::Optimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto pm = std::make_shared<opt::PassManager>();
  pm->AddPass(std::make_shared<opt::AdamWeightDecayFusion>());
  pm->AddPass(std::make_shared<opt::AdamFusion>());
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}
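
// Hardware-related optimization: fuse consecutive AllReduce communication ops and clean up the
// tuple_getitem nodes left behind by the fusion.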
void GPUSession::HardwareOptimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto pm = std::make_shared<opt::PassManager>();
  pm->AddPass(std::make_shared<opt::AllReduceFusion>());
  pm->AddPass(std::make_shared<opt::GetitemTuple>());
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}
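
// Assign a CUDA stream to every kernel in the graph.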
void GPUSession::AssignStream(const std::shared_ptr<KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  device::gpu::AssignGpuStream(kernel_graph);
}
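
// Compile every CNode in the graph into an executable GPU kernel.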
void GPUSession::BuildKernel(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  device::gpu::GpuBuild(kernel_graph);
}
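
// Allocate device memory for the whole graph through the kernel runtime.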
void GPUSession::AllocateMemory(KernelGraph *kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->AssignMemory(kernel_graph);
}
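
// Allocate device memory for a single-op (PyNative) graph, including its input tensors.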
void GPUSession::RunOpAllocateMemory(const std::vector<tensor::TensorPtr> &input_tensors,
                                     KernelGraph *kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->RunOpAssignMemory(input_tensors, kernel_graph);
}
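
// Release the device memory that was allocated for a single-op graph.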
void GPUSession::RunOpClearMemory(KernelGraph *kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->RunOpClearMemory(kernel_graph);
}
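
// Copy user inputs into the device memory of the graph's parameters. A host-to-device sync is only
// needed when the tensor has no device address yet, was modified on the host (is_dirty), or holds an
// address of a different device type; a tensor that already owns an address of the same device type
// is rebound to the parameter instead of being copied.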
void GPUSession::LoadInputData(const std::shared_ptr<KernelGraph> &kernel_graph,
                               const std::vector<tensor::TensorPtr> &inputs_const) const {
  std::vector<tensor::TensorPtr> inputs(inputs_const);
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto input_nodes = kernel_graph->inputs();
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  for (size_t i = 0; i < inputs.size(); ++i) {
    auto tensor = inputs[i];
    MS_EXCEPTION_IF_NULL(tensor);
    auto input_node = input_nodes[i];
    MS_EXCEPTION_IF_NULL(input_node);
    if (input_node->isa<Parameter>() && AnfAlgo::OutputAddrExist(input_node, 0)) {
      auto pk_node = input_node->cast<ParameterPtr>();
      auto device_address = AnfAlgo::GetMutableOutputAddr(pk_node, 0);
      auto tensor_address = tensor->device_address();
      bool need_sync = false;
      if (ms_context->enable_pynative_infer()) {
        if (tensor_address == nullptr || tensor_address != device_address) {
          need_sync = true;
        }
      } else if (tensor->is_dirty() || tensor_address == nullptr) {
        need_sync = true;
      } else if (tensor_address != device_address) {
        if (tensor_address->DeviceType() == device_address->DeviceType()) {
          AnfAlgo::SetOutputAddr(tensor_address, 0, pk_node.get());
        } else {
          need_sync = true;
        }
      }
      if (need_sync) {
        tensor->set_device_address(device_address);
        MS_EXCEPTION_IF_NULL(device_address);
        if (!device_address->SyncHostToDevice(trans::GetRuntimePaddingShape(pk_node, 0),
                                              LongToSize(tensor->data().nbytes()), tensor->data_type(),
                                              tensor->data_c())) {
          MS_LOG(EXCEPTION) << "SyncHostToDevice failed.";
        }
      }
    }
    tensor->set_dirty(false);
  }
}
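
// Launch the compiled graph through the kernel runtime.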
void GPUSession::Execute(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  if (!runtime_instance->Run(kernel_graph.get())) {
    MS_LOG(EXCEPTION) << "GPU execute graph failed!";
  }
}
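
// Graph-mode compile pipeline: construct the kernel graph, optimize it, select and build kernels,
// assign streams, and allocate device memory before the graph can be run.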
GraphId GPUSession::CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList &outputs) {
  // Construct the kernel graph; on success, graph_sum_ is incremented
  auto graph_id = graph_sum_;
  auto graph = ConstructKernelGraph(lst, outputs);
  MS_EXCEPTION_IF_NULL(graph);
  // Graph-level optimization
  Optimize(graph);
  // Select kernel build info
  SelectKernel(graph);
  // Convert the kernel graph to a predict model
  predictmodel::StepConvertGraph(graph);
  // Start the GPU kernel runtime
  StartKernelRT();
  // Hardware-related optimization
  HardwareOptimize(graph);
  // Assign CUDA streams
  AssignStream(graph);
  // Hide nop nodes from the execution graph
  opt::HideNopNode(graph.get());
  // Build a kernel for every CNode
  BuildKernel(graph);
  // Set the graph execution order before memory allocation, so that allocation follows the reordered graph
  auto execution_order = graph->execution_order();
  Reorder(&execution_order);
  graph->set_execution_order(execution_order);
  // Get summary nodes
  GetSummaryNodes(graph.get());
  // Remove nop nodes from the execution graph
  opt::RemoveNopNode(graph.get());
  // Allocate memory, including static and dynamic memory
  AllocateMemory(graph.get());
  MS_EXCEPTION_IF_NULL(context_);
  FuncGraphManagerPtr manager = MakeManager({graph});
  context_->AddManager(manager);
  if (manager) {
    manager->AddFuncGraph(graph);
    graph->set_manager(manager);
  }
  return graph_id;
}
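
// Run a previously compiled graph: load the inputs, execute on the GPU with the Python GIL
// released, then copy the results back and handle summary nodes if summaries are enabled.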
void GPUSession::RunGraph(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &inputs, VectorRef *outputs) {
  auto &kernel_graph = graphs_[graph_id];
  // Load input data from user input
  LoadInputData(kernel_graph, inputs);
  MS_EXCEPTION_IF_NULL(kernel_graph);
  // Convert inputs to model
  predictmodel::StepConvertWeight(inputs);
  {
    py::gil_scoped_release gil_release;
    // Run graph on GPU
    Execute(kernel_graph);
  }
  // Get result from GPU
  UpdateOutputs(kernel_graph, outputs, inputs);
  // Summary
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  if (context_ptr->enable_gpu_summary()) {
    Summary(kernel_graph.get());
  }
}
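
// PyNative mode: build the kernel graph for a single op and cache it under its graph_info key.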
void GPUSession::BuildOp(const OpRunInfo &op_run_info, const GraphInfo &graph_info,
                         const std::vector<tensor::TensorPtr> &input_tensors, const std::vector<int> &tensors_mask) {
  // Reuse the cached graph if this op has been built before
  if (run_op_graphs_.find(graph_info) != run_op_graphs_.end()) {
    return;
  }
  // Prepare the graph
  auto kernel_graph = ConstructSingleOpGraph(op_run_info, input_tensors, tensors_mask);
  MS_EXCEPTION_IF_NULL(kernel_graph);
  SelectKernel(kernel_graph);
  StartKernelRT();
  // Hide nop nodes from the execution graph
  opt::HideNopNode(kernel_graph.get());
  BuildKernel(kernel_graph);
  run_op_graphs_[graph_info] = kernel_graph;
}
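
// PyNative mode: execute a cached single-op graph and return its outputs as a Python tuple.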
py::tuple GPUSession::RunOp(const OpRunInfo &op_run_info, const GraphInfo &graph_info,
                            const std::vector<tensor::TensorPtr> &input_tensors) {
  auto kernel_graph = run_op_graphs_[graph_info];
  MS_EXCEPTION_IF_NULL(kernel_graph);
  // Remove nop nodes from the execution graph
  opt::RemoveNopNode(kernel_graph.get());
  RunOpAllocateMemory(input_tensors, kernel_graph.get());
  // Execute the computation
  LoadInputData(kernel_graph, input_tensors);
  Execute(kernel_graph);
  // Fetch outputs
  VectorRef outputs;
  UpdateOutputs(kernel_graph, &outputs, input_tensors);
  // Transform the outputs to a tuple
  auto output_tensors = TransformBaseRefListToTuple(outputs);
  if (!utils::isa<PyObjectRef>(output_tensors) ||
      !py::isinstance<py::tuple>(utils::cast<PyObjectRef>(output_tensors).object_)) {
    MS_EXCEPTION(NotSupportError) << "The output tensors should be a tuple!";
  }
  py::object tuple_obj = utils::cast<PyObjectRef>(output_tensors).object_;
  py::tuple tuple_tensors = py::cast<py::tuple>(tuple_obj);
  RunOpClearMemory(kernel_graph.get());
  return tuple_tensors;
}
}  // namespace gpu
}  // namespace session
}  // namespace mindspore