
gpu_session.cc 26 kB

/**
 * Copyright 2019-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "backend/session/gpu_session.h"
#include <dlfcn.h>  // for dlsym, used in GPUSession::Init
#include <string>
#include <utility>
#include "backend/optimizer/common/helper.h"
#include "backend/optimizer/common/optimizer.h"
#include "backend/optimizer/common/pass_manager.h"
#include "backend/optimizer/common/common_backend_optimization.h"
#include "backend/optimizer/gpu/adam_weight_decay_fusion.h"
#include "backend/optimizer/gpu/adam_fusion.h"
#include "backend/optimizer/gpu/apply_momentum_weight_scale_fusion.h"
#include "backend/optimizer/gpu/apply_momentum_scale_fusion.h"
#include "backend/optimizer/gpu/apply_momentum_weight_fusion.h"
#include "backend/optimizer/gpu/batch_norm_relu_fusion.h"
#include "backend/optimizer/gpu/batch_norm_relu_grad_fusion.h"
#include "backend/optimizer/gpu/batch_norm_add_relu_fusion.h"
#include "backend/optimizer/gpu/post_batch_norm_add_relu_fusion.h"
#include "backend/optimizer/gpu/batch_norm_add_relu_grad_fusion.h"
#include "backend/optimizer/gpu/combine_momentum_fusion.h"
#include "backend/optimizer/gpu/combine_cast_fusion.h"
#include "backend/optimizer/gpu/cudnn_inplace_fusion.h"
#include "backend/optimizer/gpu/insert_format_transform_op.h"
#include "backend/optimizer/gpu/replace_momentum_cast_fusion.h"
#include "backend/optimizer/gpu/replace_addn_fusion.h"
#include "backend/optimizer/gpu/print_reduce_fusion.h"
#include "backend/optimizer/gpu/bce_with_logits_loss_fusion.h"
#include "backend/optimizer/gpu/remove_format_transform_pair.h"
#include "backend/optimizer/gpu/remove_redundant_format_transform.h"
#include "backend/optimizer/gpu/reduce_precision_fusion.h"
#include "backend/optimizer/gpu/relu_v2_pass.h"
#include "backend/optimizer/gpu/add_relu_v2_fusion.h"
#include "backend/optimizer/gpu/add_relu_grad_v2_fusion.h"
#include "backend/optimizer/graph_kernel/graph_kernel_optimization.h"
#include "backend/optimizer/pass/communication_op_fusion.h"
#include "backend/optimizer/pass/getitem_tuple.h"
#include "common/trans.h"
#include "debug/anf_ir_dump.h"
#include "debug/data_dump/e2e_dump.h"
#ifdef ENABLE_DEBUGGER
#include "debug/debugger/proto_exporter.h"
#else
#include "debug/debugger/proto_exporter_stub.h"
#endif
#include "debug/data_dump/dump_json_parser.h"
#include "debug/tensor_load.h"
#include "debug/dump_proto.h"
#include "runtime/device/gpu/gpu_kernel_build.h"
#include "runtime/device/gpu/gpu_kernel_runtime.h"
#include "runtime/device/gpu/gpu_stream_assign.h"
#include "runtime/device/gpu/kernel_info_setter.h"
#include "runtime/device/kernel_runtime_manager.h"
#include "runtime/device/gpu/cuda_driver.h"
#include "runtime/device/gpu/distribution/collective_init.h"
#include "runtime/device/gpu/gpu_bucket.h"
#include "runtime/device/gpu/gpu_device_address.h"
#include "utils/ms_utils.h"
#include "utils/config_manager.h"
#include "utils/ms_context.h"
#include "utils/context/graph_kernel_flags.h"
#include "utils/utils.h"
#if ENABLE_CPU && ENABLE_GPU
#include "ps/util.h"
#include "ps/ps_cache/ps_cache_manager.h"
#endif
#ifdef ENABLE_DUMP_IR
#include "debug/rdr/running_data_recorder.h"
#endif
namespace mindspore {
namespace session {
namespace gpu {
using AnfAlgo = mindspore::session::AnfRuntimeAlgorithm;
using CollectiveInitializer = device::gpu::CollectiveInitializer;
using GetLocalRankId = device::gpu::GetLocalRankId;
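// Initialize the GPU session for the given device. When collective communication has been initialized, the
// device id is overridden by the local rank id looked up from the collective library handle via dlsym; the CUDA
// device is then selected and recorded in MsContext before the executor is created.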
void GPUSession::Init(uint32_t device_id) {
  const void *collective_handle_ = CollectiveInitializer::instance().collective_handle();
  bool collective_inited = CollectiveInitializer::instance().collective_inited();
  if (collective_inited && collective_handle_ != nullptr) {
    auto get_local_rank_funcptr =
      reinterpret_cast<GetLocalRankId>(dlsym(const_cast<void *>(collective_handle_), "local_rank_id"));
    MS_EXCEPTION_IF_NULL(get_local_rank_funcptr);
    device_id = IntToUint((*get_local_rank_funcptr)());
  }
  bool ret = device::gpu::CudaDriver::SetDevice(UintToInt(device_id));
  if (!ret) {
    MS_LOG(EXCEPTION) << "GPUSession failed to set current device id:" << device_id;
  }
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  ms_context->set_param<uint32_t>(MS_CTX_DEVICE_ID, device_id);
  auto &json_parser = DumpJsonParser::GetInstance();
  // Dump json config file if dump is enabled
  json_parser.CopyJsonToDir();
  MS_LOG(INFO) << "Set device id " << device_id << " for gpu session.";
  InitExecutor(kGPUDevice, device_id);
}
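// Select a kernel build info for every node in the graph's execution order, after checking whether format
// transformation is supported for this graph.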
void GPUSession::SelectKernel(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  device::gpu::FormatTransformChecker::GetInstance().CheckSupportFormatTransform(kernel_graph);
  for (const auto &kernel_node : kernel_graph->execution_order()) {
    MS_EXCEPTION_IF_NULL(kernel_node);
    device::gpu::SetKernelInfo(kernel_node);
  }
}
void GPUSession::StartKernelRT() const {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  if (!runtime_instance->Init()) {
    MS_LOG(EXCEPTION) << "GPU start kernel runtime failed";
  }
}
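// Graph optimization that is independent of the device data format: optimizer-op fusions (Adam and Momentum
// variants), cast combination, and pattern rewrites such as ReplaceAddN and PrintReduce.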
void GPUSession::Optimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto pm = std::make_shared<opt::PassManager>();
  pm->AddPass(std::make_shared<opt::AdamWeightDecayFusion>());
  pm->AddPass(std::make_shared<opt::AdamFusion>());
  pm->AddPass(std::make_shared<opt::ApplyMomentumWeightDecayScaleFusion>());
  pm->AddPass(std::make_shared<opt::ApplyMomentumScaleFusion>());
  pm->AddPass(std::make_shared<opt::ApplyMomentumWeightDecayFusion>());
  if (!context::GraphKernelFlags::GetInstance().IsEnableGraphKernel()) {
    pm->AddPass(std::make_shared<opt::CastAllFusion>("cast_all"));
  }
  pm->AddPass(std::make_shared<opt::CombineMomentumFusion>("combine_momentum"));
  pm->AddPass(std::make_shared<opt::ReplaceMomentumCastFusion>());
  pm->AddPass(std::make_shared<opt::ReplaceAddNFusion>());
  pm->AddPass(std::make_shared<opt::PrintReduceFusion>("print_reduce"));
  pm->AddPass(std::make_shared<opt::BCEWithLogitsLossFusion>());
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}
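// Graph optimization that depends on the device data format: cuDNN-oriented BatchNorm/ReLU fusions,
// format-transform insertion and cleanup, inplace aggregation, AllReduce fusion, and precision reduction.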
void GPUSession::HardwareOptimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto pm = std::make_shared<opt::PassManager>();
  pm->AddPass(std::make_shared<opt::BatchNormReluFusion>());
  pm->AddPass(std::make_shared<opt::BatchNormReluGradFusion>());
  pm->AddPass(std::make_shared<opt::BatchNormAddReluFusion>());
  pm->AddPass(std::make_shared<opt::PostBatchNormAddReluFusion>());
  pm->AddPass(std::make_shared<opt::BatchNormAddReluGradFusion>());
  pm->AddPass(std::make_shared<opt::InsertFormatTransformOp>());
  pm->AddPass(std::make_shared<opt::RemoveFormatTransformPair>());
  pm->AddPass(std::make_shared<opt::RemoveRedundantFormatTransform>());
  pm->AddPass(std::make_shared<opt::CudnnInplaceAggregate>());
  pm->AddPass(std::make_shared<opt::ReluV2Pass>());
  pm->AddPass(std::make_shared<opt::AddReluV2Fusion>());
  pm->AddPass(std::make_shared<opt::AddReluGradV2Fusion>());
  pm->AddPass(std::make_shared<opt::AllReduceFusion>());
  pm->AddPass(std::make_shared<opt::GetitemTuple>());
  pm->AddPass(std::make_shared<opt::ReducePrecisionFusion>("reduce_precision"));
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}
void GPUSession::RunOpHardwareOptimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto pm = std::make_shared<opt::PassManager>();
  pm->AddPass(std::make_shared<opt::ReducePrecisionFusion>("reduce_precision"));
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}
void GPUSession::GraphKernelOptimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  if (!context::GraphKernelFlags::GetInstance().IsEnableGraphKernel()) {
    return;
  }
  opt::GraphKernelOptimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}
void GPUSession::AssignStream(const std::shared_ptr<KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  device::gpu::AssignGpuStream(kernel_graph);
}
void GPUSession::BuildKernel(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  auto kernels = kernel_graph->execution_order();
  device::gpu::CreateGPUKernel(kernels);
}
void GPUSession::AllocateMemory(KernelGraph *kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->AssignMemory(kernel_graph);
}
void GPUSession::RunOpAllocateMemory(const std::vector<tensor::TensorPtr> &input_tensors,
                                     KernelGraph *kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->RunOpAssignMemory(input_tensors, kernel_graph);
}
void GPUSession::RunOpGenKernelEvent(const KernelGraph *graph) const {
  MS_EXCEPTION_IF_NULL(graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->GenKernelEvents(graph);
}
void GPUSession::RunOpClearMemory(KernelGraph *kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->RunOpClearMemory(kernel_graph);
}
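// Helper for LoadInputData below: returns true when the node feeds the update slot (input index
// kAssignUpdateIndex) of an Assign cnode that carries more inputs than a plain Assign (kAssignInputSize),
// i.e. the parameter appears to be overwritten in place while the graph runs.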
namespace {
constexpr auto kAssignInputSize = 3;
constexpr auto kAssignUpdateIndex = 1;
bool UpdatedByAssign(const KernelGraphPtr &kernel_graph, const AnfNodePtr &node) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto manager = kernel_graph->manager();
  if (manager == nullptr) {
    return false;
  }
  auto &node_users = manager->node_users();
  auto iter = node_users.find(node);
  if (iter == node_users.end()) {
    return false;
  }
  auto &users = iter->second;
  return std::any_of(users.begin(), users.end(), [](const std::pair<AnfNodePtr, int64_t> &user) {
    MS_EXCEPTION_IF_NULL(user.first);
    auto output_cnode = user.first->cast<CNodePtr>();
    return output_cnode != nullptr && IsPrimitiveCNode(output_cnode, prim::kPrimAssign) &&
           user.second == kAssignUpdateIndex && output_cnode->inputs().size() > kAssignInputSize;
  });
}
}  // namespace
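// Load host tensor data into device memory for each graph input. Parameters managed by the PS cache are
// skipped; otherwise the tensor's existing device address is reused when it matches the parameter's, and a
// host-to-device sync is issued only when needed.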
void GPUSession::LoadInputData(const std::shared_ptr<KernelGraph> &kernel_graph,
                               const std::vector<tensor::TensorPtr> &inputs_const) const {
  std::vector<tensor::TensorPtr> inputs(inputs_const);
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto &input_nodes = kernel_graph->input_nodes();
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  if (inputs.size() != input_nodes.size()) {
    MS_LOG(EXCEPTION) << "Tensor input:" << inputs.size() << " is not equal graph inputs:" << input_nodes.size();
  }
  for (size_t i = 0; i < inputs.size(); ++i) {
    auto tensor = inputs[i];
    MS_EXCEPTION_IF_NULL(tensor);
    auto input_node = input_nodes[i];
    MS_EXCEPTION_IF_NULL(input_node);
    if (input_node->isa<Parameter>() && AnfAlgo::OutputAddrExist(input_node, 0)) {
#if ENABLE_CPU && ENABLE_GPU
      const std::string &param_name = input_node->fullname_with_scope();
      if (ps::ps_cache_instance.IsHashTable(param_name)) {
        continue;
      }
#endif
      auto pk_node = input_node->cast<ParameterPtr>();
      auto device_address = AnfAlgo::GetMutableOutputAddr(pk_node, 0);
      auto tensor_address = std::dynamic_pointer_cast<device::DeviceAddress>(tensor->device_address());
      bool need_sync = false;
      if (ms_context->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER)) {
        if (tensor_address == nullptr || tensor_address != device_address) {
          need_sync = true;
        }
      } else if (tensor->NeedSyncHostToDevice() || tensor_address == nullptr) {
        need_sync = true;
      } else if (tensor_address != device_address) {
        if (tensor_address->DeviceType() == device_address->DeviceType()) {
          AnfAlgo::SetOutputAddr(tensor_address, 0, pk_node.get());
        } else {
          need_sync = true;
        }
      }
      if (need_sync) {
        if (AnfAlgo::IsParameterWeight(input_node->cast<ParameterPtr>()) || UpdatedByAssign(kernel_graph, input_node) ||
            ms_context->get_param<int>(MS_CTX_EXECUTION_MODE) == kPynativeMode) {
          tensor->set_device_address(device_address);
        }
        MS_EXCEPTION_IF_NULL(device_address);
        if (!device_address->SyncHostToDevice(trans::GetRuntimePaddingShape(pk_node, 0),
                                              LongToSize(tensor->data().nbytes()), tensor->data_type(),
                                              tensor->data_c())) {
          MS_LOG(EXCEPTION) << "SyncHostToDevice failed.";
        }
      }
    }
    tensor->set_sync_status(kNoNeedSync);
  }
}
GraphId GPUSession::CompileGraphImpl(const AnfNodePtrList &lst, const AnfNodePtrList &outputs) {
  // Construct the kernel graph; on success, graph_sum_ is incremented by 1.
  auto graph = ConstructKernelGraph(lst, outputs);
  MS_EXCEPTION_IF_NULL(graph);
  return CompileGraphImpl(graph);
}
GraphId GPUSession::CompileGraphImpl(NotNull<FuncGraphPtr> func_graph) {
  std::vector<KernelGraphPtr> all_graphs;
  auto root_graph = ConstructKernelGraph(func_graph, &all_graphs);
  MS_EXCEPTION_IF_NULL(root_graph);
  if (all_graphs.size() != 1) {
    MS_LOG(EXCEPTION) << "GPU backend does not support multi-graph schedule. Graph num: " << all_graphs.size();
  }
  // Insert maketuple graph output in case of multi-outputs.
  // The ConvertTupleOutputToMaketuple pass will insert TupleGetItem.
  AnfAlgo::InsertMakeTupleForOutput(NOT_NULL(root_graph));
  opt::BackendCommonOptimization(root_graph);
  return CompileGraphImpl(root_graph);
}
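// The main compilation pipeline for a kernel graph: optimization passes, kernel selection, hardware and graph
// kernel optimization, runtime and stream setup, kernel build, optional IR/RDR dumps, and memory allocation
// in graph mode.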
GraphId GPUSession::CompileGraphImpl(KernelGraphPtr graph) {
  // Prepare ms context info for dump .pb graph
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  uint32_t device_id = runtime_instance->device_id();
  auto &json_parser = DumpJsonParser::GetInstance();
  json_parser.Parse();
  // Dump .pb graph before graph optimization
  if (save_graphs) {
    DumpIRProto(graph, "before_opt_" + std::to_string(graph->graph_id()));
  }
  // Graph optimization irrelevant to device data format
  Optimize(graph);
  // Select kernel build info
  SelectKernel(graph);
  // Graph optimization relevant to device data format
  HardwareOptimize(graph);
  // Run final optimization
  FinalOptimize(graph);
  // Graph kernel fusion optimization
  GraphKernelOptimize(graph);
  // Start gpu kernel runtime
  StartKernelRT();
#if ENABLE_CPU && ENABLE_GPU
  InitPsWorker(graph);
#endif
  // Assign CUDA streams
  AssignStream(graph);
  // Dump .pb graph before remove nop nodes
  if (save_graphs) {
    DumpIRProto(graph, "before_removeNop_" + std::to_string(graph->graph_id()));
  }
  // Update Graph Dynamic Shape Attr.
  UpdateGraphDynamicShapeAttr(NOT_NULL(graph));
  graph->UpdateGraphDynamicAttr();
  const bool pynative_mode = context_ptr->get_param<int>(MS_CTX_EXECUTION_MODE) == kPynativeMode;
  // Hide NopOp from execution graph in graph mode
  if (!pynative_mode) {
    opt::HideNopNode(graph.get());
  }
  // Build kernel if node is cnode
  BuildKernel(graph);
#ifdef ENABLE_DUMP_IR
  std::string name = "graph_build";
  DumpGraphParams dump_params = {true, static_cast<int>(kWholeStack)};
  mindspore::RDR::RecordAnfGraph(SubModuleId::SM_SESSION, name, graph, dump_params, ".ir,.pb");
  auto &kernels = graph->execution_order();
  std::string exec_order_name = "graph_exec_order." + std::to_string(graph->graph_id());
  mindspore::RDR::RecordGraphExecOrder(SubModuleId::SM_SESSION, exec_order_name, kernels);
#endif
  // Get summary nodes.
  SetSummaryNodes(graph.get());
  // Dump .pb graph after graph optimization
  if (save_graphs) {
    DumpIRProto(graph, "after_opt_" + std::to_string(graph->graph_id()));
  }
  if (json_parser.e2e_dump_enabled()) {
    std::string final_graph = "trace_code_graph_" + std::to_string(graph->graph_id());
    std::string root_dir = json_parser.path() + "/" + json_parser.net_name() + "/device_" + std::to_string(device_id);
    std::string target_dir = root_dir + "/graphs";
    std::string ir_file_path = target_dir + "/" + "ms_output_" + final_graph + ".ir";
    DumpIRProtoWithSrcInfo(graph, final_graph, target_dir, kDebugWholeStack);
    DumpIR("trace_code_graph", graph, true, kWholeStack, ir_file_path);
    DumpGraphExeOrder("ms_execution_order_graph_" + std::to_string(graph->graph_id()) + ".csv", root_dir,
                      graph->execution_order());
  }
  // Set graph manager.
  MS_EXCEPTION_IF_NULL(context_);
  FuncGraphManagerPtr manager = MakeManager({graph});
  context_->AddManager(manager);
  if (manager) {
    manager->AddFuncGraph(graph);
    graph->set_manager(manager);
  }
  InitAllBucket(graph);
  // Alloc memory in graph mode, including static memory and dynamic memory
  if (!pynative_mode) {
    AllocateMemory(graph.get());
  }
  DumpGraph(graph);
#ifdef ENABLE_DEBUGGER
  if (debugger_ && debugger_->DebuggerBackendEnabled()) {
    debugger_->LoadGraphs(graph);
  }
#endif
  MS_LOG(INFO) << "CompileGraph graph_id: " << graph->graph_id();
  return graph->graph_id();
}
void GPUSession::PreExecuteGraph(const std::shared_ptr<KernelGraph> &kernel_graph,
                                 const std::vector<tensor::TensorPtr> &inputs, VectorRef *outputs) {
  if (debugger_) {
    debugger_->PreExecute(kernel_graph, graph_sum_);
  }
#if ENABLE_CPU && ENABLE_GPU
  // Initialize parameter server
  InitPSParamAndOptim(kernel_graph, inputs);
#endif
}
void GPUSession::PostExecuteGraph(const std::shared_ptr<KernelGraph> &kernel_graph,
                                  const std::vector<tensor::TensorPtr> &inputs, VectorRef *outputs) {
  // Summary
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  if (context_ptr->get_param<bool>(MS_CTX_ENABLE_GPU_SUMMARY)) {
    Summary(kernel_graph.get());
  }
  bool dump_enabled = DumpDataEnabledIteration();
  // debug used for dump
  if (debugger_ && dump_enabled) {
    Dump(kernel_graph);
  } else {
    DumpJsonParser::GetInstance().UpdateDumpIter();
  }
  if (debugger_) {
    debugger_->PostExecute(kernel_graph);
  }
}
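// Launch the graph. A graph with more than one kernel is executed gpu_loopsink_size() times per call (loop
// sink); the PS data-prefetch step counter is advanced for GetNext graphs when the cache is enabled.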
void GPUSession::ExecuteGraph(const std::shared_ptr<KernelGraph> &kernel_graph) {
  int kernel_num = kernel_graph->execution_order().size();
  int64_t loopsize = (kernel_num > 1) ? ConfigManager::GetInstance().gpu_loopsink_size() : 1;
  for (int64_t i = 0; i < loopsize; i++) {
#if ENABLE_CPU && ENABLE_GPU
    std::string channel_name;
    if (ps::PsDataPrefetch::GetInstance().cache_enable() && IsGetNextGraph(kernel_graph, &channel_name)) {
      ps::ps_cache_instance.IncreaseGraphStep(channel_name);
    }
#endif
    Execute(kernel_graph);
  }
}
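// Re-bind each output tensor to the device address of the node/output that produced it. The node then gets a
// fresh (empty) address so a later step cannot rewrite memory the tensor still references; dynamic-shape
// outputs also get their shape refreshed from the inferred shape.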
void GPUSession::UpdateOutputTensors(const VectorRef *outputs,
                                     const std::map<tensor::TensorPtr, session::KernelWithIndex> &tensor_to_node) {
  MS_EXCEPTION_IF_NULL(outputs);
  for (const auto &item : *outputs) {
    if (utils::isa<VectorRefPtr>(item)) {
      const auto &vector_ref = utils::cast<VectorRef>(item);
      UpdateOutputTensors(&vector_ref, tensor_to_node);
    } else if (utils::isa<tensor::TensorPtr>(item)) {
      const auto &tensor = utils::cast<tensor::TensorPtr>(item);
      MS_EXCEPTION_IF_NULL(tensor);
      const auto &iter = tensor_to_node.find(tensor);
      if (iter != tensor_to_node.end()) {
        const auto &node = iter->second.first;
        const auto &output_index = iter->second.second;
        MS_EXCEPTION_IF_NULL(node);
        const auto &address = AnfAlgo::GetMutableOutputAddr(node, output_index);
        // The outputs may share the same tensor, so skip once the tensor has already been given the device address.
        if ((address == nullptr) || (address->GetPtr() == nullptr)) {
          continue;
        }
        tensor->set_device_address(address);
        // Once the device address of a graph cnode output is handed to the tensor, the graph output must be given
        // a new device address, so that the tensor's address is not rewritten by the next step or the next loop.
        // One-shot memory allocation cases must be skipped, since no memory is allocated for them in the next step:
        // 1. non-cnode outputs; 2. communication kernels.
        if (node->isa<CNode>() && !AnfAlgo::IsCommunicationOp(node)) {
          auto new_address = std::make_shared<device::gpu::GPUDeviceAddress>(nullptr, address->GetSize());
          AnfAlgo::SetOutputAddr(new_address, output_index, node.get());
          if (context::GraphKernelFlags::GetInstance().IsEnableGraphKernel()) {
            auto runtime_instance =
              device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
            MS_EXCEPTION_IF_NULL(runtime_instance);
            auto gpu_runtime_instance = dynamic_cast<device::gpu::GPUKernelRuntime *>(runtime_instance);
            MS_EXCEPTION_IF_NULL(gpu_runtime_instance);
            gpu_runtime_instance->SetAddrInvalid(address);
          }
        }
        if (AnfAlgo::IsDynamicShape(node)) {
          const auto &updated_shape = AnfAlgo::GetOutputInferShape(node, output_index);
          ShapeVector int_shape;
          std::transform(updated_shape.begin(), updated_shape.end(), std::back_inserter(int_shape), SizeToInt);
          tensor->set_shape(int_shape);
        }
      }
      if (tensor->NeedSyncDeviceToHostImmediately()) {
        tensor->data_sync(false);
        tensor->set_device_address(nullptr);
        tensor->set_sync_status(kNeedSyncHostToDevice);
      }
    }
  }
}
void GPUSession::Execute(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  if (!runtime_instance->Run(kernel_graph.get(), false)) {
    MS_LOG(EXCEPTION) << "GPU execute graph failed!";
  }
}
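// PyNative single-op path. BuildOpImpl compiles and caches a one-op kernel graph per graph_info; ops in
// kOpCacheAllowList bypass the cache and are rebuilt (and erased again in RunOpImpl) on every call.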
void GPUSession::BuildOpImpl(const OpRunInfo &op_run_info, const GraphInfo &graph_info,
                             const std::vector<tensor::TensorPtr> &input_tensors,
                             const std::vector<int64_t> &tensors_mask) {
  // Check if the graph cache exists.
  if (run_op_graphs_.find(graph_info) != run_op_graphs_.end() &&
      kOpCacheAllowList.find(op_run_info.op_name) == kOpCacheAllowList.end()) {
    return;
  }
  // Prepare the graph
  auto kernel_graph = ConstructSingleOpGraph(op_run_info, input_tensors, tensors_mask);
  MS_EXCEPTION_IF_NULL(kernel_graph);
  SelectKernel(kernel_graph);
  RunOpHardwareOptimize(kernel_graph);
  StartKernelRT();
  RunOpHideNopNode(kernel_graph);
  BuildKernel(kernel_graph);
  run_op_graphs_[graph_info] = kernel_graph;
}
void GPUSession::RunOpImpl(const GraphInfo &graph_info, OpRunInfo *op_run_info,
                           std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,
                           const std::vector<int64_t> &tensors_mask) {
  MS_EXCEPTION_IF_NULL(input_tensors);
  MS_EXCEPTION_IF_NULL(op_run_info);
  BuildOpImpl(*op_run_info, graph_info, *input_tensors, tensors_mask);
  EraseValueNodeTensor(tensors_mask, input_tensors);
  // wait for allreduce
  for (auto &tensor : *input_tensors) {
    if (tensor->NeedWaitDevice()) {
      tensor->WaitDevice();
    }
  }
  // run op
  auto kernel_graph = run_op_graphs_[graph_info];
  MS_EXCEPTION_IF_NULL(kernel_graph);
  RunOpRemoveNopNode(kernel_graph);
  RunOpAllocateMemory(*input_tensors, kernel_graph.get());
  RunOpGenKernelEvent(kernel_graph.get());
  // Execute the computation
  LoadInputData(kernel_graph, *input_tensors);
  Execute(kernel_graph);
  // Fetch outputs
  UpdateOutputs(kernel_graph, outputs, *input_tensors);
  // update output abstract of dynamic op to op_run_info
  if (op_run_info->is_dynamic_shape) {
    UpdateOutputAbstract(kernel_graph, op_run_info);
  }
  RunOpClearMemory(kernel_graph.get());
  if (kOpCacheAllowList.find(op_run_info->op_name) != kOpCacheAllowList.end()) {
    run_op_graphs_.erase(graph_info);
  }
}
void GPUSession::Dump(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  if (debugger_->DebuggerBackendEnabled()) {
    MS_EXCEPTION_IF_NULL(kernel_graph);
    E2eDump::DumpData(kernel_graph.get(), device_id_, debugger_.get());
  } else {
    DumpJsonParser::GetInstance().UpdateDumpIter();
  }
}
bool GPUSession::DumpDataEnabledIteration() const {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  return runtime_instance->DumpDataEnabledIteration();
}
void GPUSession::SyncStream() {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  auto ret = runtime_instance->SyncStream();
  if (!ret) {
    MS_LOG(EXCEPTION) << "Sync stream error!";
  }
}
std::shared_ptr<device::Bucket> GPUSession::CreateBucket(uint32_t bucket_id, uint32_t bucket_size) {
  return std::make_shared<device::gpu::GPUBucket>(bucket_id, bucket_size);
}
}  // namespace gpu
}  // namespace session
}  // namespace mindspore