
gpu_session.cc 24 kB

/**
 * Copyright 2019-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "backend/session/gpu_session.h"
// Standard headers for dlsym, std::any_of, std::shared_ptr, std::pair, and std::vector used below.
#include <dlfcn.h>
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "backend/optimizer/common/helper.h"
#include "backend/optimizer/common/optimizer.h"
#include "backend/optimizer/common/pass_manager.h"
#include "backend/optimizer/common/common_backend_optimization.h"
#include "backend/optimizer/gpu/adam_weight_decay_fusion.h"
#include "backend/optimizer/gpu/adam_fusion.h"
#include "backend/optimizer/gpu/apply_momentum_weight_scale_fusion.h"
#include "backend/optimizer/gpu/apply_momentum_scale_fusion.h"
#include "backend/optimizer/gpu/apply_momentum_weight_fusion.h"
#include "backend/optimizer/gpu/batch_norm_relu_fusion.h"
#include "backend/optimizer/gpu/batch_norm_relu_grad_fusion.h"
#include "backend/optimizer/gpu/batch_norm_add_relu_fusion.h"
#include "backend/optimizer/gpu/post_batch_norm_add_relu_fusion.h"
#include "backend/optimizer/gpu/batch_norm_add_relu_grad_fusion.h"
#include "backend/optimizer/gpu/combine_momentum_fusion.h"
#include "backend/optimizer/gpu/combine_cast_fusion.h"
#include "backend/optimizer/gpu/cudnn_inplace_fusion.h"
#include "backend/optimizer/gpu/insert_format_transform_op.h"
#include "backend/optimizer/gpu/replace_momentum_cast_fusion.h"
#include "backend/optimizer/gpu/replace_addn_fusion.h"
#include "backend/optimizer/gpu/remove_format_transform_pair.h"
#include "backend/optimizer/gpu/remove_redundant_format_transform.h"
#include "backend/optimizer/gpu/reduce_precision_fusion.h"
#include "backend/optimizer/gpu/relu_v2_pass.h"
#include "backend/optimizer/gpu/add_relu_v2_fusion.h"
#include "backend/optimizer/gpu/add_relu_grad_v2_fusion.h"
#include "backend/optimizer/graph_kernel/add_atomic_clean_gpu.h"
#include "backend/optimizer/graph_kernel/add_stitch_atomic_clean_gpu.h"
#include "backend/optimizer/graph_kernel/arithmetic_simplify.h"
#include "backend/optimizer/graph_kernel/basic_ops_fusion.h"
#include "backend/optimizer/graph_kernel/clean_all_in_once.h"
#include "backend/optimizer/graph_kernel/depend_formater.h"
#include "backend/optimizer/graph_kernel/eliminate_redundant_output.h"
#include "backend/optimizer/graph_kernel/tensor_promotion.h"
#include "backend/optimizer/graph_kernel/graph_kernel_splitter.h"
#include "backend/optimizer/graph_kernel/graph_kernel_expander.h"
#include "backend/optimizer/graph_kernel/raise_reduction_precision.h"
#include "backend/optimizer/graph_kernel/graph_kernel_cse.h"
#include "backend/optimizer/graph_kernel/shape_ops_splitter.h"
#include "backend/optimizer/graph_kernel/value_graph_binder.h"
#include "backend/optimizer/graph_kernel/parallel_fusion.h"
#include "backend/optimizer/graph_kernel/optimize_assign.h"
#include "backend/optimizer/graph_kernel/split_assign.h"
#include "backend/optimizer/pass/communication_op_fusion.h"
#include "backend/optimizer/pass/getitem_tuple.h"
#include "common/trans.h"
#include "debug/data_dump/e2e_dump_util.h"
#include "debug/tensor_load.h"
#include "debug/dump_proto.h"
#include "runtime/device/gpu/gpu_kernel_build.h"
#include "runtime/device/gpu/gpu_kernel_runtime.h"
#include "runtime/device/gpu/gpu_stream_assign.h"
#include "runtime/device/gpu/kernel_info_setter.h"
#include "runtime/device/kernel_runtime_manager.h"
#include "runtime/device/gpu/cuda_driver.h"
#include "runtime/device/gpu/distribution/collective_init.h"
#include "utils/ms_utils.h"
#include "utils/config_manager.h"
#include "utils/ms_context.h"
#if ENABLE_CPU && ENABLE_GPU
#include "ps/util.h"
#include "ps/ps_cache/ps_cache_manager.h"
#endif
namespace mindspore {
namespace session {
namespace gpu {
using AnfAlgo = mindspore::session::AnfRuntimeAlgorithm;
using CollectiveInitializer = device::gpu::CollectiveInitializer;
using GetLocalRankId = device::gpu::GetLocalRankId;
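
// Initialize the GPU session for the given device. If collective communication has been
// initialized, the configured device id is overridden by the process-local rank id queried
// from the collective library before the CUDA device is bound and the executor is created.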
void GPUSession::Init(uint32_t device_id) {
  const void *collective_handle_ = CollectiveInitializer::instance().collective_handle();
  bool collective_inited = CollectiveInitializer::instance().collective_inited();
  if (collective_inited && collective_handle_ != nullptr) {
    auto get_local_rank_funcptr =
      reinterpret_cast<GetLocalRankId>(dlsym(const_cast<void *>(collective_handle_), "local_rank_id"));
    MS_EXCEPTION_IF_NULL(get_local_rank_funcptr);
    device_id = IntToUint((*get_local_rank_funcptr)());
  }
  bool ret = device::gpu::CudaDriver::set_current_device(UintToInt(device_id));
  if (!ret) {
    MS_LOG(EXCEPTION) << "GPUSession failed to set current device id.";
  }
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  ms_context->set_param<uint32_t>(MS_CTX_DEVICE_ID, device_id);
  MS_LOG(INFO) << "Set device id " << device_id << " for gpu session.";
  InitExecutor(kGPUDevice, device_id);
}
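
// Select a kernel build info (device format and data type) for every node in the graph's
// execution order, after checking whether format transformation is supported for this graph.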
void GPUSession::SelectKernel(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  device::gpu::FormatTransformChecker::GetInstance().CheckSupportFormatTransform(kernel_graph);
  for (const auto &kernel_node : kernel_graph->execution_order()) {
    MS_EXCEPTION_IF_NULL(kernel_node);
    device::gpu::SetKernelInfo(kernel_node);
  }
}

void GPUSession::StartKernelRT() const {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  if (!runtime_instance->Init()) {
    MS_LOG(EXCEPTION) << "GPU start kernel runtime failed";
  }
}

void GPUSession::Optimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto pm = std::make_shared<opt::PassManager>();
  pm->AddPass(std::make_shared<opt::AdamWeightDecayFusion>());
  pm->AddPass(std::make_shared<opt::AdamFusion>());
  pm->AddPass(std::make_shared<opt::ApplyMomentumWeightDecayScaleFusion>());
  pm->AddPass(std::make_shared<opt::ApplyMomentumScaleFusion>());
  pm->AddPass(std::make_shared<opt::ApplyMomentumWeightDecayFusion>());
  if (!(context_ptr->get_param<bool>(MS_CTX_ENABLE_GRAPH_KERNEL))) {
    pm->AddPass(std::make_shared<opt::CastAllFusion>("cast_all"));
  }
  pm->AddPass(std::make_shared<opt::CombineMomentumFusion>("combine_momentum"));
  pm->AddPass(std::make_shared<opt::ReplaceMomentumCastFusion>());
  pm->AddPass(std::make_shared<opt::ReplaceAddNFusion>());
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}
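
// Format- and device-dependent optimization: fuse BatchNorm/ReLU patterns, insert and prune
// format transform ops, aggregate cuDNN in-place ops, fuse AllReduce, and reduce precision
// where the selected kernels require it.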
void GPUSession::HardwareOptimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto pm = std::make_shared<opt::PassManager>();
  pm->AddPass(std::make_shared<opt::BatchNormReluFusion>());
  pm->AddPass(std::make_shared<opt::BatchNormReluGradFusion>());
  pm->AddPass(std::make_shared<opt::BatchNormAddReluFusion>());
  pm->AddPass(std::make_shared<opt::PostBatchNormAddReluFusion>());
  pm->AddPass(std::make_shared<opt::BatchNormAddReluGradFusion>());
  pm->AddPass(std::make_shared<opt::InsertFormatTransformOp>());
  pm->AddPass(std::make_shared<opt::RemoveFormatTransformPair>());
  pm->AddPass(std::make_shared<opt::RemoveRedundantFormatTransform>());
  pm->AddPass(std::make_shared<opt::CudnnInplaceAggregate>());
  pm->AddPass(std::make_shared<opt::ReluV2Pass>());
  pm->AddPass(std::make_shared<opt::AddReluV2Fusion>());
  pm->AddPass(std::make_shared<opt::AddReluGradV2Fusion>());
  pm->AddPass(std::make_shared<opt::AllReduceFusion>());
  pm->AddPass(std::make_shared<opt::GetitemTuple>());
  pm->AddPass(std::make_shared<opt::ReducePrecisionFusion>("reduce_precision"));
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}

void GPUSession::RunOpHardwareOptimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto pm = std::make_shared<opt::PassManager>();
  pm->AddPass(std::make_shared<opt::ReducePrecisionFusion>("reduce_precision"));
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}

void GPUSession::GraphKernelOptimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  if (!(context_ptr->get_param<bool>(MS_CTX_ENABLE_GRAPH_KERNEL))) {
    return;
  }
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto pm = std::make_shared<opt::PassManager>("graph_kernel_pm");
  std::vector<PrimitivePtr> duplicated_ops = {prim::kPrimReshape, prim::kPrimExpandDims, prim::kPrimCast};
  pm->AddPass(std::make_shared<opt::SplitAssign>());
  pm->AddPass(std::make_shared<opt::DependFormater>());  // Create more fusion opportunities.
  pm->AddPass(std::make_shared<opt::GraphKernelExpander>());
  pm->AddPass(std::make_shared<opt::BasicOpsFusion>());
  pm->AddPass(std::make_shared<opt::EliminateRedundantOutput>());
  pm->AddPass(std::make_shared<opt::OptimizeAssign>());
  pm->AddPass(std::make_shared<opt::EliminateRedundantOutput>());
  pm->AddPass(std::make_shared<opt::RaiseReductionPrecision>());
  pm->AddPass(std::make_shared<opt::GraphKernelCSE>());
  pm->AddPass(std::make_shared<opt::ArithmeticSimplify>());
  pm->AddPass(std::make_shared<opt::GraphKernelCSE>());
  pm->AddPass(std::make_shared<opt::TensorPromotion>());
  pm->AddPass(std::make_shared<opt::ShapeOpsSplitter>(duplicated_ops));
  pm->AddPass(std::make_shared<opt::GraphKernelSplitter>());
  pm->AddPass(std::make_shared<opt::GraphKernelCSE>());
  // The CSE may output a graph with repeated outputs.
  pm->AddPass(std::make_shared<opt::EliminateRedundantOutput>());
  // After Simplify and Splitter, many redundant getitem/maketuple
  // nodes are exposed; use the GetitemTuple pass to delete them.
  pm->AddPass(std::make_shared<opt::GetitemTuple>());
  pm->AddPass(std::make_shared<opt::AtomicCleanInsertter>());
  pm->AddPass(std::make_shared<opt::StitchAtomicCleanInsertter>());
  pm->AddPass(std::make_shared<opt::DependFormater>());  // Prevent fake loops in parallel fusion.
  pm->AddPass(std::make_shared<opt::ParallelOpFusion>(kGPUDevice, opt::ParallelConfig(7)));
  pm->AddPass(std::make_shared<opt::BindValueToGraph>());
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}

void GPUSession::AssignStream(const std::shared_ptr<KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  device::gpu::AssignGpuStream(kernel_graph);
}

void GPUSession::BuildKernel(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  device::gpu::GpuBuild(kernel_graph);
}

void GPUSession::AllocateMemory(KernelGraph *kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->AssignMemory(kernel_graph);
}

void GPUSession::RunOpAllocateMemory(const std::vector<tensor::TensorPtr> &input_tensors,
                                     KernelGraph *kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->RunOpAssignMemory(input_tensors, kernel_graph);
}

void GPUSession::RunOpClearMemory(KernelGraph *kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->RunOpClearMemory(kernel_graph);
}

namespace {
constexpr auto kAssignInputSize = 3;
constexpr auto kAssignUpdateIndex = 1;
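// Returns true when `node` is consumed at input position kAssignUpdateIndex by an Assign
// CNode that has more than kAssignInputSize inputs, i.e. the node's device memory is
// rewritten in place during execution. LoadInputData uses this to let the host tensor
// adopt the graph's device address before syncing.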
bool UpdatedByAssign(const KernelGraphPtr &kernel_graph, const AnfNodePtr &node) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto manager = kernel_graph->manager();
  if (manager == nullptr) {
    return false;
  }
  auto &node_users = manager->node_users();
  auto iter = node_users.find(node);
  if (iter == node_users.end()) {
    return false;
  }
  auto &users = iter->second;
  return std::any_of(users.begin(), users.end(), [](const std::pair<AnfNodePtr, int64_t> &user) {
    MS_EXCEPTION_IF_NULL(user.first);
    auto output_cnode = user.first->cast<CNodePtr>();
    return output_cnode != nullptr && IsPrimitiveCNode(output_cnode, prim::kPrimAssign) &&
           user.second == kAssignUpdateIndex && output_cnode->inputs().size() > kAssignInputSize;
  });
}
}  // namespace
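
// Copy user input tensors into the graph's input parameters. For each parameter that already
// owns a device address, decide whether a host-to-device sync is needed: in PyNative inference
// the sync happens whenever the tensor's address differs from the graph's; otherwise an
// existing address of the same device type is rebound to the parameter instead of copied.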
void GPUSession::LoadInputData(const std::shared_ptr<KernelGraph> &kernel_graph,
                               const std::vector<tensor::TensorPtr> &inputs_const) const {
  std::vector<tensor::TensorPtr> inputs(inputs_const);
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto &input_nodes = kernel_graph->input_nodes();
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  if (inputs.size() != input_nodes.size()) {
    MS_LOG(EXCEPTION) << "Tensor input size " << inputs.size() << " is not equal to graph input size "
                      << input_nodes.size();
  }
  for (size_t i = 0; i < inputs.size(); ++i) {
    auto tensor = inputs[i];
    MS_EXCEPTION_IF_NULL(tensor);
    auto input_node = input_nodes[i];
    MS_EXCEPTION_IF_NULL(input_node);
    if (input_node->isa<Parameter>() && AnfAlgo::OutputAddrExist(input_node, 0)) {
#if ENABLE_CPU && ENABLE_GPU
      const std::string &param_name = input_node->fullname_with_scope();
      if (ps::ps_cache_instance.IsHashTable(param_name)) {
        continue;
      }
#endif
      auto pk_node = input_node->cast<ParameterPtr>();
      auto device_address = AnfAlgo::GetMutableOutputAddr(pk_node, 0);
      auto tensor_address = std::dynamic_pointer_cast<device::DeviceAddress>(tensor->device_address());
      bool need_sync = false;
      if (ms_context->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER)) {
        if (tensor_address == nullptr || tensor_address != device_address) {
          need_sync = true;
        }
      } else if (tensor->NeedSyncHostToDevice() || tensor_address == nullptr) {
        need_sync = true;
      } else if (tensor_address != device_address) {
        if (tensor_address->DeviceType() == device_address->DeviceType()) {
          AnfAlgo::SetOutputAddr(tensor_address, 0, pk_node.get());
        } else {
          need_sync = true;
        }
      }
      if (need_sync) {
        if (AnfAlgo::IsParameterWeight(input_node->cast<ParameterPtr>()) || UpdatedByAssign(kernel_graph, input_node) ||
            ms_context->get_param<int>(MS_CTX_EXECUTION_MODE) == kPynativeMode) {
          tensor->set_device_address(device_address);
        }
        MS_EXCEPTION_IF_NULL(device_address);
        if (!device_address->SyncHostToDevice(trans::GetRuntimePaddingShape(pk_node, 0),
                                              LongToSize(tensor->data().nbytes()), tensor->data_type(),
                                              tensor->data_c())) {
          MS_LOG(EXCEPTION) << "SyncHostToDevice failed.";
        }
      }
    }
    tensor->set_sync_status(kNoNeedSync);
  }
}
void GPUSession::Execute(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  if (!runtime_instance->Run(kernel_graph.get(), false)) {
    MS_LOG(EXCEPTION) << "GPU execute graph failed!";
  }
}

GraphId GPUSession::CompileGraphImpl(const AnfNodePtrList &lst, const AnfNodePtrList &outputs) {
  // Construct the kernel graph; graph_sum_ is incremented on success.
  auto graph = ConstructKernelGraph(lst, outputs);
  MS_EXCEPTION_IF_NULL(graph);
  return CompileGraphImpl(graph);
}

GraphId GPUSession::CompileGraphImpl(NotNull<FuncGraphPtr> func_graph) {
  std::vector<KernelGraphPtr> all_graphs;
  auto root_graph = ConstructKernelGraph(func_graph, &all_graphs);
  MS_EXCEPTION_IF_NULL(root_graph);
  if (all_graphs.size() != 1) {
    MS_LOG(EXCEPTION) << "GPU backend does not support multi-graph scheduling, graph num: " << all_graphs.size();
  }
  opt::BackendCommonOptimization(root_graph);
  return CompileGraphImpl(root_graph);
}

GraphId GPUSession::CompileGraphImpl(KernelGraphPtr graph) {
  // Prepare ms context info for dumping the .pb graph
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
  // Dump .pb graph before graph optimization
  if (save_graphs) {
    DumpIRProto(graph, "before_opt_" + std::to_string(graph->graph_id()));
  }
  // Graph optimization irrelevant to device data format
  Optimize(graph);
  // Select kernel build info
  SelectKernel(graph);
  // Graph optimization relevant to device data format
  HardwareOptimize(graph);
  // Graph kernel fusion optimization
  GraphKernelOptimize(graph);
  // Start gpu kernel runtime
  StartKernelRT();
#if ENABLE_CPU && ENABLE_GPU
  InitPsWorker(graph);
#endif
  // Assign CUDA streams
  AssignStream(graph);
  // Dump .pb graph before removing nop nodes
  if (save_graphs) {
    DumpIRProto(graph, "before_removeNop_" + std::to_string(graph->graph_id()));
  }
  // Update the graph's dynamic shape attribute.
  UpdateGraphDynamicShapeAttr(NOT_NULL(graph));
  graph->UpdateGraphDynamicAttr();
  // Hide NopOp from the execution graph in graph mode
  if (context_ptr->get_param<int>(MS_CTX_EXECUTION_MODE) != kPynativeMode) {
    opt::HideNopNode(graph.get());
  }
  // Build a kernel for every cnode
  BuildKernel(graph);
  // Get summary nodes.
  SetSummaryNodes(graph.get());
  // Dump .pb graph after graph optimization
  if (save_graphs) {
    DumpIRProto(graph, "after_opt_" + std::to_string(graph->graph_id()));
  }
  // Set the graph manager.
  MS_EXCEPTION_IF_NULL(context_);
  FuncGraphManagerPtr manager = MakeManager({graph});
  context_->AddManager(manager);
  if (manager) {
    manager->AddFuncGraph(graph);
    graph->set_manager(manager);
  }
  // Allocate memory, including static and dynamic memory
  AllocateMemory(graph.get());
#ifdef ENABLE_DEBUGGER
  if (debugger_ && debugger_->DebuggerBackendEnabled()) {
    debugger_->LoadGraphs(graph);
  }
#endif
  MS_LOG(INFO) << "CompileGraph graph_id: " << graph->graph_id();
  return graph->graph_id();
}
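
// Run a compiled graph: sync value-node device addresses (PyNative), load user inputs,
// execute the graph gpu_loopsink_size() times when loop sinking applies, then collect
// summaries and run the post-iteration debug hooks.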
void GPUSession::RunGraphImpl(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &inputs,
                              VectorRef *outputs) {
  auto &kernel_graph = graphs_[graph_id];
  MS_LOG(INFO) << "RunGraph graph_id: " << graph_id;
  // In pynative mode, device addresses of tensors in value nodes change.
  SyncValueNodeDeviceAddr(kernel_graph);
  // Load input data from user input
  LoadInputData(kernel_graph, inputs);
  if (debugger_) {
    debugger_->PreExecute(kernel_graph, graph_sum_);
  }
#if ENABLE_CPU && ENABLE_GPU
  // Initialize parameter server
  InitPSParamAndOptim(kernel_graph, inputs);
#endif
  MS_EXCEPTION_IF_NULL(kernel_graph);
  // It's an InitDataset graph if kernel_num == 1; skip the loop.
  size_t kernel_num = kernel_graph->execution_order().size();
  int64_t loopsize = (kernel_num > 1) ? ConfigManager::GetInstance().gpu_loopsink_size() : 1;
  for (int64_t i = 0; i < loopsize; i++) {
#if ENABLE_CPU && ENABLE_GPU
    std::string channel_name;
    if (ps::PsDataPrefetch::GetInstance().cache_enable() && IsGetNextGraph(graph_id, &channel_name)) {
      ps::ps_cache_instance.IncreaseGraphStep(channel_name);
    }
#endif
    Execute(kernel_graph);
  }
  // In pynative mode, device addresses of tensors in value nodes need to be cleaned.
  CleanValueNodeDeviceAddr(kernel_graph);
  // Summary
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  if (context_ptr->get_param<bool>(MS_CTX_ENABLE_GPU_SUMMARY)) {
    Summary(kernel_graph.get());
  }
  PostIterationDbg(kernel_graph);
}
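
// Single-op (PyNative) compile path: build and cache a one-op kernel graph keyed by
// graph_info, so repeated executions of the same op skip kernel selection and build.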
void GPUSession::BuildOpImpl(const OpRunInfo &op_run_info, const GraphInfo &graph_info,
                             const std::vector<tensor::TensorPtr> &input_tensors,
                             const std::vector<int64_t> &tensors_mask) {
  // Check if the graph cache exists.
  if (run_op_graphs_.find(graph_info) != run_op_graphs_.end()) {
    return;
  }
  // Prepare the graph
  auto kernel_graph = ConstructSingleOpGraph(op_run_info, input_tensors, tensors_mask);
  MS_EXCEPTION_IF_NULL(kernel_graph);
  SelectKernel(kernel_graph);
  RunOpHardwareOptimize(kernel_graph);
  StartKernelRT();
  RunOpHideNopNode(kernel_graph);
  BuildKernel(kernel_graph);
  run_op_graphs_[graph_info] = kernel_graph;
}

void GPUSession::RunOpImpl(const GraphInfo &graph_info, OpRunInfo *op_run_info,
                           std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,
                           const std::vector<int64_t> &tensors_mask) {
  MS_EXCEPTION_IF_NULL(input_tensors);
  MS_EXCEPTION_IF_NULL(op_run_info);
  BuildOpImpl(*op_run_info, graph_info, *input_tensors, tensors_mask);
  EraseValueNodeTensor(tensors_mask, input_tensors);
  // Run op
  auto kernel_graph = run_op_graphs_[graph_info];
  MS_EXCEPTION_IF_NULL(kernel_graph);
  RunOpRemoveNopNode(kernel_graph);
  RunOpAllocateMemory(*input_tensors, kernel_graph.get());
  // Execute the computation
  LoadInputData(kernel_graph, *input_tensors);
  Execute(kernel_graph);
  // Fetch outputs
  UpdateOutputs(kernel_graph, outputs, *input_tensors);
  // Update the output abstract of a dynamic-shape op in op_run_info
  if (op_run_info->is_dynamic_shape) {
    UpdateOutputAbstract(kernel_graph, op_run_info);
  }
  RunOpClearMemory(kernel_graph.get());
}

void GPUSession::Dump(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  if (debugger_->DebuggerBackendEnabled()) {
    MS_EXCEPTION_IF_NULL(kernel_graph);
    E2eDumpUtil::DumpData(kernel_graph.get(), device_id_, debugger_.get());
  } else {
    DumpJsonParser::GetInstance().UpdateDumpIter();
  }
}

bool GPUSession::DumpDataEnabledIteration() const {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  return runtime_instance->DumpDataEnabledIteration();
}

void GPUSession::PostIterationDbg(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  bool dump_enabled = DumpDataEnabledIteration();
  // Debugger used for dump
  if (debugger_ && dump_enabled) {
    Dump(kernel_graph);
  } else {
    DumpJsonParser::GetInstance().UpdateDumpIter();
  }
  if (debugger_) {
    debugger_->PostExecute();
  }
}

void GPUSession::SyncValueNodeDeviceAddr(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  if (context_ptr->get_param<int>(MS_CTX_EXECUTION_MODE) != kPynativeMode) {
    return;
  }
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->SyncValueNodeDeviceAddr(kernel_graph.get());
}

void GPUSession::CleanValueNodeDeviceAddr(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  if (context_ptr->get_param<int>(MS_CTX_EXECUTION_MODE) != kPynativeMode) {
    return;
  }
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->CleanValueNodeDeviceAddr(kernel_graph.get());
}

void GPUSession::SyncStream() {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  auto ret = runtime_instance->SyncStream();
  if (!ret) {
    MS_LOG(EXCEPTION) << "Sync stream error!";
  }
}
}  // namespace gpu
}  // namespace session
}  // namespace mindspore