
gpu_session.cc

/**
 * Copyright 2019-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "backend/session/gpu_session.h"
#include <string>
#include <utility>
#include "backend/optimizer/common/helper.h"
#include "backend/optimizer/common/optimizer.h"
#include "backend/optimizer/common/pass_manager.h"
#include "backend/optimizer/common/common_backend_optimization.h"
#include "backend/optimizer/gpu/adam_weight_decay_fusion.h"
#include "backend/optimizer/gpu/adam_fusion.h"
#include "backend/optimizer/gpu/alltoall_fusion.h"
#include "backend/optimizer/gpu/apply_momentum_weight_scale_fusion.h"
#include "backend/optimizer/gpu/apply_momentum_scale_fusion.h"
#include "backend/optimizer/gpu/apply_momentum_weight_fusion.h"
#include "backend/optimizer/gpu/batch_norm_relu_fusion.h"
#include "backend/optimizer/gpu/batch_norm_relu_grad_fusion.h"
#include "backend/optimizer/gpu/batch_norm_add_relu_fusion.h"
#include "backend/optimizer/gpu/post_batch_norm_add_relu_fusion.h"
#include "backend/optimizer/gpu/batch_norm_add_relu_grad_fusion.h"
#include "backend/optimizer/gpu/combine_momentum_fusion.h"
#include "backend/optimizer/gpu/combine_cast_fusion.h"
#include "backend/optimizer/gpu/cudnn_inplace_fusion.h"
#include "backend/optimizer/gpu/insert_format_transform_op.h"
#include "backend/optimizer/gpu/replace_momentum_cast_fusion.h"
#include "backend/optimizer/gpu/replace_addn_fusion.h"
#include "backend/optimizer/gpu/print_reduce_fusion.h"
#include "backend/optimizer/gpu/bce_with_logits_loss_fusion.h"
#include "backend/optimizer/gpu/remove_format_transform_pair.h"
#include "backend/optimizer/gpu/remove_redundant_format_transform.h"
#include "backend/optimizer/gpu/reduce_precision_fusion.h"
#include "backend/optimizer/gpu/insert_cast_gpu.h"
#include "backend/optimizer/gpu/relu_v2_pass.h"
#include "backend/optimizer/gpu/add_relu_v2_fusion.h"
#include "backend/optimizer/gpu/add_relu_grad_v2_fusion.h"
#include "backend/optimizer/gpu/matmul_biasadd_fusion.h"
#ifdef ENABLE_GPU_INFER
#include "backend/optimizer/trt_pass/graph_converter.h"
#endif
#include "backend/optimizer/graph_kernel/graph_kernel_optimization.h"
#include "backend/optimizer/pass/communication_op_fusion.h"
#include "backend/optimizer/gpu/concat_outputs_for_all_gather.h"
#include "backend/optimizer/pass/getitem_tuple.h"
#include "backend/optimizer/pass/optimize_updatestate.h"
#include "backend/optimizer/gpu/adjust_depend_for_parallel_optimizer_recompute_all_gather_fusion.h"
#include "common/trans.h"
#include "debug/anf_ir_dump.h"
#include "debug/dump_proto.h"
#ifdef ENABLE_DEBUGGER
#include "debug/data_dump/e2e_dump.h"
#include "debug/data_dump/dump_json_parser.h"
#include "debug/debugger/proto_exporter.h"
#include "debug/data_dump/dump_utils.h"
#include "debug/tensor_load.h"
#else
#include "debug/debugger/proto_exporter_stub.h"
#endif
#include "runtime/device/gpu/gpu_kernel_build.h"
#include "runtime/device/gpu/gpu_kernel_runtime.h"
#include "runtime/device/gpu/gpu_stream_assign.h"
#include "runtime/device/gpu/kernel_info_setter.h"
#include "runtime/device/kernel_runtime_manager.h"
#include "runtime/device/gpu/cuda_driver.h"
#include "runtime/device/gpu/distribution/collective_init.h"
#include "runtime/device/gpu/gpu_bucket.h"
#include "runtime/device/gpu/gpu_device_address.h"
#include "utils/ms_utils.h"
#include "utils/config_manager.h"
#include "utils/ms_context.h"
#include "utils/context/graph_kernel_flags.h"
#include "utils/utils.h"
#include "abstract/utils.h"
#if ENABLE_CPU && ENABLE_GPU
#include "ps/util.h"
#include "ps/ps_cache/ps_cache_manager.h"
#endif
#ifdef ENABLE_DUMP_IR
#include "debug/rdr/running_data_recorder.h"
#endif

namespace mindspore {
namespace session {
namespace gpu {
using AnfAlgo = mindspore::session::AnfRuntimeAlgorithm;
using CollectiveInitializer = device::gpu::CollectiveInitializer;
using GetLocalRankId = device::gpu::GetLocalRankId;
using InitNCCLComm = device::gpu::InitNCCLComm;

void GPUSession::Init(uint32_t device_id) {
  const void *collective_handle_ = CollectiveInitializer::instance().collective_handle();
  bool collective_inited = CollectiveInitializer::instance().collective_inited();
  if (collective_inited && collective_handle_ != nullptr) {
    auto get_local_rank_funcptr =
      reinterpret_cast<GetLocalRankId>(dlsym(const_cast<void *>(collective_handle_), "local_rank_id"));
    MS_EXCEPTION_IF_NULL(get_local_rank_funcptr);
    device_id = IntToUint((*get_local_rank_funcptr)());
  }
  bool ret = device::gpu::CudaDriver::SetDevice(UintToInt(device_id));
  if (!ret) {
    MS_LOG(EXCEPTION) << "GPUSession failed to set current device id:" << device_id;
  }
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  ms_context->set_param<uint32_t>(MS_CTX_DEVICE_ID, device_id);
  if (collective_inited) {
    if (collective_handle_ != nullptr) {
      MS_LOG(INFO) << "Start initializing NCCL communicator for device " << device_id;
      auto init_nccl_comm_funcptr =
        reinterpret_cast<InitNCCLComm>(dlsym(const_cast<void *>(collective_handle_), "InitNCCLComm"));
      MS_EXCEPTION_IF_NULL(init_nccl_comm_funcptr);
      (*init_nccl_comm_funcptr)();
      MS_LOG(INFO) << "End initializing NCCL communicator.";
      rank_id_ = GetRankId();
    }
  }
#ifndef ENABLE_SECURITY
  auto &json_parser = DumpJsonParser::GetInstance();
  // Dump json config file if dump is enabled
  json_parser.CopyDumpJsonToDir(rank_id_);
  json_parser.CopyMSCfgJsonToDir(rank_id_);
#endif
  MS_LOG(INFO) << "Set device id " << device_id << " for gpu session.";
  InitExecutor(kGPUDevice, device_id);
}

void GPUSession::SelectKernel(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  device::gpu::FormatTransformChecker::GetInstance().CheckSupportFormatTransform(kernel_graph);
  for (const auto &kernel_node : kernel_graph->execution_order()) {
    MS_EXCEPTION_IF_NULL(kernel_node);
    device::gpu::SetKernelInfo(kernel_node);
  }
}

void GPUSession::StartKernelRT() const {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  if (!runtime_instance->Init()) {
    MS_LOG(EXCEPTION) << "GPU start kernel runtime failed";
  }
}

void GPUSession::Optimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto pm = std::make_shared<opt::PassManager>();
#ifdef ENABLE_GPU_INFER
  pm->AddPass(std::make_shared<opt::GraphConverter>());
#endif
  pm->AddPass(std::make_shared<opt::MatMulBiasAddFusion>());
  pm->AddPass(std::make_shared<opt::AdamWeightDecayFusion>());
  pm->AddPass(std::make_shared<opt::AdamFusion>());
  pm->AddPass(std::make_shared<opt::AllToAllFusion>());
  pm->AddPass(std::make_shared<opt::ApplyMomentumWeightDecayScaleFusion>());
  pm->AddPass(std::make_shared<opt::ApplyMomentumScaleFusion>());
  pm->AddPass(std::make_shared<opt::ApplyMomentumWeightDecayFusion>());
  if (!graphkernel::GraphKernelFlags::GetInstance().IsEnableGraphKernel()) {
    pm->AddPass(std::make_shared<opt::CastAllFusion>("cast_all"));
  }
  pm->AddPass(std::make_shared<opt::CombineMomentumFusion>("combine_momentum"));
  pm->AddPass(std::make_shared<opt::ReplaceMomentumCastFusion>());
  pm->AddPass(std::make_shared<opt::ReplaceAddNFusion>());
  pm->AddPass(std::make_shared<opt::PrintReduceFusion>("print_reduce"));
  pm->AddPass(std::make_shared<opt::BCEWithLogitsLossFusion>());
  pm->AddPass(std::make_shared<opt::InsertCastGPU>("insert_cast_gpu"));
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}

void GPUSession::HardwareOptimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto pm = std::make_shared<opt::PassManager>();
  pm->AddPass(std::make_shared<opt::BatchNormReluFusion>());
  pm->AddPass(std::make_shared<opt::BatchNormReluGradFusion>());
  pm->AddPass(std::make_shared<opt::BatchNormAddReluFusion>());
  pm->AddPass(std::make_shared<opt::PostBatchNormAddReluFusion>());
  pm->AddPass(std::make_shared<opt::BatchNormAddReluGradFusion>());
  pm->AddPass(std::make_shared<opt::InsertFormatTransformOp>());
  pm->AddPass(std::make_shared<opt::RemoveFormatTransformPair>());
  pm->AddPass(std::make_shared<opt::RemoveRedundantFormatTransform>());
  // Remove node only used by UpdateState, in order to ensure the correct execution sequence in CudnnInplaceAggregate.
  pm->AddPass(std::make_shared<opt::OptimizeUpdateState>());
  pm->AddPass(std::make_shared<opt::CudnnInplaceAggregate>());
  pm->AddPass(std::make_shared<opt::ReluV2Pass>());
  pm->AddPass(std::make_shared<opt::AddReluV2Fusion>());
  pm->AddPass(std::make_shared<opt::AddReluGradV2Fusion>());
  pm->AddPass(std::make_shared<opt::AllReduceFusion>());
  pm->AddPass(std::make_shared<opt::AdjustDependForParallelOptimizerRecomputeAllGatherFusion>(
    "adjust_depend_for_parallel_optimizer_recompute_all_gather_fusion"));
  pm->AddPass(std::make_shared<opt::AllGatherFusion>());
  pm->AddPass(std::make_shared<opt::ConcatOutputsForAllGather>());
  pm->AddPass(std::make_shared<opt::GetitemTuple>());
  pm->AddPass(std::make_shared<opt::ReducePrecisionFusion>("reduce_precision"));
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}

void GPUSession::RunOpOptimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto pm = std::make_shared<opt::PassManager>();
  pm->AddPass(std::make_shared<opt::BCEWithLogitsLossFusion>());
  pm->AddPass(std::make_shared<opt::InsertCastGPU>("insert_cast_gpu"));
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}

void GPUSession::RunOpHardwareOptimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto pm = std::make_shared<opt::PassManager>();
  pm->AddPass(std::make_shared<opt::ReducePrecisionFusion>("reduce_precision"));
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}

void GPUSession::GraphKernelOptimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
  if (!graphkernel::GraphKernelFlags::GetInstance().IsEnableGraphKernel()) {
    return;
  }
  graphkernel::GraphKernelOptimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
}

void GPUSession::AssignStream(const std::shared_ptr<KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  device::gpu::AssignGpuStream(kernel_graph);
}

void GPUSession::BuildKernel(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  auto kernels = kernel_graph->execution_order();
  device::gpu::CreateGPUKernel(kernels);
}

void GPUSession::AllocateMemory(const KernelGraph *kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->AssignMemory(*kernel_graph);
}

void GPUSession::RunOpAllocateMemory(const std::vector<tensor::TensorPtr> &input_tensors,
                                     const KernelGraph *kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->RunOpAssignMemory(input_tensors, *kernel_graph);
}

void GPUSession::RunOpGenKernelEvent(const KernelGraph *graph) const {
  MS_EXCEPTION_IF_NULL(graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->GenKernelEvents(*graph);
}

void GPUSession::RunOpClearMemory(const KernelGraph *kernel_graph) const {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  runtime_instance->RunOpClearMemory(*kernel_graph);
}

namespace {
constexpr auto kAssignInputSize = 3;
constexpr auto kAssignUpdateIndex = 1;
bool UpdatedByAssign(const KernelGraphPtr &kernel_graph, const AnfNodePtr &node) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto manager = kernel_graph->manager();
  if (manager == nullptr) {
    return false;
  }
  auto &node_users = manager->node_users();
  auto iter = node_users.find(node);
  if (iter == node_users.end()) {
    return false;
  }
  auto &users = iter->second;
  return std::any_of(users.begin(), users.end(), [](const std::pair<AnfNodePtr, int64_t> &user) {
    MS_EXCEPTION_IF_NULL(user.first);
    auto output_cnode = user.first->cast<CNodePtr>();
    return output_cnode != nullptr && IsPrimitiveCNode(output_cnode, prim::kPrimAssign) &&
           user.second == kAssignUpdateIndex && output_cnode->inputs().size() > kAssignInputSize;
  });
}

size_t UpdateGraphInputAbstract(const AnfNodePtr input_node, const tensor::TensorPtr tensor) {
  MS_EXCEPTION_IF_NULL(input_node);
  MS_EXCEPTION_IF_NULL(tensor);
  size_t size = LongToSize(tensor->data().nbytes());
  if (!input_node->isa<Parameter>()) {
    return size;
  }
  auto input_param = input_node->cast<ParameterPtr>();
  if (input_param != nullptr && input_param->has_dynamic_shape()) {
    auto tensor_shape = tensor->shape();
    std::vector<size_t> shape_tmp;
    (void)std::transform(tensor_shape.begin(), tensor_shape.end(), std::back_inserter(shape_tmp), IntToSize);
    AnfAlgo::SetOutputInferTypeAndShape({AnfAlgo::GetOutputInferDataType(input_node, 0)}, {shape_tmp},
                                        input_node.get());
    size = abstract::ShapeSize(shape_tmp) * abstract::TypeIdSize(tensor->data_type());
  }
  return size;
}

bool CheckIfNeedSync(const tensor::TensorPtr &tensor, const DeviceAddressPtr &device_address,
                     const ParameterPtr &pk_node) {
  MS_EXCEPTION_IF_NULL(tensor);
  MS_EXCEPTION_IF_NULL(pk_node);
  auto tensor_address = std::dynamic_pointer_cast<device::DeviceAddress>(tensor->device_address());
  bool need_sync = false;
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  if (ms_context->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER)) {
    if (tensor_address == nullptr || tensor_address != device_address) {
      need_sync = true;
    }
  } else if (tensor->NeedSyncHostToDevice() || tensor_address == nullptr) {
    need_sync = true;
  } else if (tensor_address != device_address) {
    if (tensor_address->DeviceType() == device_address->DeviceType()) {
      AnfAlgo::SetOutputAddr(tensor_address, 0, pk_node.get());
    } else {
      need_sync = true;
    }
  }
  return need_sync;
}
}  // namespace

void GPUSession::LoadInputData(const std::shared_ptr<KernelGraph> &kernel_graph,
                               const std::vector<tensor::TensorPtr> &inputs_const) const {
  std::vector<tensor::TensorPtr> inputs(inputs_const);
  MS_EXCEPTION_IF_NULL(kernel_graph);
  auto &input_nodes = kernel_graph->input_nodes();
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  if (inputs.size() != input_nodes.size()) {
    MS_LOG(EXCEPTION) << "Tensor input:" << inputs.size() << " is not equal graph inputs:" << input_nodes.size();
  }
  for (size_t i = 0; i < inputs.size(); ++i) {
    auto tensor = inputs[i];
    MS_EXCEPTION_IF_NULL(tensor);
    auto input_node = input_nodes[i];
    MS_EXCEPTION_IF_NULL(input_node);
    if (input_node->isa<Parameter>() && AnfAlgo::OutputAddrExist(input_node, 0)) {
#if ENABLE_CPU && ENABLE_GPU
      const std::string &param_name = input_node->fullname_with_scope();
      if (ps::ps_cache_instance.IsHashTable(param_name)) {
        continue;
      }
#endif
      auto pk_node = input_node->cast<ParameterPtr>();
      auto device_address = AnfAlgo::GetMutableOutputAddr(pk_node, 0);
      MS_EXCEPTION_IF_NULL(device_address);
      bool need_sync = CheckIfNeedSync(tensor, device_address, pk_node);
      if (need_sync) {
        if (AnfAlgo::IsParameterWeight(pk_node) || UpdatedByAssign(kernel_graph, input_node) ||
            ms_context->get_param<int>(MS_CTX_EXECUTION_MODE) == kPynativeMode) {
          tensor->set_device_address(device_address);
        }
        auto size = UpdateGraphInputAbstract(input_node, tensor);
        if (!device_address->SyncHostToDevice(trans::GetRuntimePaddingShape(pk_node, 0), size, tensor->data_type(),
                                              tensor->data_c())) {
          MS_LOG(EXCEPTION) << "SyncHostToDevice failed.";
        }
        if (kernel_graph->IsUpdatedParameter(pk_node)) {
          tensor->SetIsUpdateByDevice();
        }
      }
    }
    tensor->set_sync_status(kNoNeedSync);
  }
}

GraphId GPUSession::CompileGraphImpl(const AnfNodePtrList &lst, const AnfNodePtrList &outputs) {
  // Construct graph, if successfully, graph_sum_ + 1
  auto graph = ConstructKernelGraph(lst, outputs);
  MS_EXCEPTION_IF_NULL(graph);
  return CompileGraphImpl(graph);
}

GraphId GPUSession::CompileGraphImpl(NotNull<FuncGraphPtr> func_graph) {
  std::vector<KernelGraphPtr> all_graphs;
  auto root_graph = ConstructKernelGraph(func_graph, &all_graphs);
  MS_EXCEPTION_IF_NULL(root_graph);
  if (all_graphs.size() != 1) {
    MS_LOG(EXCEPTION) << "Gpu backend does not support multi-graph schedule, graph num is " << all_graphs.size();
  }
  // Insert maketuple graph output in case of multi-outputs.
  // The ConvertTupleOutputToMaketuple pass will insert TupleGetItem.
  AnfAlgo::InsertMakeTupleForOutput(NOT_NULL(root_graph));
  opt::BackendCommonOptimization(root_graph);
  return CompileGraphImpl(root_graph);
}

GraphId GPUSession::CompileGraphImpl(const KernelGraphPtr &graph) {
  MS_EXCEPTION_IF_NULL(graph);
  // Prepare ms context info for dump .pb graph
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
#ifndef ENABLE_SECURITY
  auto &json_parser = DumpJsonParser::GetInstance();
  json_parser.Parse();
#endif
#ifdef ENABLE_DUMP_IR
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
  // Dump .pb graph before graph optimization
  if (save_graphs) {
    DumpIRProto(graph, "before_opt_" + std::to_string(graph->graph_id()));
  }
#endif
  // Graph optimization irrelevant to device data format
  Optimize(graph);
  // Select kernel build info
  SelectKernel(graph);
  // Graph optimization relevant to device data format
  HardwareOptimize(graph);
  // Run final optimization
  FinalOptimize(graph);
  // Graph kernel fusion optimization
  GraphKernelOptimize(graph);
  // Start gpu kernel runtime
  StartKernelRT();
#if ENABLE_CPU && ENABLE_GPU
  InitPsWorker(graph);
#endif
  // Assign CUDA streams
  AssignStream(graph);
#ifdef ENABLE_DUMP_IR
  // Dump .pb graph before remove nop nodes
  if (save_graphs) {
    DumpIRProto(graph, "before_removeNop_" + std::to_string(graph->graph_id()));
  }
#endif
  opt::AddDynamicShapeAttrPass(graph);
  const bool pynative_mode = context_ptr->get_param<int>(MS_CTX_EXECUTION_MODE) == kPynativeMode;
  // Hide NopOp from execution graph in graph mode
  if (!pynative_mode) {
    opt::HideNopNode(graph.get());
  }
  // Build kernel if node is cnode
  BuildKernel(graph);
#ifdef ENABLE_DUMP_IR
  std::string name = "graph_build";
  DumpGraphParams dump_params = {true, static_cast<int>(kWholeStack)};
  (void)mindspore::RDR::RecordAnfGraph(SubModuleId::SM_SESSION, name, graph, dump_params, ".ir,.pb");
  auto &kernels = graph->execution_order();
  std::string exec_order_name = "graph_exec_order." + std::to_string(graph->graph_id());
  (void)mindspore::RDR::RecordGraphExecOrder(SubModuleId::SM_SESSION, exec_order_name, kernels);
#endif
#ifndef ENABLE_SECURITY
  // Get summary nodes.
  SetSummaryNodes(graph.get());
#endif
  // Dump .pb graph after graph optimization
#ifdef ENABLE_DUMP_IR
  if (save_graphs) {
    DumpIRProto(graph, "after_opt_" + std::to_string(graph->graph_id()));
  }
#endif
#ifndef ENABLE_SECURITY
  if (json_parser.e2e_dump_enabled()) {
    graph->set_root_graph_id(graph->graph_id());
    std::string final_graph = "trace_code_graph_" + std::to_string(graph->graph_id());
    std::string root_dir = json_parser.path() + "/rank_" + std::to_string(rank_id_);
    std::string target_dir = root_dir + "/graphs";
    std::string ir_file_path = target_dir + "/" + "ms_output_" + final_graph + ".ir";
    DumpIRProtoWithSrcInfo(graph, final_graph, target_dir, kDebugWholeStack);
    DumpIR("trace_code_graph", graph, true, kWholeStack, ir_file_path);
    DumpGraphExeOrder("ms_execution_order_graph_" + std::to_string(graph->graph_id()) + ".csv", root_dir,
                      graph->execution_order());
  }
#endif
  // Set graph manager.
  MS_EXCEPTION_IF_NULL(context_);
  FuncGraphManagerPtr manager = MakeManager({graph});
  context_->AddManager(manager);
  if (manager) {
    manager->AddFuncGraph(graph);
    graph->set_manager(manager);
  }
  InitAllBucket(graph);
  // Alloc memory in graph mode, including static memory and dynamic memory
  if (!pynative_mode) {
    AllocateMemory(graph.get());
  }
  DumpGraph(graph);
#ifdef ENABLE_DEBUGGER
  if (debugger_ && debugger_->DebuggerBackendEnabled()) {
    debugger_->LoadGraphs(graph);
  }
#endif
  MS_LOG(INFO) << "CompileGraph graph_id: " << graph->graph_id();
  return graph->graph_id();
}

void GPUSession::PreExecuteGraph(const std::shared_ptr<KernelGraph> &kernel_graph,
                                 const std::vector<tensor::TensorPtr> &inputs, VectorRef *outputs) {
#ifdef ENABLE_DEBUGGER
  if (debugger_) {
    debugger_->PreExecute(kernel_graph);
  }
  DumpSetup(kernel_graph);
#endif
#if ENABLE_CPU && ENABLE_GPU
  // Initialize parameter server
  InitPSParamAndOptim(kernel_graph, inputs);
#endif
}

void GPUSession::PostExecuteGraph(const std::shared_ptr<KernelGraph> &kernel_graph,
                                  const std::vector<tensor::TensorPtr> &inputs, VectorRef *outputs) {
  // Summary
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
#ifndef ENABLE_SECURITY
  if (context_ptr->get_param<bool>(MS_CTX_ENABLE_GPU_SUMMARY)) {
    Summary(kernel_graph.get());
  }
#endif
#ifdef ENABLE_DEBUGGER
  if (debugger_ && debugger_->DebuggerBackendEnabled()) {
    debugger_->LoadParametersAndConst(kernel_graph);
  }
  // debug used for dump
  if (debugger_ && debugger_->CheckDebuggerDumpEnabled()) {
    Dump(kernel_graph);
  }
  if (debugger_) {
    debugger_->PostExecute();
  }
#endif
}

void GPUSession::ExecuteGraph(const std::shared_ptr<KernelGraph> &kernel_graph) {
  int kernel_num = kernel_graph->execution_order().size();
  int64_t loopsize = (kernel_num > 1) ? ConfigManager::GetInstance().gpu_loopsink_size() : 1;
  for (int64_t i = 0; i < loopsize; i++) {
#if ENABLE_CPU && ENABLE_GPU
    std::string channel_name;
    if (ps::PsDataPrefetch::GetInstance().cache_enable() && IsGetNextGraph(kernel_graph, &channel_name)) {
      ps::ps_cache_instance.IncreaseGraphStep(channel_name);
    }
#endif
    Execute(kernel_graph);
  }
}

void GPUSession::UpdateOutputTensors(const VectorRef *outputs,
                                     const std::map<tensor::TensorPtr, session::KernelWithIndex> &tensor_to_node,
                                     std::map<DeviceAddressPtr, DeviceAddressPtr> *new_to_old_device_address) {
  MS_EXCEPTION_IF_NULL(outputs);
  for (const auto &item : *outputs) {
    if (utils::isa<VectorRefPtr>(item)) {
      const auto &vector_ref = utils::cast<VectorRef>(item);
      UpdateOutputTensors(&vector_ref, tensor_to_node, new_to_old_device_address);
    } else if (utils::isa<tensor::TensorPtr>(item)) {
      const auto &tensor = utils::cast<tensor::TensorPtr>(item);
      MS_EXCEPTION_IF_NULL(tensor);
      const auto &iter = tensor_to_node.find(tensor);
      if (iter != tensor_to_node.end()) {
        const auto &node = iter->second.first;
        const auto &output_index = iter->second.second;
        MS_EXCEPTION_IF_NULL(node);
        // When the parameter does not have a user in the graph and is used as an output, the device address is null,
        // and there is no need to set the device address for tensor.
        if (!AnfAlgo::OutputAddrExist(node, output_index, true)) {
          continue;
        }
        auto address = AnfAlgo::GetMutableOutputAddr(node, output_index);
        // The outputs may have the same tensor, so need skip when the tensor has been set to device address.
        if ((address == nullptr) || (address->GetPtr() == nullptr)) {
          // If the device address in the node is invalid, you need to find out whether there is a corresponding
          // device address in the new to old device address map to check whether the device address in the node
          // has been replaced with a new one.
          if ((*new_to_old_device_address).find(address) != (*new_to_old_device_address).end()) {
            address = (*new_to_old_device_address)[address];
          } else {
            continue;
          }
        }
        tensor->set_device_address(address);
        // When the device address of graph cnode output is set in tensor, the graph output need be set new device
        // address, to avoid that the device address context of tensor be rewritten in the next step or next loop.
        // But one time memory application scenarios need to be skipped, because the memory is not allocated next step:
        // 1. Non cnode 2. Communication kernel.
        bool ps_mode = false;
#if ((defined ENABLE_CPU) && (!defined _WIN32))
        ps_mode = ps::PSContext::instance()->is_ps_mode();
#endif
        if (node->isa<CNode>() && !AnfAlgo::IsCommunicationOp(node) && !ps_mode) {
          auto new_address = std::make_shared<device::gpu::GPUDeviceAddress>(nullptr, address->GetSize());
          AnfAlgo::SetOutputAddr(new_address, output_index, node.get());
          (*new_to_old_device_address)[new_address] = address;
          if (graphkernel::GraphKernelFlags::GetInstance().IsEnableGraphKernel()) {
            auto runtime_instance =
              device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
            MS_EXCEPTION_IF_NULL(runtime_instance);
            auto gpu_runtime_instance = dynamic_cast<device::gpu::GPUKernelRuntime *>(runtime_instance);
            gpu_runtime_instance->SetAddrInvalid(address);
          }
        }
        if (AnfAlgo::IsDynamicShape(node)) {
          const auto &updated_shape = AnfAlgo::GetOutputInferShape(node, output_index);
          ShapeVector int_shape;
          std::transform(updated_shape.begin(), updated_shape.end(), std::back_inserter(int_shape), SizeToInt);
          tensor->set_shape(int_shape);
        }
      }
      if (tensor->NeedSyncDeviceToHostImmediately()) {
        tensor->data_sync(false);
        tensor->set_device_address(nullptr);
        tensor->set_sync_status(kNeedSyncHostToDevice);
      }
    }
  }
}

void GPUSession::Execute(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  if (!runtime_instance->Run(*kernel_graph, false)) {
    MS_LOG(EXCEPTION) << "GPU execute graph failed!";
  }
}

KernelGraphPtr GPUSession::BuildOpImpl(const OpRunInfo &op_run_info, const GraphInfo &graph_info,
                                       const std::vector<tensor::TensorPtr> &input_tensors,
                                       const std::vector<int64_t> &tensors_mask) {
  // Check if the graph cache exists.
  auto it = run_op_graphs_.find(graph_info);
  if (it != run_op_graphs_.end() && kOpCacheBlackList.find(op_run_info.op_name) == kOpCacheBlackList.end()) {
    return it->second;
  }
  // Prepare the graph
  const auto &kernel_graph = ConstructSingleOpGraph(op_run_info, input_tensors, tensors_mask);
  MS_EXCEPTION_IF_NULL(kernel_graph);
  RunOpOptimize(kernel_graph);
  SelectKernel(kernel_graph);
  RunOpHardwareOptimize(kernel_graph);
  StartKernelRT();
  RunOpHideNopNode(kernel_graph);
  BuildKernel(kernel_graph);
  auto enable_op_graph_cache = MsContext::GetInstance()->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_OP_GRAPH_CACHE);
  if (enable_op_graph_cache) {
    run_op_graphs_[graph_info] = kernel_graph;
  }
  return kernel_graph;
}

void GPUSession::RunOpImplOrigin(const GraphInfo &graph_info, OpRunInfo *op_run_info,
                                 std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,
                                 const std::vector<int64_t> &tensors_mask) {
  RunOpImpl(graph_info, op_run_info, input_tensors, outputs, tensors_mask);
}

void GPUSession::RunOpImpl(const GraphInfo &graph_info, OpRunInfo *op_run_info,
                           std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,
                           const std::vector<int64_t> &tensors_mask) {
  MS_EXCEPTION_IF_NULL(input_tensors);
  MS_EXCEPTION_IF_NULL(op_run_info);
  ProcessInputTensorsForHeterogeneous("GPU", *input_tensors);
  const auto &kernel_graph = BuildOpImpl(*op_run_info, graph_info, *input_tensors, tensors_mask);
  EraseValueNodeTensor(tensors_mask, input_tensors);
  // wait for allreduce
  for (auto &tensor : *input_tensors) {
    MS_EXCEPTION_IF_NULL(tensor);
    if (tensor->NeedWaitDevice()) {
      tensor->WaitDevice();
    }
  }
  // run op
  MS_EXCEPTION_IF_NULL(kernel_graph);
  RunOpRemoveNopNode(kernel_graph);
  RunOpAllocateMemory(*input_tensors, kernel_graph.get());
  RunOpGenKernelEvent(kernel_graph.get());
  // Execute the computation
  LoadInputData(kernel_graph, *input_tensors);
  Execute(kernel_graph);
  // Fetch outputs
  std::map<tensor::TensorPtr, session::KernelWithIndex> tensor_to_node;
  UpdateOutputs(kernel_graph, outputs, *input_tensors, &tensor_to_node);
  // update output abstract of dynamic op to op_run_info
  if (op_run_info->is_dynamic_shape) {
    UpdateOutputAbstract(kernel_graph, op_run_info);
  }
  RunOpClearMemory(kernel_graph.get());
  if (kOpCacheBlackList.find(op_run_info->op_name) != kOpCacheBlackList.end()) {
    run_op_graphs_.erase(graph_info);
  }
}

#ifdef ENABLE_DEBUGGER
void GPUSession::DumpSetup(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  MS_LOG(INFO) << "Start!";
  MS_EXCEPTION_IF_NULL(kernel_graph);
  E2eDump::DumpSetup(kernel_graph.get());
  MS_LOG(INFO) << "Finish!";
}

void GPUSession::Dump(const std::shared_ptr<KernelGraph> &kernel_graph) const {
  if (debugger_->DebuggerBackendEnabled()) {
    MS_EXCEPTION_IF_NULL(kernel_graph);
    E2eDump::DumpRunIter(kernel_graph, rank_id_);
    E2eDump::DumpData(kernel_graph.get(), rank_id_, debugger_.get());
  } else {
    DumpJsonParser::GetInstance().UpdateDumpIter();
  }
}

bool GPUSession::DumpDataEnabledIteration() const {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  return runtime_instance->DumpDataEnabledIteration();
}
#endif

void GPUSession::SyncStream() const {
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
  auto ret = runtime_instance->SyncStream();
  if (!ret) {
    MS_LOG(EXCEPTION) << "Sync stream error!";
  }
}

std::shared_ptr<device::Bucket> GPUSession::CreateBucket(uint32_t bucket_id, uint32_t bucket_size) {
  auto bucket = std::make_shared<device::gpu::GPUBucket>(bucket_id, bucket_size);
  auto kernel_runtime = device::KernelRuntimeManager::Instance().GetCurrentKernelRuntime();
  MS_EXCEPTION_IF_NULL(kernel_runtime);
  auto compute_stream = kernel_runtime->compute_stream();
  auto communication_stream = kernel_runtime->communication_stream();
  MS_EXCEPTION_IF_NULL(compute_stream);
  MS_EXCEPTION_IF_NULL(communication_stream);
  MS_EXCEPTION_IF_NULL(bucket);
  bucket->Init({compute_stream}, {communication_stream});
  return bucket;
}
}  // namespace gpu
}  // namespace session
}  // namespace mindspore