You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

init.cc 22 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <pybind11/operators.h>
  17. #include <pybind11/stl.h>
  18. #include "kernel/oplib/oplib.h"
  19. #include "pipeline/pipeline.h"
  20. #include "operator/composite/composite.h"
  21. #include "ir/signature.h"
  22. #include "pynative/pynative_execute.h"
  23. #include "utils/symbolic.h"
  24. #include "pybind_api/api_register.h"
  25. #include "pipeline/parse/python_adapter.h"
  26. #include "utils/summary/event_writer.h"
  27. #include "utils/config_manager.h"
  28. #include "parallel/context.h"
  29. #include "parallel/device_manager.h"
  30. #include "parallel/costmodel_context.h"
  31. #ifdef ENABLE_GPU_COLLECTIVE
  32. #include "device/gpu/distribution/collective_init.h"
  33. #else
  34. #include "device/gpu/distribution/collective_fake_init.h"
  35. #endif
  36. namespace py = pybind11;
  37. using FuncGraph = mindspore::FuncGraph;
  38. using EnvInstance = mindspore::EnvInstance;
  39. using ExecutorPy = mindspore::pipeline::ExecutorPy;
  40. using Pipeline = mindspore::pipeline::Pipeline;
  41. using PrimitivePy = mindspore::PrimitivePy;
  42. using MetaFuncGraph = mindspore::MetaFuncGraph;
  43. using EventWriter = mindspore::summary::EventWriter;
  44. using OpLib = mindspore::kernel::OpLib;
  45. using ParallelContext = mindspore::parallel::ParallelContext;
  46. using CostModelContext = mindspore::parallel::CostModelContext;
  47. // Interface with python
  48. PYBIND11_MODULE(_c_expression, m) {
  49. m.doc() = "MindSpore c plugin";
  50. (void)py::class_<MetaFuncGraph, std::shared_ptr<MetaFuncGraph>>(*m, "MetaFuncGraph_")
  51. .def_readonly(mindspore::PYTHON_METAFUNCGRAPH_FLAG, &mindspore::MetaFuncGraph::parse_info_)
  52. .def(py::init<std::string &>());
  53. auto fns = mindspore::PybindDefineRegister::AllFuncs();
  54. for (auto &item : fns) {
  55. item.second(&m);
  56. }
  57. // Class Pipeline interface
  58. (void)py::class_<ExecutorPy, std::shared_ptr<ExecutorPy>>(m, "Executor_")
  59. .def_static("get_instance", &ExecutorPy::GetInstance, "Executor get_instance.")
  60. .def("__call__", &ExecutorPy::Run, py::arg("args"), py::arg("phase") = py::str(""), "Executor run function.")
  61. .def("del_net_res", &ExecutorPy::DelNetRes, py::arg("network_id") = py::str(""), "Delete network resource.")
  62. .def("get_func_graph", &ExecutorPy::GetFuncGraph, py::arg("phase") = py::str(""), "Get graph pointer.")
  63. .def("get_func_graph_proto", &ExecutorPy::GetFuncGraphProto, py::arg("phase") = py::str(""),
  64. py::arg("type") = py::str("onnx_ir"), "Get graph proto string by specifying ir type.")
  65. .def("compile", &ExecutorPy::Compile, py::arg("obj"), py::arg("args"), py::arg("phase") = py::str(""),
  66. py::arg("use_vm") = py::bool_(false), "Compile obj by executor.")
  67. .def("get_parameter_layout", &ExecutorPy::GetParameterLayout, py::arg("phase") = py::str("train"),
  68. "Get Parameter Tensor Layout Dictionary.")
  69. .def("get_strategy", &ExecutorPy::GetCNodeStrategy, py::arg("phase") = py::str("train"),
  70. "Get CNode Strategy Dictionary.")
  71. .def("get_allreduce_fusion", &ExecutorPy::GetAllreduceFusion, py::arg("phase") = py::str("train"),
  72. "Get Allreduce Fusion Dictionary.")
  73. .def("build_data_graph", &ExecutorPy::BuildGraph, py::arg("build_params"), py::arg("phase") = py::str("train"),
  74. py::arg("broadcast_params") = py::dict(), "Build data graph.")
  75. .def("has_compiled", &ExecutorPy::HasCompiled, py::arg("phase") = py::str(""), "get if cell compiled.")
  76. .def("run_init_graph", &ExecutorPy::RunInitGraph, "Run init Graph.");
  77. // Class Graph interface
  78. (void)py::class_<FuncGraph, mindspore::FuncGraphPtr>(m, "FuncGraph").def(py::init());
  79. (void)py::class_<EnvInstance, std::shared_ptr<EnvInstance>>(m, "EnvInstance_")
  80. .def_readonly(mindspore::PYTHON_ENVINSTANCE_FLAG, &mindspore::EnvInstance::parse_info_)
  81. .def(py::init());
  82. (void)m.def("generate_key", &mindspore::pipeline::GenerateKey, "Generate the function graph key.");
  83. (void)m.def("real_run_op", &mindspore::pynative::RunOp, "Run op pynatively.");
  84. (void)m.def("reset_op_id", &mindspore::pipeline::ResetOpId, "Reset Operator Id");
  85. (void)m.def("init_hccl", &mindspore::pipeline::InitHccl, "Init Hccl");
  86. (void)m.def("finalize_hccl", &mindspore::pipeline::FinalizeHccl, "Finalize Hccl");
  87. (void)m.def("verify_inputs_signature", &mindspore::pipeline::VerifyInputSignature, "Verify input signature.");
  88. (void)m.def("init_exec_dataset", &mindspore::pipeline::InitExecDataset, py::arg("queue_name"), py::arg("size"),
  89. py::arg("batch_size"), py::arg("types"), py::arg("shapes"), py::arg("input_indexs"),
  90. py::arg("phase") = py::str("dataset"), "Init and exec dataset.");
  91. (void)m.def("_set_dataset_mode_config", &mindspore::ConfigManager::SetDatasetModeConfig, "API for set dataset mode.");
  92. (void)m.def("init_backend", &mindspore::pipeline::InitBackend, "Init Backend.");
  93. (void)m.def("export_graph", &mindspore::pipeline::ExportGraph, "Export Graph.");
  94. (void)py::class_<mindspore::MsContext, std::shared_ptr<mindspore::MsContext>>(m, "MSContext")
  95. .def_static("get_instance", &mindspore::MsContext::GetInstance, "Get ms context instance.")
  96. .def("get_backend_policy", &mindspore::MsContext::backend_policy, "Get backend policy.")
  97. .def("set_backend_policy", &mindspore::MsContext::set_backend_policy, "Set backend policy.")
  98. .def("get_execution_mode", &mindspore::MsContext::execution_mode, "Get execution mode.")
  99. .def("set_execution_mode", &mindspore::MsContext::set_execution_mode, "Set execution mode.")
  100. .def("set_precompile_only", &mindspore::MsContext::set_precompile_only, "Set enable precompile only.")
  101. .def("get_precompile_only", &mindspore::MsContext::precompile_only, "Get enable precompile only.")
  102. .def("get_device_target", &mindspore::MsContext::device_target, "Get device target.")
  103. .def("set_device_target", &mindspore::MsContext::set_device_target, "Set device target.")
  104. .def("get_device_id", &mindspore::MsContext::device_id, "Get device id.")
  105. .def("set_device_id", &mindspore::MsContext::set_device_id, "Set device id.")
  106. .def("open_tsd", &mindspore::MsContext::OpenTsd, "Open tdt dataset client.")
  107. .def("close_tsd", &mindspore::MsContext::CloseTsd, "Close tdt dataset client.")
  108. .def("set_task_sink_flag", &mindspore::MsContext::set_enable_task_sink, "Set enable task sink.")
  109. .def("get_task_sink_flag", &mindspore::MsContext::enable_task_sink, "Get whether to enable task sink.")
  110. .def("get_save_graphs_flag", &mindspore::MsContext::save_graphs_flag, "Get whether to save graphs.")
  111. .def("set_save_graphs_flag", &mindspore::MsContext::set_save_graphs_flag, "Set whether to save graphs.")
  112. .def("get_ir_fusion_flag", &mindspore::MsContext::ir_fusion_flag, "Get whether to enable ir fusion.")
  113. .def("set_ir_fusion_flag", &mindspore::MsContext::set_ir_fusion_flag, "Set whether to enable ir fusion.")
  114. .def("get_auto_mixed_precision_flag", &mindspore::MsContext::auto_mixed_precision_flag,
  115. "Get whether to enable auto mixed precision.")
  116. .def("set_auto_mixed_precision_flag", &mindspore::MsContext::set_auto_mixed_precision_flag,
  117. "Set whether to enable auto mixed precision.")
  118. .def("get_enable_reduce_precision_flag", &mindspore::MsContext::enable_reduce_precision,
  119. "Get whether to enable reduce precision.")
  120. .def("set_enable_reduce_precision_flag", &mindspore::MsContext::set_enable_reduce_precision,
  121. "Set whether to enable reduce precision.")
  122. .def("get_save_graphs_path", &mindspore::MsContext::save_graphs_path, "Get save graphs path.")
  123. .def("set_save_graphs_path", &mindspore::MsContext::set_save_graphs_path, "Set save graphs path.")
  124. .def("get_loop_sink_flag", &mindspore::MsContext::loop_sink_flag, "Get whether to enable loop sink.")
  125. .def("set_loop_sink_flag", &mindspore::MsContext::set_loop_sink_flag, "Set whether to enable loop sink.")
  126. .def("get_enable_mem_reuse", &mindspore::MsContext::enable_mem_reuse, "Get whether to enable mem reuse.")
  127. .def("set_enable_mem_reuse", &mindspore::MsContext::set_enable_mem_reuse, "Set whether to enable mem reuse.")
  128. .def("get_save_ms_model_flag", &mindspore::MsContext::save_ms_model_flag, "Get whether to save ms model.")
  129. .def("set_save_ms_model_flag", &mindspore::MsContext::set_save_ms_model_flag, "Set whether to save ms model.")
  130. .def("get_save_ms_model_path", &mindspore::MsContext::save_ms_model_path, "Get path to save ms model.")
  131. .def("set_save_ms_model_path", &mindspore::MsContext::set_save_ms_model_path, "Set path to save ms model")
  132. .def("get_enable_gpu_summary", &mindspore::MsContext::enable_gpu_summary, "Get whether to enable gpu summary.")
  133. .def("set_enable_gpu_summary", &mindspore::MsContext::set_enable_gpu_summary, "Set whether to enable gpu summary.")
  134. .def("get_enable_dump", &mindspore::MsContext::enable_dump, "Get whether to enable dump.")
  135. .def("set_enable_dump", &mindspore::MsContext::set_enable_dump, "Set whether to enable dump.")
  136. .def("get_save_dump_path", &mindspore::MsContext::save_dump_path, "Get path to dump.")
  137. .def("set_save_dump_path", &mindspore::MsContext::set_save_dump_path, "Set path to dump.")
  138. .def("get_enable_dynamic_mem_pool", &mindspore::MsContext::enable_dynamic_mem_pool,
  139. "Get whether to enable dynamic mem pool.")
  140. .def("set_enable_dynamic_mem_pool", &mindspore::MsContext::set_enable_dynamic_mem_pool,
  141. "Set whether to enable dynamic mem pool.")
  142. .def("set_graph_memory_max_size", &mindspore::MsContext::set_graph_memory_max_size, "set graph memory max size.")
  143. .def("set_variable_memory_max_size", &mindspore::MsContext::set_variable_memory_max_size,
  144. "set variable memory max size");
  145. (void)py::class_<ParallelContext, std::shared_ptr<ParallelContext>>(m, "AutoParallelContext")
  146. .def_static("get_instance", &ParallelContext::GetInstance, "Get auto parallel context instance.")
  147. .def("get_device_num", &ParallelContext::device_num, "Get device num.")
  148. .def("set_device_num", &ParallelContext::set_device_num, "Set device num.")
  149. .def("get_device_num_is_set", &ParallelContext::device_num_is_set, "Get device num is set.")
  150. .def("get_global_rank", &ParallelContext::global_rank, "Get global rank.")
  151. .def("set_global_rank", &ParallelContext::set_global_rank, "Set global rank.")
  152. .def("get_global_rank_is_set", &ParallelContext::global_rank_is_set, "Get global rank is set.")
  153. .def("get_mirror_mean", &ParallelContext::mirror_mean, "Get mirror mean.")
  154. .def("set_mirror_mean", &ParallelContext::set_mirror_mean, "Set mirror mean.")
  155. .def("get_cast_before_mirror", &ParallelContext::cast_before_mirror, "Get cast before mirror.")
  156. .def("set_cast_before_mirror", &ParallelContext::set_cast_before_mirror, "Set cast before mirror.")
  157. .def("get_loss_repeated_mean", &ParallelContext::loss_repeated_mean, "Get loss repeated mean.")
  158. .def("set_loss_repeated_mean", &ParallelContext::set_loss_repeated_mean, "Set loss repeated mean.")
  159. .def("get_communication_backend", &ParallelContext::communication_backend, "Get communication backend.")
  160. .def("set_communication_backend", &ParallelContext::set_communication_backend, "Set communication backend.")
  161. .def("get_parallel_mode", &ParallelContext::parallel_mode, "Get parallel mode.")
  162. .def("set_parallel_mode", &ParallelContext::set_parallel_mode, "Set parallel mode.")
  163. .def("get_strategy_search_mode", &ParallelContext::strategy_search_mode, "Get strategy search mode.")
  164. .def("set_strategy_search_mode", &ParallelContext::set_strategy_search_mode, "Set strategy search mode.")
  165. .def("set_all_reduce_fusion_split_indices", &ParallelContext::set_all_reduce_fusion_split_indices,
  166. "Set all reduce fusion split indices.")
  167. .def("get_all_reduce_fusion_split_indices", &ParallelContext::all_reduce_fusion_split_indices,
  168. "Get all reduce fusion split indices.")
  169. .def("set_all_reduce_fusion_split_sizes", &ParallelContext::set_all_reduce_fusion_split_sizes,
  170. "Set all reduce fusion split sizes.")
  171. .def("get_all_reduce_fusion_split_sizes", &ParallelContext::all_reduce_fusion_split_sizes,
  172. "Get all reduce fusion split sizes.")
  173. .def("set_enable_all_reduce_fusion", &ParallelContext::set_enable_all_reduce_fusion,
  174. "Set enable/disable all reduce fusion.")
  175. .def("get_enable_all_reduce_fusion", &ParallelContext::enable_all_reduce_fusion,
  176. "Get enable/disable all reduce fusion.")
  177. .def("get_parameter_broadcast", &ParallelContext::parameter_broadcast, "Get parameter broadcast.")
  178. .def("get_parameter_broadcast_is_set", &ParallelContext::parameter_broadcast_is_set,
  179. "Get parameter broadcast is set.")
  180. .def("set_parameter_broadcast", &ParallelContext::set_parameter_broadcast, "Set parameter broadcast.")
  181. .def("reset", &ParallelContext::Reset, "Reset auto parallel context.");
  182. (void)py::class_<CostModelContext, std::shared_ptr<CostModelContext>>(m, "CostModelContext")
  183. .def_static("get_instance", &CostModelContext::GetInstance, "Get cost_model context instance.")
  184. .def("set_device_memory_capacity", &CostModelContext::set_device_memory_capacity,
  185. "Set the capacity of device memory.")
  186. .def("get_device_memory_capacity", &CostModelContext::device_memory_capacity, "Get the capacity of device memory.")
  187. .def("set_costmodel_alpha", &CostModelContext::set_costmodel_alpha,
  188. "Set the parameter cost_model_alpha of the DP algorithm.")
  189. .def("get_costmodel_alpha", &CostModelContext::costmodel_alpha,
  190. "Get the parameter cost_model_alpha of the DP algorithm.")
  191. .def("set_costmodel_beta", &CostModelContext::set_costmodel_beta,
  192. "Set the parameter cost_model_beta of the DP algorithm.")
  193. .def("get_costmodel_beta", &CostModelContext::costmodel_beta,
  194. "Get the parameter cost_model_beta of the DP algorithm.")
  195. .def("set_costmodel_gamma", &CostModelContext::set_costmodel_gamma,
  196. "Set the parameter cost_model_gamma of the DP algorithm")
  197. .def("get_costmodel_gamma", &CostModelContext::costmodel_gamma,
  198. "Get the parameter cost_model_gamma of the DP algorithm.")
  199. .def("set_costmodel_communi_threshold", &CostModelContext::set_costmodel_communi_threshold,
  200. "Set the parameter cost_model_communi_threshold of the DP algorithm.")
  201. .def("get_costmodel_communi_threshold", &CostModelContext::costmodel_communi_threshold,
  202. "Get the parameter cost_model_communi_threshold of the DP algorithm.")
  203. .def("set_costmodel_communi_const", &CostModelContext::set_costmodel_communi_const,
  204. "Set the parameter cost_model_communi_const of the DP algorithm.")
  205. .def("get_costmodel_communi_const", &CostModelContext::costmodel_communi_const,
  206. "Get the parameter cost_model_communi_const of the DP algorithm.")
  207. .def("set_costmodel_communi_bias", &CostModelContext::set_costmodel_communi_bias,
  208. "Set the parameter cost_model_communi_bias of the DP algorithm.")
  209. .def("get_costmodel_communi_bias", &CostModelContext::costmodel_communi_bias,
  210. "Get the parameter cost_model_communi_bias of the DP algorithm.")
  211. .def("set_multi_subgraphs", &CostModelContext::set_multi_subgraphs, "Set the parameter is_multi_subgraphs.")
  212. .def("get_multi_subgraphs", &CostModelContext::is_multi_subgraphs, "Get the parameter is_multi_subgraphs.")
  213. .def("set_costmodel_allreduce_fusion_algorithm", &CostModelContext::set_costmodel_allreduce_fusion_algorithm,
  214. "Set the parameter gradient AllReduce fusion algorithm.")
  215. .def("get_costmodel_allreduce_fusion_algorithm", &CostModelContext::costmodel_allreduce_fusion_algorithm,
  216. "Get the parameter gradient AllReduce fusion algorithm.")
  217. .def("set_costmodel_allreduce_fusion_times", &CostModelContext::set_costmodel_allreduce_fusion_times,
  218. "Set the parameter gradient AllReduce times.")
  219. .def("get_costmodel_allreduce_fusion_times", &CostModelContext::costmodel_allreduce_fusion_times,
  220. "Get the parameter gradient AllReduce times.")
  221. .def("set_costmodel_allreduce_fusion_tail_percent", &CostModelContext::set_costmodel_allreduce_fusion_tail_percent,
  222. "Set the parameter gradient AllReduce fusion tail percent.")
  223. .def("get_costmodel_allreduce_fusion_tail_percent", &CostModelContext::costmodel_allreduce_fusion_tail_percent,
  224. "Get the parameter gradient AllReduce fusion tail percent.")
  225. .def("set_costmodel_allreduce_fusion_tail_time", &CostModelContext::set_costmodel_allreduce_fusion_tail_time,
  226. "Set the parameter gradient AllReduce fusion tail time.")
  227. .def("get_costmodel_allreduce_fusion_tail_time", &CostModelContext::costmodel_allreduce_fusion_tail_time,
  228. "Get the parameter gradient AllReduce fusion tail time.")
  229. .def("set_costmodel_allreduce_fusion_allreduce_inherent_time",
  230. &CostModelContext::set_costmodel_allreduce_fusion_allreduce_inherent_time,
  231. "Set the parameter gradient AllReduce fusion allreduce inherent time.")
  232. .def("get_costmodel_allreduce_fusion_allreduce_inherent_time",
  233. &CostModelContext::costmodel_allreduce_fusion_allreduce_inherent_time,
  234. "Get the parameter gradient AllReduce fusion allreduce inherent time.")
  235. .def("set_costmodel_allreduce_fusion_allreduce_bandwidth",
  236. &CostModelContext::set_costmodel_allreduce_fusion_allreduce_bandwidth,
  237. "Set the parameter gradient AllReduce fusion allreduce bandwidth.")
  238. .def("get_costmodel_allreduce_fusion_allreduce_bandwidth",
  239. &CostModelContext::costmodel_allreduce_fusion_allreduce_bandwidth,
  240. "Get the parameter gradient AllReduce fusion allreduce bandwidth.")
  241. .def("set_costmodel_allreduce_fusion_computation_time_parameter",
  242. &CostModelContext::set_costmodel_allreduce_fusion_computation_time_parameter,
  243. "Set the parameter gradient AllReduce fusion computation time parameter.")
  244. .def("get_costmodel_allreduce_fusion_computation_time_parameter",
  245. &CostModelContext::costmodel_allreduce_fusion_computation_time_parameter,
  246. "Get the parameter gradient AllReduce fusion computation time parameter.")
  247. .def("set_tensor_slice_align_enable", &CostModelContext::set_tensor_slice_alignment_enable,
  248. "Set the parameter tensor_slice_align_enable in strategy generation.")
  249. .def("get_tensor_slice_align_enable", &CostModelContext::tensor_slice_alignment_enable,
  250. "Get the parameter tensor_slice_align_enable in strategy generation.")
  251. .def("set_tensor_slice_align_size", &CostModelContext::set_tensor_slice_alignment_size,
  252. "Set the parameter tensor_slice_size in strategy generation.")
  253. .def("get_tensor_slice_align_size", &CostModelContext::tensor_slice_alignment_size,
  254. "Get the parameter tensor_slice_size in strategy generation.")
  255. .def("set_fully_use_devices", &CostModelContext::set_fully_use_device,
  256. "Set the parameter fully_use_devices in the DP algorithm.")
  257. .def("get_fully_use_devices", &CostModelContext::fully_use_device,
  258. "Get the parameter fully_use_devices in the DP algorithm.")
  259. .def("set_elementwise_op_strategy_follow", &CostModelContext::set_elementwise_stra_follow,
  260. "Set the parameter elementwise_op_strategy_follow in the DP algorithm.")
  261. .def("get_elementwise_op_strategy_follow", &CostModelContext::elementwise_stra_follow,
  262. "Get the parameter elementwise_op_strategy_follow in the DP algorithm.")
  263. .def("reset_cost_model", &CostModelContext::ResetCostModel, "Reset the CostModelContext.")
  264. .def("reset_algo_parameters", &CostModelContext::ResetAlgoParameters, "Reset the AlgoParameters.");
  265. (void)py::module::import("atexit").attr("register")(py::cpp_function{[&]() -> void {
  266. // only in case that c++ calling python interface, ClearResAtexit should be called.
  267. if (mindspore::parse::python_adapter::IsPythonEnv()) {
  268. mindspore::pipeline::ClearResAtexit();
  269. #ifdef ENABLE_MINDDATA
  270. py::module iterators = py::module::import("mindspore.dataset.engine.iterators");
  271. (void)iterators.attr("_cleanup")();
  272. #endif
  273. }
  274. }});
  275. (void)py::class_<EventWriter, std::shared_ptr<EventWriter>>(m, "EventWriter_")
  276. .def(py::init<const std::string &>())
  277. .def("GetFileName", &EventWriter::GetFileName, "Get the file name.")
  278. .def("Open", &EventWriter::Open, "Open the write file.")
  279. .def("Write", &EventWriter::Write, "Write the serialize event.")
  280. .def("EventCount", &EventWriter::GetWriteEventCount, "Write event count.")
  281. .def("Flush", &EventWriter::Flush, "Flush the event.")
  282. .def("Close", &EventWriter::Close, "Close the write.")
  283. .def("Shut", &EventWriter::Shut, "Final close the write.");
  284. (void)py::class_<OpLib, std::shared_ptr<OpLib>>(m, "Oplib")
  285. .def(py::init())
  286. .def("reg_op", &OpLib::RegOp, "Register op info.");
  287. #ifdef ENABLE_GPU_COLLECTIVE
  288. (void)m.def("init_gpu_collective", &mindspore::device::gpu::CollectiveInitializer::InitCollective,
  289. "Init gpu collective communication mode.");
  290. (void)m.def("finalize_gpu_collective", &mindspore::device::gpu::CollectiveInitializer::FinalizeCollective,
  291. "Finalize gpu collective communication mode.");
  292. #else
  293. (void)m.def("init_gpu_collective", &mindspore::device::gpu::CollectiveFakeInitializer::InitCollective,
  294. "Init gpu collective communication mode.");
  295. (void)m.def("finalize_gpu_collective", &mindspore::device::gpu::CollectiveFakeInitializer::FinalizeCollective,
  296. "Finalize gpu collective communication mode.");
  297. #endif
  298. }