You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

ms_context.cc 15 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
#include "utils/context/ms_context.h"
#include <algorithm>
#include <atomic>
#include <exception>
#include <fstream>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>
#include <thread>
#include "./common.h"
#include "utils/convert_utils.h"
#include "utils/tensorprint_utils.h"
#ifndef NO_DLIB
#include "tdt/tsd_client.h"
#include "tdt/tdt_host_interface.h"
#include "tdt/data_common.h"
#endif
#ifdef ENABLE_GE
#include "transform/df_graph_manager.h"
#endif
#include "ir/tensor.h"
namespace mindspore {
#ifdef ENABLE_GE
using mindspore::transform::DfGraphManager;
#endif
// NOTE(review): not referenced anywhere in this file; presumably a stop flag
// read by a worker thread defined elsewhere — confirm usage before removing.
std::atomic<bool> thread_1_must_end(false);
// Lazily-created singleton instance; see MsContext::GetInstance().
std::shared_ptr<MsContext> MsContext::inst_context_ = nullptr;
// Maps the user-facing backend policy names ("ge", "vm", "ms", ...) to the
// corresponding MsBackendPolicy values.
std::map<std::string, MsBackendPolicy> MsContext::policy_map_ = {{"ge", kMsBackendGePrior},
                                                                 {"vm", kMsBackendVmOnly},
                                                                 {"ms", kMsBackendMsPrior},
                                                                 {"ge_only", kMsBackendGeOnly},
                                                                 {"vm_prior", kMsBackendVmPrior}};
  43. MsContext::MsContext(const std::string &policy, const std::string &target) {
  44. save_graphs_flag_ = false;
  45. save_graphs_path_ = ".";
  46. save_ms_model_flag_ = false;
  47. save_ms_model_path_ = "./model.ms";
  48. enable_dump_ = false;
  49. save_dump_path_ = ".";
  50. tsd_ref_ = 0;
  51. ge_ref_ = 0;
  52. is_multi_graph_sink_ = false;
  53. is_pynative_ge_init_ = false;
  54. enable_reduce_precision_ = true;
  55. auto env_device = common::GetEnv("DEVICE_ID");
  56. if (!env_device.empty()) {
  57. device_id_ = UlongToUint(std::stoul(env_device.c_str()));
  58. } else {
  59. device_id_ = 0;
  60. }
  61. backend_policy_ = policy_map_[policy];
  62. device_target_ = target;
  63. execution_mode_ = kPynativeMode;
  64. enable_task_sink_ = true;
  65. ir_fusion_flag_ = true;
  66. enable_hccl_ = false;
  67. enable_mem_reuse_ = true;
  68. enable_gpu_summary_ = true;
  69. precompile_only_ = false;
  70. auto_mixed_precision_flag_ = false;
  71. enable_pynative_infer_ = false;
  72. enable_pynative_hook_ = false;
  73. enable_dynamic_mem_pool_ = true;
  74. graph_memory_max_size_ = "0";
  75. variable_memory_max_size_ = "0";
  76. enable_loop_sink_ = target == kAscendDevice || target == kDavinciDevice;
  77. profiling_mode_ = false;
  78. profiling_options_ = "training_trace";
  79. check_bprop_flag_ = false;
  80. max_device_memory_ = kDefaultMaxDeviceMemory;
  81. print_file_path_ = "";
  82. enable_graph_kernel_ = false;
  83. }
  84. std::shared_ptr<MsContext> MsContext::GetInstance() {
  85. if (inst_context_ == nullptr) {
  86. MS_LOG(DEBUG) << "Create new mindspore context";
  87. #ifdef ENABLE_GE
  88. inst_context_.reset(new (std::nothrow) MsContext("ge", kAscendDevice));
  89. #elif defined(ENABLE_D)
  90. inst_context_.reset(new (std::nothrow) MsContext("ms", kAscendDevice));
  91. #elif defined(ENABLE_GPU)
  92. inst_context_.reset(new (std::nothrow) MsContext("ms", kGPUDevice));
  93. #else
  94. inst_context_.reset(new (std::nothrow) MsContext("vm", kCPUDevice));
  95. #endif
  96. }
  97. return inst_context_;
  98. }
  99. bool MsContext::set_backend_policy(const std::string &policy) {
  100. if (policy_map_.find(policy) == policy_map_.end()) {
  101. MS_LOG(ERROR) << "invalid backend policy name: " << policy;
  102. return false;
  103. }
  104. backend_policy_ = policy_map_[policy];
  105. MS_LOG(INFO) << "ms set context backend policy:" << policy;
  106. return true;
  107. }
  108. std::string MsContext::backend_policy() const {
  109. auto res = std::find_if(
  110. policy_map_.begin(), policy_map_.end(),
  111. [&, this](const std::pair<std::string, MsBackendPolicy> &item) { return item.second == backend_policy_; });
  112. if (res != policy_map_.end()) {
  113. return res->first;
  114. }
  115. return "unknown";
  116. }
  117. void MsContext::set_execution_mode(int execution_mode) {
  118. if (execution_mode != kGraphMode && execution_mode != kPynativeMode) {
  119. MS_LOG(EXCEPTION) << "The execution mode is invalid!";
  120. }
  121. execution_mode_ = execution_mode;
  122. }
  123. bool MsContext::set_device_target(const std::string &target) {
  124. if (kTargetSet.find(target) == kTargetSet.end()) {
  125. MS_LOG(ERROR) << "invalid device target name: " << target;
  126. return false;
  127. }
  128. if (target == kDavinciDevice) {
  129. device_target_ = kAscendDevice;
  130. } else {
  131. device_target_ = target;
  132. }
  133. MS_LOG(INFO) << "ms set context device target:" << target;
  134. return true;
  135. }
  136. bool MsContext::set_device_id(uint32_t device_id) {
  137. device_id_ = device_id;
  138. MS_LOG(INFO) << "ms set context device id:" << device_id;
  139. return true;
  140. }
#ifndef NO_DLIB
// Open tdt dataset.
// Opens the TSD client for this device (reference counted via tsd_ref_) and,
// when TDT queueing is compiled in, initializes the TDT host and starts the
// tensor-print thread. Rank size is read from the RANK_SIZE env variable.
bool MsContext::OpenTsd() {
  // Pynative GE init already opened everything; nothing to do.
  if (is_pynative_ge_init_) {
    return true;
  }
  if (tsd_ref_) {
    MS_LOG(DEBUG) << "TDT Dataset client is already opened.";
    tsd_ref_++;
    return true;
  }
  unsigned int device_id;
  unsigned int rank_size = 1;
  device_id = device_id_;
  auto rank_size_env = common::GetEnv("RANK_SIZE");
  if (rank_size_env.empty()) {
    MS_LOG(INFO) << "Should config rank size.";
    rank_size = 1;
  } else {
    // NOTE(review): std::stoi throws on a non-numeric RANK_SIZE — confirm
    // whether that is the intended failure mode.
    int rank_env = std::stoi(rank_size_env);
    if (rank_env <= 0) {
      MS_LOG(EXCEPTION) << "Error rank size " << rank_env << ".";
    }
    rank_size = IntToUint(rank_env);
  }
  MS_LOG(INFO) << "Device id = " << device_id << ", rank size = " << rank_size << ".";
  TDT_StatusT status = tdt::TsdClient::GetInstance()->Open(device_id, rank_size);
  if (status != TDT_OK) {
    // NOTE(review): if MS_LOG(EXCEPTION) throws, this return is unreachable.
    MS_LOG(EXCEPTION) << "Device " << device_id << " is occupied, open tsd failed, status = " << status << ".";
    return false;
  }
  tsd_ref_++;
#ifdef ENABLE_TDTQUE
  int32_t initStatus = tdt::TdtHostInit(device_id);
  if (initStatus != TDT_OK_CODE) {
    // NOTE(review): unreachable return if MS_LOG(EXCEPTION) throws (as above).
    MS_LOG(EXCEPTION) << "Init tsd failed, status = " << initStatus << ".";
    return false;
  }
  // Background thread that drains/prints tensors coming over TDT.
  tdt_print_ = std::thread(TensorPrint());
#endif
  MS_LOG(INFO) << "Open and init tsd successful, tsd reference = " << tsd_ref_ << ".";
  return true;
}
// Decrements the TSD reference count and, when it reaches zero (or force is
// set), stops/destroys the TDT host, joins the tensor-print thread, and
// closes the TSD client.
bool MsContext::CloseTsd(bool force) {
  if (tsd_ref_ == 0) {
    return true;
  }
  tsd_ref_--;
  if (force || tsd_ref_ == 0) {
    tsd_ref_ = 0;
#ifdef ENABLE_TDTQUE
    int32_t stopStatus = tdt::TdtHostStop(KNpuLog);
    if (stopStatus != TDT_OK_CODE) {
      // NOTE(review): unreachable return if MS_LOG(EXCEPTION) throws.
      MS_LOG(EXCEPTION) << "Stop tsd failed, status = " << stopStatus << ".";
      return false;
    }
    // Release the Python GIL so the print thread (which may be blocked on
    // Python-side work) can make progress while we tear down.
    py::gil_scoped_release gil_release;
    int32_t destroyStatus = tdt::TdtHostDestroy();
    if (destroyStatus != TDT_OK_CODE) {
      MS_LOG(EXCEPTION) << "Destroy tsd failed, status = " << destroyStatus << ".";
      return false;
    }
    try {
      if (tdt_print_.joinable()) {
        MS_LOG(INFO) << "join tdt host receive process";
        tdt_print_.join();
      }
    } catch (const std::exception &e) {
      // Best-effort join: log and continue shutting down.
      MS_LOG(ERROR) << "tdt thread join failed: " << e.what();
    }
#endif
    TDT_StatusT status = tdt::TsdClient::GetInstance()->Close();
    if (status != TDT_OK) {
      MS_LOG(EXCEPTION) << "Close tsd failed, status = " << status << ".";
      return false;
    }
    is_pynative_ge_init_ = false;
    MS_LOG(INFO) << "Destroy and close tsd successful, status = " << status << ".";
  } else {
    MS_LOG(DEBUG) << "TDT Dataset client is used, no need to close, tsd reference = " << tsd_ref_ << ".";
  }
  return true;
}
#else
// Stub implementations for builds without the device library.
bool MsContext::OpenTsd() { return true; }
bool MsContext::CloseTsd(bool) { return true; }
#endif
  228. void MsContext::SetHcclOptions(std::map<std::string, std::string> *ge_options) const {
  229. auto env_table_file = common::GetEnv("RANK_TABLE_FILE");
  230. auto env_rank_id = common::GetEnv("RANK_ID");
  231. auto env_device_id = std::to_string(device_id_);
  232. if (!(env_table_file.empty() || env_rank_id.empty())) {
  233. MS_LOG(INFO) << "Initialize Ge for distribute parameter";
  234. MS_LOG(INFO) << "Use hccl, make sure hccl lib is set in OPTION_EXEC_EXTERN_PLUGIN_PATH.";
  235. auto env_hccl_flag = common::GetEnv("HCCL_FLAG");
  236. if (!env_hccl_flag.empty()) {
  237. (*ge_options)["ge.exec.hcclFlag"] = env_hccl_flag;
  238. }
  239. (*ge_options)["ge.exec.isUseHcom"] = "1";
  240. (*ge_options)["ge.exec.deviceId"] = env_device_id;
  241. (*ge_options)["ge.exec.rankId"] = env_rank_id;
  242. (*ge_options)["ge.exec.podName"] = env_rank_id;
  243. (*ge_options)["ge.exec.rankTableFile"] = env_table_file;
  244. (*ge_options)["ge.graphRunMode"] = "1";
  245. } else {
  246. // device id is still needed for non-distribute case
  247. (*ge_options)["ge.exec.deviceId"] = env_device_id;
  248. MS_LOG(INFO) << "No hccl mode. "
  249. "If use hccl, make sure [RANK_TABLE_FILE,RANK_ID,DEVICE_ID,DEPLOY_MODE] all be set in ENV.";
  250. }
  251. auto env_deploy_mode = common::GetEnv("DEPLOY_MODE");
  252. if (!env_deploy_mode.empty()) {
  253. (*ge_options)["ge.exec.deployMode"] = env_deploy_mode;
  254. } else {
  255. (*ge_options)["ge.exec.deployMode"] = "0";
  256. MS_LOG(WARNING) << "DEPLOY_MODE is not set in ENV. Now set to default value 0";
  257. }
  258. }
// Builds the full GE initialization option map from context state and a set
// of environment variables (ME_TBE_PLUGIN_PATH, DDK_VERSION, JOB_ID, FE_FLAG,
// AICPU_FLAG, OPTION_EXEC_EXTERN_PLUGIN_PATH, OPTION_PROTO_LIB_PATH, plus the
// HCCL-related ones handled in SetHcclOptions). No-op unless built with GE.
void MsContext::GetGeOptions(std::map<std::string, std::string> *ge_options) const {
#ifdef ENABLE_GE
  (*ge_options)["device_id"] = "0";
  // Dump configuration mirrors the context's enable_dump_/save_dump_path_.
  (*ge_options)["ge.exec.enableDump"] = std::to_string(enable_dump_);
  (*ge_options)["ge.exec.dumpPath"] = save_dump_path_;
  (*ge_options)["ge.exec.dumpMode"] = "output";
  MS_LOG(INFO) << "The enable dump state is " << std::to_string(enable_dump_) << " and save dump path is "
               << save_dump_path_ << ".";
  (*ge_options)["ge.exec.profilingMode"] = std::to_string(profiling_mode_);
  if (profiling_mode_) {
    (*ge_options)["ge.exec.profilingOptions"] = profiling_options_;
  }
  // only not supported in ge
  auto tbe_plugin_path = common::GetEnv("ME_TBE_PLUGIN_PATH");
  if (!tbe_plugin_path.empty()) {
    // Canonicalize the path before handing it to GE.
    char real_path[PATH_MAX] = {0};
    if (nullptr == realpath(tbe_plugin_path.c_str(), real_path)) {
      MS_LOG(ERROR) << "Ms tbe plugin Path error, " << tbe_plugin_path;
    } else {
      tbe_plugin_path = real_path;
      (*ge_options)["ge.TBE_plugin_path"] = tbe_plugin_path;
    }
  } else {
    // NOTE(review): this logs ERROR merely because the env variable is
    // unset — confirm whether WARNING was intended.
    MS_LOG(ERROR) << "Set TBE plugin path failed!";
  }
  (*ge_options)["rank_table_file"] = "";
  auto env_ddk_version = common::GetEnv("DDK_VERSION");
  if (!env_ddk_version.empty()) {
    (*ge_options)["ge.DDK_version"] = env_ddk_version;
  } else {
    // Fallback DDK version when the env variable is unset.
    (*ge_options)["ge.DDK_version"] = "1.60.T17.B830";
  }
  (*ge_options)["graphType"] = "1";
  // "0" means "not configured" for both memory limits; skip in that case.
  if (graph_memory_max_size_ != "0") {
    (*ge_options)["ge.graphMemoryMaxSize"] = graph_memory_max_size_;
  }
  if (variable_memory_max_size_ != "0") {
    (*ge_options)["ge.variableMemoryMaxSize"] = variable_memory_max_size_;
  }
#if ENABLE_TRAIN == 1
  (*ge_options)["ge.graphRunMode"] = "1";
#endif
  SetDisableReuseMemoryFlag(ge_options);
  SetHcclOptions(ge_options);
  auto env_job_id = common::GetEnv("JOB_ID");
  if (!env_job_id.empty()) {
    (*ge_options)["ge.exec.jobId"] = env_job_id;
  } else {
    (*ge_options)["ge.exec.jobId"] = "0";
    MS_LOG(WARNING) << "JOB_ID is not set in ENV. Now set to default value 0";
  }
  auto env_fe_flag = common::GetEnv("FE_FLAG");
  if (!env_fe_flag.empty()) {
    (*ge_options)["ge.feFlag"] = env_fe_flag;
    MS_LOG(INFO) << "Use FE, make sure fe lib is set in OPTION_EXEC_EXTERN_PLUGIN_PATH.";
  }
  auto env_aicpu_flag = common::GetEnv("AICPU_FLAG");
  if (!env_aicpu_flag.empty()) {
    (*ge_options)["ge.aicpuFlag"] = env_aicpu_flag;
    MS_LOG(INFO) << "Use AICPU, make sure aicpu lib is set in OPTION_EXEC_EXTERN_PLUGIN_PATH.";
  }
  // all libs are set in same env variable "OPTION_EXEC_EXTERN_PLUGIN_PATH", such as FE, HCCL, AICPU, etc
  auto load_path = common::GetEnv("OPTION_EXEC_EXTERN_PLUGIN_PATH");
  if (!load_path.empty()) {
    char real_path[PATH_MAX] = {0};
    if (realpath(load_path.c_str(), real_path)) {
      load_path = real_path;
      (*ge_options)["ge.soLoadPath"] = load_path;
    }
  } else {
    MS_LOG(ERROR) << "Set lib load path failed!";
  }
  auto proto_lib_path = common::GetEnv("OPTION_PROTO_LIB_PATH");
  if (!proto_lib_path.empty()) {
    char real_path[PATH_MAX] = {0};
    if (realpath(proto_lib_path.c_str(), real_path)) {
      proto_lib_path = real_path;
      (*ge_options)["ge.opsProtoLibPath"] = proto_lib_path;
    }
  } else {
    MS_LOG(ERROR) << "Set proto lib path failed!";
  }
  // Enable auto mixed precision according to the context options
  if (auto_mixed_precision_flag_) {
    (*ge_options)["ge.exec.precision_mode"] = "allow_mix_precision";
  } else {
    (*ge_options)["ge.exec.precision_mode"] = "allow_fp32_to_fp16";
  }
  // Disable the global variable acc, only enable it whlie adding training graph in pipeline
  (*ge_options)["ge.exec.variable_acc"] = "0";
#endif
}
  351. void MsContext::SetDisableReuseMemoryFlag(std::map<std::string, std::string> *ge_options) const {
  352. auto env_disable_reuse_memory = common::GetEnv("DISABLE_REUSE_MEMORY");
  353. if (!env_disable_reuse_memory.empty()) {
  354. (*ge_options)["ge.exec.disableReuseMemory"] = env_disable_reuse_memory;
  355. } else {
  356. (*ge_options)["ge.exec.disableReuseMemory"] = "0";
  357. MS_LOG(WARNING) << "DISABLE_REUSE_MEMORY is not set in ENV. Now set to default value 0";
  358. }
  359. }
// Initializes the GE runtime (reference counted via ge_ref_). A no-op that
// returns true when built without GE, when pynative GE init already ran, or
// when GE is already initialized (then only the refcount is bumped).
bool MsContext::InitGe() {
#ifdef ENABLE_GE
  if (is_pynative_ge_init_) {
    return true;
  }
  if (ge_ref_) {
    ge_ref_++;
    return true;
  }
  std::map<std::string, std::string> ge_options;
  GetGeOptions(&ge_options);
  {
    // Release GIL before calling into (potentially long-running) C++ code
    py::gil_scoped_release release;
    if (ge::GEInitialize(ge_options) != ge::GRAPH_SUCCESS) {
      MS_LOG(EXCEPTION) << "Initialize GE failed!";
    }
  }
  ge_ref_++;
  MS_LOG(INFO) << "Init ge successful, ge reference = " << ge_ref_ << ".";
#endif
  return true;
}
  383. bool MsContext::FinalizeGe(bool force) {
  384. #ifdef ENABLE_GE
  385. if (ge_ref_ == 0) {
  386. return true;
  387. }
  388. ge_ref_--;
  389. if (force || ge_ref_ == 0) {
  390. ge_ref_ = 0;
  391. try {
  392. DfGraphManager::GetInstance().DeleteGraphRunner();
  393. DfGraphManager::GetInstance().DeleteGeSession();
  394. } catch (const std::exception &e) {
  395. MS_LOG(ERROR) << "Error occurred when deleting GE graph runner and session fail. Error: " << e.what();
  396. } catch (...) {
  397. std::string exName(abi::__cxa_current_exception_type()->name());
  398. MS_LOG(ERROR) << "Error occurred when deleting GE graph runner and session fail. Exception name: " << exName;
  399. }
  400. if (ge::GEFinalize() != ge::GRAPH_SUCCESS) {
  401. MS_LOG(WARNING) << "Finalize GE failed!";
  402. }
  403. is_pynative_ge_init_ = false;
  404. } else {
  405. MS_LOG(INFO) << "Ge is used, no need to finalize, tsd reference = " << ge_ref_ << ".";
  406. }
  407. #endif
  408. return true;
  409. }
  410. bool MsContext::PynativeInitGe() {
  411. if (is_pynative_ge_init_ || ge_ref_ || tsd_ref_) {
  412. return true;
  413. }
  414. (void)OpenTsd();
  415. (void)InitGe();
  416. is_pynative_ge_init_ = true;
  417. return true;
  418. }
  419. bool MsContext::IsTsdOpened() {
  420. if (tsd_ref_ > 0) {
  421. return true;
  422. }
  423. return false;
  424. }
  425. bool MsContext::IsGeInited() {
  426. if (ge_ref_ > 0) {
  427. return true;
  428. }
  429. return false;
  430. }
  431. } // namespace mindspore