You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

ms_context.cc 15 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "utils/context/ms_context.h"
  17. #include <thread>
  18. #include <atomic>
  19. #include <fstream>
  20. #include "./common.h"
  21. #include "utils/convert_utils.h"
  22. #include "utils/tensorprint_utils.h"
  23. #ifndef NO_DLIB
  24. #include "tdt/tsd_client.h"
  25. #include "tdt/tdt_host_interface.h"
  26. #include "tdt/data_common.h"
  27. #endif
  28. #ifdef ENABLE_GE
  29. #include "transform/df_graph_manager.h"
  30. #endif
  31. #include "ir/tensor.h"
namespace mindspore {
#ifdef ENABLE_GE
using mindspore::transform::DfGraphManager;
#endif
// Shutdown flag for a worker thread.  NOTE(review): not referenced anywhere in
// this file; presumably read by code elsewhere (e.g. the tensor-print thread) --
// confirm before removing.
std::atomic<bool> thread_1_must_end(false);
// Lazily-created process-wide singleton; see MsContext::GetInstance().
std::shared_ptr<MsContext> MsContext::inst_context_ = nullptr;
// Maps the user-facing backend policy name to its internal enum value.
std::map<std::string, MsBackendPolicy> MsContext::policy_map_ = {{"ge", kMsBackendGePrior},
                                                                 {"vm", kMsBackendVmOnly},
                                                                 {"ms", kMsBackendMsPrior},
                                                                 {"ge_only", kMsBackendGeOnly},
                                                                 {"vm_prior", kMsBackendVmPrior}};
  43. MsContext::MsContext(const std::string &policy, const std::string &target) {
  44. save_graphs_flag_ = false;
  45. save_graphs_path_ = ".";
  46. save_ms_model_flag_ = false;
  47. save_ms_model_path_ = "./model.ms";
  48. enable_dump_ = false;
  49. save_dump_path_ = ".";
  50. tsd_ref_ = 0;
  51. ge_ref_ = 0;
  52. is_multi_graph_sink_ = false;
  53. is_pynative_ge_init_ = false;
  54. enable_reduce_precision_ = true;
  55. auto env_device = common::GetEnv("DEVICE_ID");
  56. if (!env_device.empty()) {
  57. device_id_ = UlongToUint(std::stoul(env_device.c_str()));
  58. } else {
  59. device_id_ = 0;
  60. }
  61. backend_policy_ = policy_map_[policy];
  62. device_target_ = target;
  63. execution_mode_ = kPynativeMode;
  64. enable_task_sink_ = true;
  65. ir_fusion_flag_ = true;
  66. enable_hccl_ = false;
  67. enable_mem_reuse_ = true;
  68. enable_gpu_summary_ = true;
  69. precompile_only_ = false;
  70. auto_mixed_precision_flag_ = false;
  71. enable_pynative_infer_ = false;
  72. enable_pynative_hook_ = false;
  73. enable_dynamic_mem_pool_ = true;
  74. graph_memory_max_size_ = "0";
  75. variable_memory_max_size_ = "0";
  76. enable_loop_sink_ = target == kAscendDevice || target == kDavinciDevice;
  77. profiling_mode_ = false;
  78. profiling_options_ = "training_trace";
  79. check_bprop_flag_ = false;
  80. max_device_memory_ = kDefaultMaxDeviceMemory;
  81. print_file_path_ = "";
  82. }
// Returns the process-wide context, creating it on first use with a default
// backend policy / device target chosen by the build flavour.
// NOTE(review): the check-then-create below is not thread-safe; presumably all
// first calls happen on a single (Python) thread -- confirm.
std::shared_ptr<MsContext> MsContext::GetInstance() {
  if (inst_context_ == nullptr) {
    MS_LOG(DEBUG) << "Create new mindspore context";
#ifdef ENABLE_GE
    inst_context_.reset(new (std::nothrow) MsContext("ge", kAscendDevice));
#elif defined(ENABLE_D)
    inst_context_.reset(new (std::nothrow) MsContext("ms", kAscendDevice));
#elif defined(ENABLE_GPU)
    inst_context_.reset(new (std::nothrow) MsContext("ms", kGPUDevice));
#else
    inst_context_.reset(new (std::nothrow) MsContext("vm", kCPUDevice));
#endif
  }
  return inst_context_;
}
  98. bool MsContext::set_backend_policy(const std::string &policy) {
  99. if (policy_map_.find(policy) == policy_map_.end()) {
  100. MS_LOG(ERROR) << "invalid backend policy name: " << policy;
  101. return false;
  102. }
  103. backend_policy_ = policy_map_[policy];
  104. MS_LOG(INFO) << "ms set context backend policy:" << policy;
  105. return true;
  106. }
  107. std::string MsContext::backend_policy() const {
  108. auto res = std::find_if(
  109. policy_map_.begin(), policy_map_.end(),
  110. [&, this](const std::pair<std::string, MsBackendPolicy> &item) { return item.second == backend_policy_; });
  111. if (res != policy_map_.end()) {
  112. return res->first;
  113. }
  114. return "unknown";
  115. }
  116. void MsContext::set_execution_mode(int execution_mode) {
  117. if (execution_mode != kGraphMode && execution_mode != kPynativeMode) {
  118. MS_LOG(EXCEPTION) << "The execution mode is invalid!";
  119. }
  120. execution_mode_ = execution_mode;
  121. }
  122. bool MsContext::set_device_target(const std::string &target) {
  123. if (kTargetSet.find(target) == kTargetSet.end()) {
  124. MS_LOG(ERROR) << "invalid device target name: " << target;
  125. return false;
  126. }
  127. if (target == kDavinciDevice) {
  128. device_target_ = kAscendDevice;
  129. } else {
  130. device_target_ = target;
  131. }
  132. MS_LOG(INFO) << "ms set context device target:" << target;
  133. return true;
  134. }
// Sets the device id used by this process.  Always succeeds: no range
// validation is performed here.
bool MsContext::set_device_id(uint32_t device_id) {
  device_id_ = device_id;
  MS_LOG(INFO) << "ms set context device id:" << device_id;
  return true;
}
#ifndef NO_DLIB
// Open tdt dataset
// Opens the TSD client for this device.  Reference-counted: repeated calls
// only increment tsd_ref_.  Returns true on success; fatal failures go
// through MS_LOG(EXCEPTION).
bool MsContext::OpenTsd() {
  // If GE was initialized via the pynative path, tsd is treated as open too.
  if (is_pynative_ge_init_) {
    return true;
  }
  if (tsd_ref_) {
    MS_LOG(DEBUG) << "TDT Dataset client is already opened.";
    tsd_ref_++;
    return true;
  }
  unsigned int device_id;
  unsigned int rank_size = 1;
  device_id = device_id_;
  // RANK_SIZE (number of participating devices) comes from the environment;
  // it defaults to 1 when unset.
  auto rank_size_env = common::GetEnv("RANK_SIZE");
  if (rank_size_env.empty()) {
    MS_LOG(INFO) << "Should config rank size.";
    rank_size = 1;
  } else {
    int rank_env = std::stoi(rank_size_env);
    if (rank_env <= 0) {
      MS_LOG(EXCEPTION) << "Error rank size " << rank_env << ".";
    }
    rank_size = IntToUint(rank_env);
  }
  MS_LOG(INFO) << "Device id = " << device_id << ", rank size = " << rank_size << ".";
  TDT_StatusT status = tdt::TsdClient::GetInstance()->Open(device_id, rank_size);
  if (status != TDT_OK) {
    // NOTE(review): MS_LOG(EXCEPTION) appears to throw, which would make the
    // return below unreachable; kept as written.
    MS_LOG(EXCEPTION) << "Device " << device_id << " is occupied, open tsd failed, status = " << status << ".";
    return false;
  }
  tsd_ref_++;
#ifdef ENABLE_TDTQUE
  // Initialize the host-side TDT queue and start the tensor-print worker
  // thread (joined again in CloseTsd).
  int32_t initStatus = tdt::TdtHostInit(device_id);
  if (initStatus != TDT_OK_CODE) {
    MS_LOG(EXCEPTION) << "Init tsd failed, status = " << initStatus << ".";
    return false;
  }
  tdt_print_ = std::thread(TensorPrint());
#endif
  MS_LOG(INFO) << "Open and init tsd successful, tsd reference = " << tsd_ref_ << ".";
  return true;
}
// Closes the TSD client.  Reference-counted: actual teardown only happens when
// the last reference is released, or unconditionally when |force| is set.
bool MsContext::CloseTsd(bool force) {
  if (tsd_ref_ == 0) {
    return true;
  }
  tsd_ref_--;
  if (force || tsd_ref_ == 0) {
    tsd_ref_ = 0;
#ifdef ENABLE_TDTQUE
    // Stop the host-side TDT queue before destroying it.
    int32_t stopStatus = tdt::TdtHostStop(KNpuLog);
    if (stopStatus != TDT_OK_CODE) {
      // NOTE(review): MS_LOG(EXCEPTION) appears to throw, which would make
      // the returns below it unreachable; kept as written.
      MS_LOG(EXCEPTION) << "Stop tsd failed, status = " << stopStatus << ".";
      return false;
    }
    // Release the Python GIL around the blocking teardown; presumably the
    // print thread needs it to finish -- confirm.
    py::gil_scoped_release gil_release;
    int32_t destroyStatus = tdt::TdtHostDestroy();
    if (destroyStatus != TDT_OK_CODE) {
      MS_LOG(EXCEPTION) << "Destroy tsd failed, status = " << destroyStatus << ".";
      return false;
    }
    try {
      // Wait for the tensor-print worker started in OpenTsd() to exit.
      if (tdt_print_.joinable()) {
        MS_LOG(INFO) << "join tdt host receive process";
        tdt_print_.join();
      }
    } catch (const std::exception &e) {
      MS_LOG(ERROR) << "tdt thread join failed: " << e.what();
    }
#endif
    TDT_StatusT status = tdt::TsdClient::GetInstance()->Close();
    if (status != TDT_OK) {
      MS_LOG(EXCEPTION) << "Close tsd failed, status = " << status << ".";
      return false;
    }
    is_pynative_ge_init_ = false;
    MS_LOG(INFO) << "Destroy and close tsd successful, status = " << status << ".";
  } else {
    MS_LOG(DEBUG) << "TDT Dataset client is used, no need to close, tsd reference = " << tsd_ref_ << ".";
  }
  return true;
}
#else
// Stub implementations for builds without the device libraries (NO_DLIB):
// both calls succeed without doing anything.
bool MsContext::OpenTsd() { return true; }
bool MsContext::CloseTsd(bool) { return true; }
#endif
// Fills |ge_options| with the HCCL (distributed execution) related GE options,
// derived from the RANK_TABLE_FILE / RANK_ID / HCCL_FLAG / DEPLOY_MODE
// environment variables and the configured device id.
void MsContext::SetHcclOptions(std::map<std::string, std::string> *ge_options) const {
  auto env_table_file = common::GetEnv("RANK_TABLE_FILE");
  auto env_rank_id = common::GetEnv("RANK_ID");
  auto env_device_id = std::to_string(device_id_);
  // Distributed mode requires both a rank table file and a rank id.
  if (!(env_table_file.empty() || env_rank_id.empty())) {
    MS_LOG(INFO) << "Initialize Ge for distribute parameter";
    MS_LOG(INFO) << "Use hccl, make sure hccl lib is set in OPTION_EXEC_EXTERN_PLUGIN_PATH.";
    auto env_hccl_flag = common::GetEnv("HCCL_FLAG");
    if (!env_hccl_flag.empty()) {
      (*ge_options)["ge.exec.hcclFlag"] = env_hccl_flag;
    }
    (*ge_options)["ge.exec.isUseHcom"] = "1";
    (*ge_options)["ge.exec.deviceId"] = env_device_id;
    (*ge_options)["ge.exec.rankId"] = env_rank_id;
    (*ge_options)["ge.exec.podName"] = env_rank_id;
    (*ge_options)["ge.exec.rankTableFile"] = env_table_file;
    (*ge_options)["ge.graphRunMode"] = "1";
  } else {
    // device id is still needed for non-distribute case
    (*ge_options)["ge.exec.deviceId"] = env_device_id;
    MS_LOG(INFO) << "No hccl mode. "
                    "If use hccl, make sure [RANK_TABLE_FILE,RANK_ID,DEVICE_ID,DEPLOY_MODE] all be set in ENV.";
  }
  auto env_deploy_mode = common::GetEnv("DEPLOY_MODE");
  if (!env_deploy_mode.empty()) {
    (*ge_options)["ge.exec.deployMode"] = env_deploy_mode;
  } else {
    (*ge_options)["ge.exec.deployMode"] = "0";
    MS_LOG(WARNING) << "DEPLOY_MODE is not set in ENV. Now set to default value 0";
  }
}
// Fills |ge_options| with all GE initialization options: dump and profiling
// configuration, plugin/library paths, memory limits, HCCL settings and the
// precision mode.  Compiled to a no-op without ENABLE_GE.
void MsContext::GetGeOptions(std::map<std::string, std::string> *ge_options) const {
#ifdef ENABLE_GE
  (*ge_options)["device_id"] = "0";
  // Dump configuration.
  (*ge_options)["ge.exec.enableDump"] = std::to_string(enable_dump_);
  (*ge_options)["ge.exec.dumpPath"] = save_dump_path_;
  (*ge_options)["ge.exec.dumpMode"] = "output";
  MS_LOG(INFO) << "The enable dump state is " << std::to_string(enable_dump_) << " and save dump path is "
               << save_dump_path_ << ".";
  // Profiling configuration; options are only forwarded when profiling is on.
  (*ge_options)["ge.exec.profilingMode"] = std::to_string(profiling_mode_);
  if (profiling_mode_) {
    (*ge_options)["ge.exec.profilingOptions"] = profiling_options_;
  }
  // TBE plugin path from the environment, resolved to a real path first.
  auto tbe_plugin_path = common::GetEnv("ME_TBE_PLUGIN_PATH");
  if (!tbe_plugin_path.empty()) {
    char real_path[PATH_MAX] = {0};
    if (nullptr == realpath(tbe_plugin_path.c_str(), real_path)) {
      MS_LOG(ERROR) << "Ms tbe plugin Path error, " << tbe_plugin_path;
    } else {
      tbe_plugin_path = real_path;
      (*ge_options)["ge.TBE_plugin_path"] = tbe_plugin_path;
    }
  } else {
    MS_LOG(ERROR) << "Set TBE plugin path failed!";
  }
  (*ge_options)["rank_table_file"] = "";
  auto env_ddk_version = common::GetEnv("DDK_VERSION");
  if (!env_ddk_version.empty()) {
    (*ge_options)["ge.DDK_version"] = env_ddk_version;
  } else {
    // Fallback DDK version used when the environment does not provide one.
    (*ge_options)["ge.DDK_version"] = "1.60.T17.B830";
  }
  (*ge_options)["graphType"] = "1";
  // "0" means "not configured" for both memory limits.
  if (graph_memory_max_size_ != "0") {
    (*ge_options)["ge.graphMemoryMaxSize"] = graph_memory_max_size_;
  }
  if (variable_memory_max_size_ != "0") {
    (*ge_options)["ge.variableMemoryMaxSize"] = variable_memory_max_size_;
  }
#if ENABLE_TRAIN == 1
  (*ge_options)["ge.graphRunMode"] = "1";
#endif
  SetDisableReuseMemoryFlag(ge_options);
  SetHcclOptions(ge_options);
  auto env_job_id = common::GetEnv("JOB_ID");
  if (!env_job_id.empty()) {
    (*ge_options)["ge.exec.jobId"] = env_job_id;
  } else {
    (*ge_options)["ge.exec.jobId"] = "0";
    MS_LOG(WARNING) << "JOB_ID is not set in ENV. Now set to default value 0";
  }
  auto env_fe_flag = common::GetEnv("FE_FLAG");
  if (!env_fe_flag.empty()) {
    (*ge_options)["ge.feFlag"] = env_fe_flag;
    MS_LOG(INFO) << "Use FE, make sure fe lib is set in OPTION_EXEC_EXTERN_PLUGIN_PATH.";
  }
  auto env_aicpu_flag = common::GetEnv("AICPU_FLAG");
  if (!env_aicpu_flag.empty()) {
    (*ge_options)["ge.aicpuFlag"] = env_aicpu_flag;
    MS_LOG(INFO) << "Use AICPU, make sure aicpu lib is set in OPTION_EXEC_EXTERN_PLUGIN_PATH.";
  }
  // all libs are set in same env variable "OPTION_EXEC_EXTERN_PLUGIN_PATH", such as FE, HCCL, AICPU, etc
  auto load_path = common::GetEnv("OPTION_EXEC_EXTERN_PLUGIN_PATH");
  if (!load_path.empty()) {
    char real_path[PATH_MAX] = {0};
    // NOTE(review): unlike the TBE branch above, a realpath failure here is
    // silently ignored -- confirm whether an error log is intended.
    if (realpath(load_path.c_str(), real_path)) {
      load_path = real_path;
      (*ge_options)["ge.soLoadPath"] = load_path;
    }
  } else {
    MS_LOG(ERROR) << "Set lib load path failed!";
  }
  auto proto_lib_path = common::GetEnv("OPTION_PROTO_LIB_PATH");
  if (!proto_lib_path.empty()) {
    char real_path[PATH_MAX] = {0};
    if (realpath(proto_lib_path.c_str(), real_path)) {
      proto_lib_path = real_path;
      (*ge_options)["ge.opsProtoLibPath"] = proto_lib_path;
    }
  } else {
    MS_LOG(ERROR) << "Set proto lib path failed!";
  }
  // Enable auto mixed precision according to the context options
  if (auto_mixed_precision_flag_) {
    (*ge_options)["ge.exec.precision_mode"] = "allow_mix_precision";
  } else {
    (*ge_options)["ge.exec.precision_mode"] = "allow_fp32_to_fp16";
  }
  // Disable the global variable acc, only enable it while adding training graph in pipeline
  (*ge_options)["ge.exec.variable_acc"] = "0";
#endif
}
  350. void MsContext::SetDisableReuseMemoryFlag(std::map<std::string, std::string> *ge_options) const {
  351. auto env_disable_reuse_memory = common::GetEnv("DISABLE_REUSE_MEMORY");
  352. if (!env_disable_reuse_memory.empty()) {
  353. (*ge_options)["ge.exec.disableReuseMemory"] = env_disable_reuse_memory;
  354. } else {
  355. (*ge_options)["ge.exec.disableReuseMemory"] = "0";
  356. MS_LOG(WARNING) << "DISABLE_REUSE_MEMORY is not set in ENV. Now set to default value 0";
  357. }
  358. }
// Initializes the GE runtime.  Reference-counted: repeated calls only bump
// ge_ref_.  Compiled to a trivial `return true` without ENABLE_GE.
bool MsContext::InitGe() {
#ifdef ENABLE_GE
  // Already initialized via the pynative path.
  if (is_pynative_ge_init_) {
    return true;
  }
  if (ge_ref_) {
    ge_ref_++;
    return true;
  }
  std::map<std::string, std::string> ge_options;
  GetGeOptions(&ge_options);
  {
    // Release GIL before calling into (potentially long-running) C++ code
    py::gil_scoped_release release;
    if (ge::GEInitialize(ge_options) != ge::GRAPH_SUCCESS) {
      MS_LOG(EXCEPTION) << "Initialize GE failed!";
    }
  }
  ge_ref_++;
  MS_LOG(INFO) << "Init ge successful, ge reference = " << ge_ref_ << ".";
#endif
  return true;
}
  382. bool MsContext::FinalizeGe(bool force) {
  383. #ifdef ENABLE_GE
  384. if (ge_ref_ == 0) {
  385. return true;
  386. }
  387. ge_ref_--;
  388. if (force || ge_ref_ == 0) {
  389. ge_ref_ = 0;
  390. try {
  391. DfGraphManager::GetInstance().DeleteGraphRunner();
  392. DfGraphManager::GetInstance().DeleteGeSession();
  393. } catch (const std::exception &e) {
  394. MS_LOG(ERROR) << "Error occurred when deleting GE graph runner and session fail. Error: " << e.what();
  395. } catch (...) {
  396. std::string exName(abi::__cxa_current_exception_type()->name());
  397. MS_LOG(ERROR) << "Error occurred when deleting GE graph runner and session fail. Exception name: " << exName;
  398. }
  399. if (ge::GEFinalize() != ge::GRAPH_SUCCESS) {
  400. MS_LOG(WARNING) << "Finalize GE failed!";
  401. }
  402. is_pynative_ge_init_ = false;
  403. } else {
  404. MS_LOG(INFO) << "Ge is used, no need to finalize, tsd reference = " << ge_ref_ << ".";
  405. }
  406. #endif
  407. return true;
  408. }
// One-shot initialization for the pynative path: opens TSD and GE once, then
// latches is_pynative_ge_init_ so later OpenTsd()/InitGe() calls short-cut.
bool MsContext::PynativeInitGe() {
  if (is_pynative_ge_init_ || ge_ref_ || tsd_ref_) {
    return true;
  }
  // Return values deliberately ignored; both helpers report fatal errors
  // through MS_LOG(EXCEPTION) themselves.
  (void)OpenTsd();
  (void)InitGe();
  is_pynative_ge_init_ = true;
  return true;
}
  418. bool MsContext::IsTsdOpened() {
  419. if (tsd_ref_ > 0) {
  420. return true;
  421. }
  422. return false;
  423. }
  424. bool MsContext::IsGeInited() {
  425. if (ge_ref_ > 0) {
  426. return true;
  427. }
  428. return false;
  429. }
  430. } // namespace mindspore