
debugger.cc

  1. /**
  2. * Copyright 2020-2022 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <dirent.h>
  17. #include <cstdio>
  18. #include <fstream>
  19. #include <tuple>
  20. #include <vector>
  21. #include <algorithm>
  22. #include <iostream>
  23. #include <cstring>
  24. #include <utility>
  25. #include <map>
  26. #include <regex>
  27. #include "debug/debugger/debugger.h"
  28. #include "debug/data_dump/dump_json_parser.h"
  29. #include "backend/common/session/session_basic.h"
  30. #include "backend/common/session/anf_runtime_algorithm.h"
  31. #include "include/common/utils/anfalgo.h"
  32. #include "runtime/device/kernel_runtime_manager.h"
  33. #include "runtime/device/kernel_runtime.h"
  34. #include "debug/data_dump/e2e_dump.h"
  35. #include "include/common/utils/config_manager.h"
  36. #include "include/common/debug/env_config_parser.h"
  37. #include "include/common/utils/comm_manager.h"
  38. #include "runtime/hardware/device_context_manager.h"
  39. #include "include/common/debug/anf_ir_dump.h"
  40. #include "include/common/debug/anf_dump_utils.h"
  41. #include "runtime/graph_scheduler/device_tensor_store.h"
  42. #ifdef ENABLE_DEBUGGER
  43. #include "debug/debugger/proto_exporter.h"
  44. #else
  45. #include "debug/debugger/proto_exporter_stub.h"
  46. #endif
  47. using debugger::Chunk;
  48. using debugger::EventReply;
  49. using debugger::GraphProto;
  50. using debugger::ModelProto;
  51. using debugger::Statistics;
  52. using debugger::TensorProto;
  53. using debugger::WatchCondition;
  54. using debugger::WatchCondition_Condition_inf;
  55. using debugger::WatchCondition_Condition_nan;
  56. using debugger::WatchCondition_Parameter;
  57. using debugger::WatchNode;
  58. using debugger::WatchpointHit;
  59. using mindspore::runtime::DeviceTensorStore;
  60. namespace mindspore {
  61. static constexpr auto g_chunk_size = 1024 * 1024 * 3;
  62. static constexpr int32_t heartbeat_period_second = 30;
  63. Debugger::Debugger()
  64. : grpc_client_(nullptr),
  65. debug_services_(nullptr),
  66. heartbeat_thread_(nullptr),
  67. device_id_(0),
  68. device_target_(""),
  69. num_step_(0),
  70. debugger_enabled_(false),
  71. suspended_at_last_kernel_(false),
  72. run_level_(""),
  73. node_name_(""),
  74. cur_name_(""),
  75. training_done_(false),
  76. send_metadata_done_(false),
  77. received_new_graph_(false),
  78. is_dataset_graph_(false),
  79. partial_memory_(false),
  80. initial_suspend_(true),
  81. enable_heartbeat_(false),
  82. not_dataset_graph_sum_(0),
  83. ascend_kernel_by_kernel_(false),
  84. version_("") {
  85. CheckDebuggerEnabledParam();
  86. auto ms_context = MsContext::GetInstance();
  87. MS_EXCEPTION_IF_NULL(ms_context);
  88. std::string device_target = ms_context->get_param<std::string>(MS_CTX_DEVICE_TARGET);
  89. MS_LOG(INFO) << "Debugger got device_target: " << device_target;
  90. if (!CheckDebuggerEnabled()) {
  91. return;
  92. } else if (device_target == kCPUDevice) {
  93. MS_LOG(WARNING) << "Not enabling debugger. Debugger does not support CPU.";
  94. } else {
  95. // configure partial memory reuse
  96. partial_memory_ = CheckDebuggerPartialMemoryEnabled();
  97. // switch memory reuse on or off
  98. EnvConfigParser::GetInstance().SetSysMemreuse(partial_memory_);
  99. // print some message about memory reuse to user
  100. if (partial_memory_) {
  101. MS_LOG(WARNING)
  102. << "Partial Memory Reuse is enabled. Note: 1. Please only set watchpoints before running the first "
  103. "step. 2. Tensor values are only available for nodes that are watched by any watchpoint.";
  104. } else {
  105. MS_LOG(WARNING)
  106. << "Memory Reuse is disabled. Set environment variable MS_DEBUGGER_PARTIAL_MEM=1 to reduce memory "
  107. "usage for large models.";
  108. }
  109. }
  110. }
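  // Annotation (not part of the original source): a minimal sketch of how the behaviour above is
  // typically driven from the shell, assuming the environment variables named in this constructor:
  //   export ENABLE_MS_DEBUGGER=1        # turn the online debugger on
  //   export MS_DEBUGGER_PARTIAL_MEM=1   # enable partial memory reuse, trading tensor availability for memory
  // before launching the training script.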
  111. void Debugger::Init(const uint32_t device_id, const std::string device_target) {
  112. // access lock for public method
  113. std::lock_guard<std::mutex> a_lock(access_lock_);
  114. // save device_id
  115. MS_LOG(INFO) << "Debugger got device_id: " << device_id;
  116. device_id_ = device_id;
  117. MS_LOG(INFO) << "Debugger got device_target: " << device_target;
  118. device_target_ = device_target;
  119. version_ = MSVERSION;
  120. }
  121. bool IsTypeDebuggerSupported(TypeId type) {
  122. if (type < TypeId::kNumberTypeEnd && type > TypeId::kNumberTypeBegin && type != kNumberTypeComplex64) {
  123. return true;
  124. }
  125. MS_LOG(INFO) << "Debugger does not support type: " << TypeIdLabel(type);
  126. return false;
  127. }
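  // Annotation (illustrative, based only on the range check above): ordinary numeric types such as
  // kNumberTypeFloat32 or kNumberTypeInt32 fall strictly between kNumberTypeBegin and kNumberTypeEnd
  // and are therefore supported, while kNumberTypeComplex64 is explicitly rejected and non-numeric
  // type ids fall outside the range.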
  128. void Debugger::EnableDebugger() {
  129. // reset some of the class members
  130. num_step_ = 0;
  131. debugger_enabled_ = false;
  132. enable_heartbeat_ = false;
  133. partial_memory_ = false;
  134. grpc_client_ = nullptr;
  135. debug_services_ = nullptr;
  136. heartbeat_thread_ = nullptr;
  137. // see if dump using debugger backend is enabled
  138. bool dump_enabled = CheckDebuggerDumpEnabled();
  139. MS_LOG(INFO) << "dump using debugger backend = " << dump_enabled;
  140. // check if debugger enabled
  141. debugger_enabled_ = CheckDebuggerEnabled();
  142. MS_LOG(INFO) << "debugger_enabled_ = " << debugger_enabled_;
  143. if (!debugger_enabled_ && !dump_enabled) {
  144. MS_LOG(INFO) << "Not enabling debugger. Set environment variable ENABLE_MS_DEBUGGER=1 to enable debugger.";
  145. return;
  146. }
  147. if (debugger_enabled_) {
  148. // configure grpc host
  149. std::string env_host_str = common::GetEnv("MS_DEBUGGER_HOST");
  150. std::string host;
  151. if (!env_host_str.empty()) {
  152. if (CheckIp(env_host_str)) {
  153. MS_LOG(INFO) << "Getenv MS_DEBUGGER_HOST: " << env_host_str;
  154. host = env_host_str;
  155. } else {
  156. debugger_enabled_ = false;
  157. MS_EXCEPTION(ValueError) << "Environment variable MS_DEBUGGER_HOST isn't a valid IP address. "
  158. "Please set environment variable MS_DEBUGGER_HOST=x.x.x.x to a valid IP";
  159. }
  160. } else {
  161. MS_LOG(INFO) << "Environment variable MS_DEBUGGER_HOST doesn't exist. Using default debugger host: localhost";
  162. host = "localhost";
  163. }
  164. // configure grpc port
  165. std::string env_port_str = common::GetEnv("MS_DEBUGGER_PORT");
  166. std::string port;
  167. if (!env_port_str.empty()) {
  168. if (CheckPort(env_port_str)) {
  169. MS_LOG(INFO) << "Getenv MS_DEBUGGER_PORT: " << env_port_str;
  170. port = env_port_str;
  171. } else {
  172. debugger_enabled_ = false;
  173. MS_EXCEPTION(ValueError) << "Environment variable MS_DEBUGGER_PORT is not valid. Custom port ranging from 1 to "
  174. "65535";
  175. }
  176. } else {
  177. port = "50051";
  178. if (!CheckPort(port)) {
  179. MS_EXCEPTION(ValueError) << "Default MS_DEBUGGER_PORT is not valid. Custom port ranging from 1 to 65535";
  180. }
  181. MS_LOG(INFO) << "Environment variable MS_DEBUGGER_PORT doesn't exist. Using default debugger port: 50051";
  182. }
  183. // initialize grpc client
  184. grpc_client_ = std::make_unique<GrpcClient>(host, port);
  185. // initialize sending heartbeat
  186. heartbeat_thread_ = std::make_unique<std::thread>([this]() { SendHeartbeat(heartbeat_period_second); });
  187. }
  188. debug_services_ = std::make_unique<DebugServices>();
  189. }
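  // Annotation (not part of the original source): the gRPC endpoint resolved above can be summarised as
  //   host = MS_DEBUGGER_HOST if set and a valid IP, otherwise "localhost"
  //   port = MS_DEBUGGER_PORT if set and within [1, 65535], otherwise "50051"
  // so, for example, exporting MS_DEBUGGER_HOST=192.168.0.10 and MS_DEBUGGER_PORT=50052 would point the
  // client at a remote MindInsight debugger server (hypothetical values, for illustration only).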
  190. void Debugger::CheckDatasetSinkMode(const KernelGraphPtr &graph_ptr) {
  191. bool sink_mode = ConfigManager::GetInstance().dataset_mode() || graph_ptr->IsDatasetGraph();
  192. if (CheckDebuggerDumpEnabled() && sink_mode && device_target_ == kGPUDevice) {
  193. MS_EXCEPTION(NotSupportError)
  194. << "e2e_dump is not supported on GPU with dataset_sink_mode=True. Please set dataset_sink_mode=False";
  195. }
  196. if (CheckDebuggerEnabled() && sink_mode) {
  197. MS_EXCEPTION(NotSupportError)
  198. << "Debugger is not supported with dataset_sink_mode=True. Please set dataset_sink_mode=False";
  199. }
  200. }
  201. bool Debugger::CheckDebuggerDumpEnabled() const {
  202. // see if dump is enabled
  203. auto &dump_json_parser = DumpJsonParser::GetInstance();
  204. if (device_target_ == kGPUDevice) {
  205. return dump_json_parser.e2e_dump_enabled();
  206. } else if (device_target_ == kAscendDevice) {
  207. return dump_json_parser.async_dump_enabled() || dump_json_parser.e2e_dump_enabled();
  208. }
  209. return false;
  210. }
  211. bool Debugger::CheckDebuggerEnabled() const {
  212. // get env variables to configure debugger
  213. std::string env_enable_str = common::GetEnv("ENABLE_MS_DEBUGGER");
  214. if (!env_enable_str.empty()) {
  215. (void)std::transform(env_enable_str.begin(), env_enable_str.end(), env_enable_str.begin(), ::tolower);
  216. if ((env_enable_str == "1" || env_enable_str == "true") && device_target_ != kCPUDevice) {
  217. return true;
  218. }
  219. }
  220. return false;
  221. }
  222. void Debugger::CheckDebuggerEnabledParam() const {
  223. // check the value of env variable ENABLE_MS_DEBUGGER
  224. std::string env_enable_str = common::GetEnv("ENABLE_MS_DEBUGGER");
  225. if (!env_enable_str.empty()) {
  226. (void)std::transform(env_enable_str.begin(), env_enable_str.end(), env_enable_str.begin(), ::tolower);
  227. if (env_enable_str != "0" && env_enable_str != "1" && env_enable_str != "false" && env_enable_str != "true") {
  228. MS_LOG(WARNING) << "Env variable ENABLE_MS_DEBUGGER should be True/False/1/0 (case insensitive), but get: "
  229. << env_enable_str;
  230. }
  231. }
  232. }
  233. bool Debugger::CheckDebuggerPartialMemoryEnabled() const {
  234. std::string env_partial_mem_str = common::GetEnv("MS_DEBUGGER_PARTIAL_MEM");
  235. if (!env_partial_mem_str.empty()) {
  236. MS_LOG(INFO) << "Getenv MS_DEBUGGER_PARTIAL_MEM: " << env_partial_mem_str;
  237. if (env_partial_mem_str == "1") {
  238. return true;
  239. }
  240. }
  241. return false;
  242. }
  243. /*
  244. * Feature group: Dump, Online debugger.
  245. * Target device group: Ascend, GPU.
  246. * Runtime category: Old runtime, MindRT
  247. * Description: Returns true if online debugger or dump is enabled.
  248. */
  249. bool Debugger::DebuggerBackendEnabled() const { return CheckDebuggerDumpEnabled() || CheckDebuggerEnabled(); }
  250. void Debugger::Reset() {
  251. // access lock for public method
  252. std::lock_guard<std::mutex> a_lock(access_lock_);
  253. // reset components
  254. if (heartbeat_thread_ && heartbeat_thread_->joinable()) {
  255. SetEnableHeartbeat(false);
  256. heartbeat_thread_->join();
  257. MS_LOG(INFO) << "Join Heartbeat thread.";
  258. }
  259. heartbeat_thread_ = nullptr;
  260. device_id_ = 0;
  261. device_target_ = "";
  262. num_step_ = 0;
  263. debugger_enabled_ = false;
  264. is_dataset_graph_ = false;
  265. partial_memory_ = false;
  266. graph_ptr_ = nullptr;
  267. grpc_client_ = nullptr;
  268. debug_services_ = nullptr;
  269. graph_proto_list_.clear();
  270. graph_ptr_list_.clear();
  271. graph_ptr_step_vec_.clear();
  272. parameters_mindRT_.clear();
  273. visited_root_graph_ids_.clear();
  274. MS_LOG(INFO) << "Release Debugger resource.";
  275. }
  276. /*
  277. * Feature group: Dump, Online debugger.
  278. * Target device group: Ascend, GPU.
  279. * Runtime category: MindRT.
  280. * Description: Sets root_graph_id for all the graphs in the compiled graph list. Sets cur_root_graph_id_ and
  281. * prev_root_graph_id_ and calls PreExecute function for all the graphs.
  282. */
  283. void Debugger::PreExecuteGraphDebugger(const std::vector<KernelGraphPtr> &graphs,
  284. const std::vector<AnfNodePtr> &origin_parameters_order) {
  285. // MindRTBackend for GPU and Ascend
  286. if (device_target_ == kCPUDevice) {
  287. return;
  288. }
  289. // Store graphs that are run in one step.
  290. graph_ptr_step_vec_ = graphs;
  291. parameters_mindRT_ = origin_parameters_order;
  292. prev_root_graph_id_ = cur_root_graph_id_;
  293. // set first run graph as the root graph
  294. cur_root_graph_id_ = graph_ptr_step_vec_[0]->graph_id();
  295. MS_LOG(DEBUG) << "Current root graph id: " << cur_root_graph_id_ << " prev_root_graph_id_: " << prev_root_graph_id_
  296. << " for step: " << num_step_ << ".";
  297. MS_LOG(DEBUG) << "Set root graph for all the subgraphs:";
  298. for (size_t graph_index = 0; graph_index < graphs.size(); ++graph_index) {
  299. const auto &graph = graphs[graph_index];
  300. // set root graph id for GPU mindrt runtime.
  301. MS_LOG(DEBUG) << "Set root graph for graph: " << graph->graph_id() << " to: " << cur_root_graph_id_ << ".";
  302. graph->set_root_graph_id(cur_root_graph_id_);
  303. if (debugger_) {
  304. debugger_->PreExecute(graph);
  305. }
  306. }
  307. }
  308. /*
  309. * Feature group: Dump.
  310. * Target device group: Ascend.
  311. * Runtime category: Old runtime, MindRT.
  312. * Description: When async dump is enabled and dataset_sink_mode is true, graph_iter_num_map_ stores the number of
  313. * iterations per epoch for each running graph.
  314. */
  315. void Debugger::UpdateGraphIterMap(uint32_t graph_id, int32_t iter_num) {
  316. if (graph_iter_num_map_.find(graph_id) == graph_iter_num_map_.end()) {
  317. graph_iter_num_map_[graph_id] = iter_num;
  318. }
  319. }
  320. /*
  321. * Feature group: Dump, Online debugger.
  322. * Target device group: Ascend.
  323. * Runtime category: Old runtime.
  324. * Description: For Ascend old runtime, this function sets the current and previous root graph id.
  325. */
  326. void Debugger::SetCurrentAndPrevRootGraph(uint32_t root_graph_id) {
  327. // For GPU and Ascend MindRT, root graphs are set in PreExecuteGraphDebugger.
  328. if (device_target_ != kAscendDevice || MsContext::GetInstance()->get_param<bool>(MS_CTX_ENABLE_MINDRT)) {
  329. return;
  330. }
  331. prev_root_graph_id_ = cur_root_graph_id_;
  332. cur_root_graph_id_ = root_graph_id;
  333. MS_LOG(DEBUG) << "Current root graph id: " << cur_root_graph_id_ << " prev_root_graph_id_: " << prev_root_graph_id_
  334. << " for step: " << num_step_ << ".";
  335. }
  336. /*
  337. * Feature group: Dump, Online debugger.
  338. * Target device group: GPU.
  339. * Runtime category: Old runtime.
  340. * Description: In the case of GPU old runtime and when we have multiple subgraphs, we use the first run graph id to
  341. * update the step number.
  342. */
  343. void Debugger::StoreRunGraphIdList(uint32_t graph_id) {
  344. // collect rungraph_ids to update the step number in the multigraph case for the GPU old runtime
  345. if (!rungraph_id_list_.size()) {
  346. rungraph_id_list_.push_back(graph_id);
  347. } else {
  348. if (std::find(rungraph_id_list_.begin(), rungraph_id_list_.end(), graph_id) == rungraph_id_list_.end()) {
  349. rungraph_id_list_.push_back(graph_id);
  350. }
  351. }
  352. }
  353. /*
  354. * Feature group: Dump, Online debugger.
  355. * Target device group: Ascend, GPU.
  356. * Runtime category: Old runtime, MindRT.
  357. * Description: Sets previous and current root_graph_id for Ascend old runtime, sends graphs to online debugger when
  358. * debugger_enabled_ is true.
  359. */
  360. void Debugger::PreExecute(const KernelGraphPtr &graph_ptr) {
  361. MS_EXCEPTION_IF_NULL(graph_ptr);
  362. // access lock for public method
  363. std::lock_guard<std::mutex> a_lock(access_lock_);
  364. if (!MsContext::GetInstance()->get_param<bool>(MS_CTX_ENABLE_MINDRT)) {
  365. // Checking dataset_sink_mode for mindRT is done in debug_actor
  366. CheckDatasetSinkMode(graph_ptr);
  367. }
  368. auto graph_id = graph_ptr->graph_id();
  369. MS_LOG(DEBUG) << "PreExecute for graph: " << graph_id << " in step: " << num_step_ << ".";
  370. StoreRunGraphIdList(graph_id);
  371. SetCurrentAndPrevRootGraph(graph_ptr->root_graph_id());
  372. // multiple graphs
  373. if (graph_proto_list_.size() > 1) {
  374. // there is more than one graph that is not a dataset graph
  375. if (not_dataset_graph_sum_ > 0) {
  376. SendMultiGraphsAndClear(graph_ptr);
  377. }
  378. } else if (graph_proto_list_.size() == 1) {
  379. // single graph, and not the initial step
  380. if (device_target_ == kGPUDevice && !MsContext::GetInstance()->get_param<bool>(MS_CTX_ENABLE_MINDRT) &&
  381. num_step_ != 0) {
  382. if (debugger_enabled_ && !(run_level_ == "node" && suspended_at_last_kernel_)) {
  383. CommandLoop();
  384. }
  385. debug_services_->ResetLoadedTensors();
  386. }
  387. // In single graph case, reset graph_ptr_ to be nullptr when debugger receives a new graph
  388. if (received_new_graph_) {
  389. graph_ptr_ = nullptr;
  390. CheckGraphPtr(graph_ptr);
  391. }
  392. } else if (debugger_enabled_ && graph_id == rungraph_id_list_.front() && device_target_ == kGPUDevice &&
  393. !MsContext::GetInstance()->get_param<bool>(MS_CTX_ENABLE_MINDRT)) {
  394. // Multiple graphs, and not the initial step:
  395. // stop only when receiving the first sub run graph for each step for the old runtime
  396. // if we have stopped for the last kernel before, no need to stop again
  397. if (Common::GetDebugTerminate()) {
  398. return;
  399. }
  400. if (!(run_level_ == "node" && suspended_at_last_kernel_)) {
  401. CommandLoop();
  402. }
  403. debug_services_->ResetLoadedTensors();
  404. }
  405. // resets for the new graph
  406. suspended_at_last_kernel_ = false;
  407. }
  408. /*
  409. * Feature group: Online debugger.
  410. * Target device group: Ascend, GPU.
  411. * Runtime category: Old runtime, MindRT.
  412. * Description: Sends all the subgraphs to online debugger when debugger_enabled_ is true.
  413. */
  414. void Debugger::SendMultiGraphsAndClear(const KernelGraphPtr &graph_ptr) {
  415. // only try to enable debugger if they are not all dataset graphs
  416. if (!debugger_enabled_) {
  417. EnableDebugger();
  418. }
  419. if (debugger_enabled_) {
  420. // only send compiled graphs once at the initial step.
  421. auto dbg_graph_ptr = graph_ptr_;
  422. // use current graph ptr to load parameters
  423. graph_ptr_ = graph_ptr;
  424. LoadParametersAndConst();
  425. // revert graph ptr to original value
  426. graph_ptr_ = dbg_graph_ptr;
  427. SendMultiGraphsAndSuspend(graph_proto_list_);
  428. graph_proto_list_.clear();
  429. received_new_graph_ = false;
  430. }
  431. }
  432. /*
  433. * Feature group: Dump.
  434. * Target device group: Ascend, GPU.
  435. * Runtime category: MindRT.
  436. * Description: Returns the rank_id for GPU and Ascend kernel-by-kernel MindRT.
  437. */
  438. uint32_t Debugger::GetRankID() {
  439. auto ms_context = MsContext::GetInstance();
  440. MS_EXCEPTION_IF_NULL(ms_context);
  441. std::string device_target = ms_context->get_param<std::string>(MS_CTX_DEVICE_TARGET);
  442. uint32_t device_id = ms_context->get_param<uint32_t>(MS_CTX_DEVICE_ID);
  443. const auto &device_context =
  444. device::DeviceContextManager::GetInstance().GetOrCreateDeviceContext({device_target, device_id});
  445. uint32_t rank_id = device_context->GetRankID();
  446. return rank_id;
  447. }
  448. /*
  449. * Feature group: Dump.
  450. * Target device group: Ascend, GPU.
  451. * Runtime category: MindRT.
  452. * Description: When dump is enabled, this function: 1) Dumps parameters for the current root_graph_id to the
  453. * root_graph's directory. 2) Dumps constant data once for each graph. 3) Dumps graph run history for each graph.
  454. */
  455. void Debugger::DumpParamsAndConstAndHistory() {
  456. if (!CheckDebuggerDumpEnabled()) {
  457. return;
  458. }
  459. LoadParametersAllGraphs();
  460. E2eDump::DumpParametersData(GetRankID(), debugger_.get());
  461. // Whether constant data was already dumped for the current root graph.
  462. bool cur_root_graph_checked = std::find(visited_root_graph_ids_.begin(), visited_root_graph_ids_.end(),
  463. cur_root_graph_id_) != visited_root_graph_ids_.end();
  464. for (auto graph : graph_ptr_step_vec_) {
  465. if (!cur_root_graph_checked) {
  466. LoadConstsForGraph(graph);
  467. // Dump constant data for GPU.
  468. E2eDump::DumpConstantData(graph.get(), GetRankID(), debugger_.get());
  469. // Dump constant data for Ascend.
  470. DumpConstantDataAscend(graph);
  471. }
  472. // Dump graph run history for each graph.
  473. E2eDump::DumpRunIter(graph, GetRankID());
  474. }
  475. if (!cur_root_graph_checked) {
  476. visited_root_graph_ids_.push_back(cur_root_graph_id_);
  477. }
  478. }
  479. void Debugger::DumpConstantDataAscend(const KernelGraphPtr &graph) {
  480. if (device_target_ != kAscendDevice) {
  481. return;
  482. }
  483. auto &json_parser = DumpJsonParser::GetInstance();
  484. if (json_parser.e2e_dump_enabled() || json_parser.async_dump_enabled()) {
  485. // Dump constant data for Ascend MindRT; for the old runtime, constant data is dumped in session_basic.
  486. uint32_t rank_id = GetRankID();
  487. std::string cst_file_dir = GenerateDumpPath(graph->root_graph_id(), rank_id, true);
  488. DumpConstantInfo(graph, cst_file_dir);
  489. }
  490. }
  491. /*
  492. * Feature group: Dump.
  493. * Target device group: Ascend, GPU.
  494. * Runtime category: MindRT.
  495. * Description: Dumps a single node for given graph_id.
  496. */
  497. void Debugger::DumpSingleNode(const CNodePtr &node, uint32_t graph_id, const KernelLaunchInfo *launch_info) {
  498. if (debugger_ && debugger_->DebuggerBackendEnabled()) {
  499. uint32_t rank_id = GetRankID();
  500. (void)E2eDump::DumpSingleNodeData(node, graph_id, rank_id, debugger_.get(), launch_info);
  501. }
  502. }
  503. /*
  504. * Feature group: Dump.
  505. * Target device group: GPU.
  506. * Runtime category: MindRT.
  507. * Description: This function is used for the new GPU runtime using MindRTBackend; on the Ascend platform, graphs
  508. * are saved in session_basic.
  509. */
  510. void Debugger::DumpInGraphCompiler(const KernelGraphPtr &kernel_graph) {
  511. if (device_target_ == kAscendDevice) {
  512. return;
  513. }
  514. auto &json_parser = DumpJsonParser::GetInstance();
  515. if (json_parser.e2e_dump_enabled()) {
  516. uint32_t rank_id = GetRankID();
  517. kernel_graph->set_root_graph_id(kernel_graph->graph_id());
  518. std::string final_graph = "trace_code_graph_" + std::to_string(kernel_graph->graph_id());
  519. std::string root_dir = json_parser.path() + "/rank_" + std::to_string(rank_id);
  520. std::string target_dir = root_dir + "/graphs";
  521. std::string ir_file_path = target_dir + "/" + "ms_output_" + final_graph + ".ir";
  522. DumpIRProtoWithSrcInfo(kernel_graph, final_graph, target_dir, kDebugWholeStack);
  523. DumpIR("trace_code_graph", kernel_graph, true, kWholeStack, ir_file_path);
  524. DumpGraphExeOrder("ms_execution_order_graph_" + std::to_string(kernel_graph->graph_id()) + ".csv", root_dir,
  525. kernel_graph->execution_order());
  526. }
  527. }
  528. /*
  529. * Feature group: Dump, Online debugger.
  530. * Target device group: Ascend, GPU and CPU.
  531. * Runtime category: MindRT.
  532. * Description: Load and dump parameters and constant data, call postExecute and update dump iter.
  533. */
  534. void Debugger::PostExecuteGraphDebugger() {
  535. // On CPU, only update the dump iteration; parameters and consts are not dumped here
  536. if (device_target_ == kCPUDevice) {
  537. DumpJsonParser::GetInstance().UpdateDumpIter();
  538. return;
  539. }
  540. DumpParamsAndConstAndHistory();
  541. // debug used for dump
  542. if (CheckDebuggerDumpEnabled() && !debugger_enabled()) {
  543. ClearCurrentData();
  544. }
  545. if (debugger_) {
  546. debugger_->PostExecute();
  547. }
  548. E2eDump::UpdateIterMindRTDump();
  549. }
  550. /*
  551. * Feature group: Online debugger.
  552. * Target device group: Ascend, GPU.
  553. * Runtime category: Old runtime, MindRT.
  554. * Description: Send hit watchpoints, update the step number and reset loaded tensors.
  555. */
  556. void Debugger::PostExecute() {
  557. // access lock for public method
  558. std::lock_guard<std::mutex> a_lock(access_lock_);
  559. if (Common::GetDebugTerminate()) {
  560. return;
  561. }
  562. if (debugger_ && debugger_->DebuggerBackendEnabled()) {
  563. // analyze tensor data and send the watchpoints that have been hit
  564. if (debugger_enabled_ && !is_dataset_graph_) {
  565. SendWatchpoints(CheckWatchpoints());
  566. // no need to suspend at each graph for the GPU old runtime; suspension happens in preExecute
  567. if (device_target_ == kAscendDevice) {
  568. CommandLoop();
  569. } else if (device_target_ == kGPUDevice && MsContext::GetInstance()->get_param<bool>(MS_CTX_ENABLE_MINDRT)) {
  570. if (!(run_level_ == "node" && suspended_at_last_kernel_)) {
  571. CommandLoop();
  572. }
  573. }
  574. if (device_target_ != kGPUDevice) {
  575. num_step_++;
  576. }
  577. }
  578. // Only keep parameters in the current map
  579. // GPU ResetLoadedTensors for old runtime happens in preExecute
  580. if ((device_target_ == kGPUDevice && MsContext::GetInstance()->get_param<bool>(MS_CTX_ENABLE_MINDRT)) ||
  581. device_target_ == kAscendDevice) {
  582. if (debug_services_ != nullptr) {
  583. debug_services_->ResetLoadedTensors();
  584. } else {
  585. MS_LOG(DEBUG) << "debug_services_ is nullptr";
  586. }
  587. }
  588. }
  589. }
  590. bool Debugger::ReadNodeDataRequired(const CNodePtr &kernel) const {
  591. if (debugger_enabled_ && !is_dataset_graph_) {
  592. auto is_watchpoint = debug_services_->IsWatchPoint(cur_name_, kernel);
  593. // if node has a watchpoint on it, is next_to node, or continue_to node then read the kernel tensor data
  594. if (is_watchpoint || (run_level_ == "node" && (node_name_ == "" || node_name_ == cur_name_))) {
  595. return true;
  596. }
  597. }
  598. return false;
  599. }
  600. /*
  601. * Feature group: Online debugger.
  602. * Target device group: GPU.
  603. * Runtime category: Old runtime, MindRT.
  604. * Description: Check and send watchpoint hit for a single node, suspend if a watchpoint is hit or we are continuing
  605. * in node level.
  606. */
  607. void Debugger::PostExecuteNode(const CNodePtr &kernel, bool last_kernel) {
  608. // access lock for public method
  609. std::lock_guard<std::mutex> a_lock(access_lock_);
  610. if (Common::GetDebugTerminate()) {
  611. return;
  612. }
  613. if (debugger_enabled_ && !is_dataset_graph_) {
  614. auto is_watchpoint = debug_services_->IsWatchPoint(cur_name_, kernel);
  615. // if the kernel is a watchpoint and it gets hit, suspend.
  616. bool hit_empty_flag = true;
  617. if (is_watchpoint) {
  618. auto hits = CheckWatchpoints(cur_name_, kernel);
  619. if (!hits.empty()) {
  620. SendWatchpoints(hits);
  621. CommandLoop();
  622. hit_empty_flag = false;
  623. }
  624. }
  625. if (hit_empty_flag && run_level_ == "node" && (node_name_ == "" || node_name_ == cur_name_)) {
  626. // if kernel is not watchpoint and is next_to or continue_to node, suspend
  627. // sets a bool to be checked in preExecute to avoid double stopping at last kernel in the last graph
  628. if (last_kernel) {
  629. suspended_at_last_kernel_ = true;
  630. }
  631. CommandLoop();
  632. }
  633. return;
  634. }
  635. }
  636. /*
  637. * Feature group: Dump, Online debugger.
  638. * Target device group: Ascend, GPU.
  639. * Runtime category: Old runtime, MindRT.
  640. * Description: Get graph proto and add it to graph proto list and add loaded graph pointers to a list.
  641. */
  642. void Debugger::LoadGraphs(const KernelGraphPtr &graph_ptr) {
  643. MS_EXCEPTION_IF_NULL(graph_ptr);
  644. if (graph_ptr_ != graph_ptr) {
  645. MS_LOG(INFO) << "LoadGraphs Debugger got new graph: " << graph_ptr->graph_id();
  646. received_new_graph_ = true;
  647. // save new graph_ptr
  648. graph_ptr_ = graph_ptr;
  649. CheckDatasetGraph();
  650. if (!is_dataset_graph_) {
  651. // get proto for new graph_ptr
  652. auto graph_proto = GetGraphProto(graph_ptr);
  653. // add new graph proto to graph_proto_list_
  654. graph_proto_list_.push_back(graph_proto);
  655. graph_ptr_list_.push_back(graph_ptr);
  656. not_dataset_graph_sum_++;
  657. }
  658. // reset is_dataset_graph to be false
  659. is_dataset_graph_ = false;
  660. }
  661. }
  662. // In single graph cases, check single graph ptr
  663. void Debugger::CheckGraphPtr(const KernelGraphPtr &graph_ptr) {
  664. MS_EXCEPTION_IF_NULL(graph_ptr);
  665. if (graph_ptr_ != graph_ptr) {
  666. MS_LOG(INFO) << "CheckGraphPtr Debugger got new graph: " << graph_ptr->graph_id();
  667. // save new graph_ptr
  668. graph_ptr_ = graph_ptr;
  669. if (!is_dataset_graph_) {
  670. // only try to enable debugger if it is not a dataset graph
  671. if (!debugger_enabled_) {
  672. EnableDebugger();
  673. }
  674. if (debugger_enabled_) {
  675. LoadParametersAndConst();
  676. // get graph proto and send to MindInsight
  677. auto graph_proto = graph_proto_list_.front();
  678. SendGraphAndSuspend(graph_proto);
  679. graph_proto_list_.clear();
  680. received_new_graph_ = false;
  681. }
  682. }
  683. }
  684. }
  685. void Debugger::CheckDatasetGraph() {
  686. // print parameter node names
  687. MS_EXCEPTION_IF_NULL(graph_ptr_);
  688. const auto &params = graph_ptr_->inputs();
  689. for (const auto &param : params) {
  690. MS_LOG(INFO) << "param: " << GetKernelNodeName(param);
  691. }
  692. // check if there is GetNext or InitDataSetQueue node
  693. const auto &nodes = graph_ptr_->execution_order();
  694. for (const auto &node : nodes) {
  695. auto node_name = common::AnfAlgo::GetCNodeName(node);
  696. MS_LOG(INFO) << "node: " << GetKernelNodeName(node);
  697. if (node_name == "GetNext" || node_name == "InitDataSetQueue") {
  698. MS_LOG(INFO) << "Not enabling debugger for graph " << graph_ptr_->graph_id() << ": found dataset graph node "
  699. << node_name;
  700. is_dataset_graph_ = true;
  701. return;
  702. }
  703. }
  704. is_dataset_graph_ = false;
  705. }
  706. GraphProto Debugger::GetGraphProto(const KernelGraphPtr &graph_ptr) const {
  707. // convert kernel graph to debugger modelproto
  708. ModelProto model = GetDebuggerFuncGraphProto(graph_ptr);
  709. return model.graph();
  710. }
  711. /*
  712. * Feature group: Online debugger.
  713. * Target device group: Ascend, GPU.
  714. * Runtime category: Old runtime, MindRT.
  715. * Description: Send debugger backend heartbeat to online debugger every few seconds.
  716. */
  717. void Debugger::SendHeartbeat(int32_t period) {
  718. int num_heartbeat_fail = 0;
  719. const int max_num_heartbeat_fail = 5;
  720. const int retry_milliseconds = 500;
  721. Heartbeat heartbeat;
  722. heartbeat.set_message("Debugger is alive");
  723. heartbeat.set_period(heartbeat_period_second);
  724. SetEnableHeartbeat(CheckDebuggerEnabled());
  725. while (enable_heartbeat_) {
  726. MS_EXCEPTION_IF_NULL(grpc_client_);
  727. EventReply reply = grpc_client_->SendHeartbeat(heartbeat);
  728. if (reply.status() != EventReply::OK) {
  729. MS_LOG(ERROR) << "Error: SendHeartbeat failed";
  730. num_heartbeat_fail++;
  731. if (num_heartbeat_fail >= max_num_heartbeat_fail) {
  732. MS_LOG(ERROR) << "Maximum number of failure for SendHeartbeat reached : exiting training session.";
  733. SetEnableHeartbeat(false);
  734. break;
  735. } else {
  736. MS_LOG(ERROR) << "Number of consecutive SendHeartbeat fail:" << num_heartbeat_fail;
  737. std::this_thread::sleep_for(std::chrono::milliseconds(retry_milliseconds));
  738. }
  739. } else {
  740. int recheck_period_ms = 200;
  741. for (int i = 0; i < (period * 1000 / recheck_period_ms); i++) {
  742. if (enable_heartbeat_) {
  743. std::this_thread::sleep_for(std::chrono::milliseconds(recheck_period_ms));
  744. } else {
  745. break;
  746. }
  747. }
  748. }
  749. }
  750. }
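  // Annotation (derived from the constants above): a heartbeat is sent roughly every
  // heartbeat_period_second (30 s); a failed send is retried after 500 ms, and the loop gives up after
  // 5 consecutive failures. Between successful heartbeats, enable_heartbeat_ is re-checked in 200 ms
  // slices so the thread can exit promptly once SetEnableHeartbeat(false) is called.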
  751. void Debugger::SendGraphAndSuspend(const GraphProto &graph_proto) {
  752. if (!CheckSendMetadata()) {
  753. return;
  754. }
  755. // send graph to MindInsight server
  756. MS_EXCEPTION_IF_NULL(grpc_client_);
  757. EventReply reply = grpc_client_->SendGraph(graph_proto);
  758. if (reply.status() != EventReply::OK) {
  759. MS_LOG(ERROR) << "Error: SendGraph failed";
  760. }
  761. // enter command loop, wait and process commands
  762. CommandLoop();
  763. }
  764. bool Debugger::SendMetadata(bool version_check) {
  765. // prepare metadata
  766. MS_EXCEPTION_IF_NULL(graph_ptr_);
  767. std::string device_name = std::to_string(device_id_) + ":" + std::to_string(graph_ptr_->graph_id());
  768. Metadata metadata;
  769. metadata.set_device_name(device_name);
  770. metadata.set_cur_step(num_step_);
  771. metadata.set_backend(device_target_);
  772. metadata.set_cur_node(cur_name_);
  773. metadata.set_training_done(training_done_);
  774. metadata.set_ms_version(version_);
  775. MS_LOG(INFO) << "Is training done?" << training_done_;
  776. // set graph number to not_dataset_graph_sum_
  777. metadata.set_graph_num(not_dataset_graph_sum_);
  778. MS_EXCEPTION_IF_NULL(grpc_client_);
  779. EventReply reply_metadata = grpc_client_->SendMetadata(metadata);
  780. bool ret = false;
  781. if (reply_metadata.status() == EventReply::OK) {
  782. if (version_check) {
  783. // get the type of the command in the metadata reply; it should be version matched
  784. DebuggerCommand cmd = GetCommand(reply_metadata);
  785. if (cmd != DebuggerCommand::kVersionMatchedCMD) {
  786. MS_LOG(ERROR) << "MindInsight version is too old, Mindspore version is " << version_;
  787. Exit();
  788. } else {
  789. if (GetMiVersionMatched(reply_metadata)) {
  790. MS_LOG(INFO) << "MindSpore version is " << version_ << " matches MindInsight version.";
  791. ret = true;
  792. } else {
  793. MS_LOG(ERROR) << "MindSpore version " << version_ << ", did not match MindInsight version.";
  794. CommandLoop();
  795. }
  796. }
  797. } else {
  798. // version check is done before so we can just return true here
  799. ret = true;
  800. }
  801. } else {
  802. MS_LOG(ERROR) << "Error: SendMetadata failed";
  803. }
  804. return ret;
  805. }
  806. void Debugger::SendMultiGraphsAndSuspend(const std::list<GraphProto> &graph_proto_list) {
  807. if (!CheckSendMetadata()) {
  808. return;
  809. }
  810. MS_EXCEPTION_IF_NULL(grpc_client_);
  811. // send multiple graphs to the MindInsight server
  812. // split graph into chunks if one graph is larger than chunk size
  813. std::list<Chunk> chunked_graph_proto_list;
  814. Chunk chunk;
  815. for (auto graph : graph_proto_list) {
  816. std::string str = graph.SerializeAsString();
  817. auto graph_size = graph.ByteSize();
  818. if (graph_size > g_chunk_size) {
  819. auto sub_graph_str = grpc_client_->ChunkString(str, graph_size);
  820. for (unsigned int i = 0; i < sub_graph_str.size(); i++) {
  821. chunk.set_buffer(sub_graph_str[i]);
  822. if (i < sub_graph_str.size() - 1) {
  823. chunk.set_finished(false);
  824. } else {
  825. chunk.set_finished(true);
  826. }
  827. chunked_graph_proto_list.push_back(chunk);
  828. }
  829. } else {
  830. chunk.set_buffer(str);
  831. chunk.set_finished(true);
  832. chunked_graph_proto_list.push_back(chunk);
  833. }
  834. }
  835. EventReply reply = grpc_client_->SendMultiGraphs(chunked_graph_proto_list);
  836. if (reply.status() != EventReply::OK) {
  837. MS_LOG(ERROR) << "Error: SendGraph failed";
  838. }
  839. // enter command loop, wait and process commands
  840. CommandLoop();
  841. }
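  // Annotation (worked example, not from the original source): with g_chunk_size = 3 MiB, a graph whose
  // serialized size is about 7 MiB is split by ChunkString() into three chunks; the first two are pushed
  // with set_finished(false) and only the final chunk carries set_finished(true), presumably signalling
  // to the receiving server that the graph is complete.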
  842. bool Debugger::CheckSendMetadata() {
  843. if (!send_metadata_done_) {
  844. if (!SendMetadata(true)) {
  845. return false;
  846. }
  847. send_metadata_done_ = true;
  848. }
  849. return true;
  850. }
  851. void Debugger::CommandLoop() {
  852. // prepare metadata
  853. MS_EXCEPTION_IF_NULL(graph_ptr_);
  854. std::string device_name = std::to_string(device_id_) + ":" + std::to_string(cur_root_graph_id_);
  855. Metadata metadata;
  856. metadata.set_device_name(device_name);
  857. metadata.set_cur_step(num_step_);
  858. metadata.set_backend(device_target_);
  859. metadata.set_cur_node(cur_name_);
  860. metadata.set_training_done(training_done_);
  861. // loop exit flag
  862. bool run = false;
  863. int num_wait_fail = 0;
  864. const int max_num_wait_fail = 5;
  865. while (!run) {
  866. // wait for command
  867. MS_EXCEPTION_IF_NULL(grpc_client_);
  868. EventReply reply = grpc_client_->WaitForCommand(metadata);
  869. if (reply.status() != EventReply::OK) {
  870. MS_LOG(ERROR) << "Error: WaitForCommand failed";
  871. num_wait_fail++;
  872. if (num_wait_fail > max_num_wait_fail) {
  873. MS_LOG(ERROR) << "Maximum number of WaitForCommand retry reached: exiting training session.";
  874. MS_LOG(ERROR) << "Failed to connect to MindInsight debugger server. Please check the config "
  875. "of debugger host and port.";
  876. Exit();
  877. run = true;
  878. } else {
  879. MS_LOG(ERROR) << "Number of consecutive WaitForCommand fail:" << num_wait_fail << "; Retry after "
  880. << num_wait_fail << "s";
  881. std::this_thread::sleep_for(std::chrono::seconds(num_wait_fail));
  882. }
  883. continue;
  884. }
  885. // get type of the command in reply
  886. DebuggerCommand cmd = GetCommand(reply);
  887. if (cmd == DebuggerCommand::kUnknownCMD) {
  888. MS_LOG(DEBUG) << "Debug: debugger received unknown command";
  889. continue;
  890. }
  891. MS_LOG(INFO) << "received command: ";
  892. switch (cmd) {
  893. case DebuggerCommand::kUnknownCMD:
  894. MS_LOG(INFO) << "UnknownCMD";
  895. break;
  896. case DebuggerCommand::kExitCMD:
  897. MS_LOG(INFO) << "ExitCMD";
  898. Exit(true);
  899. // Used for debugger termination
  900. run = true;
  901. break;
  902. case DebuggerCommand::kRunCMD:
  903. ProcessRunCMD(reply);
  904. if (GetRunLevel(reply) != "recheck") {
  905. // exit loop
  906. run = true;
  907. }
  908. break;
  909. case DebuggerCommand::kSetCMD:
  910. ProcessKSetCMD(reply);
  911. break;
  912. case DebuggerCommand::kViewCMD:
  913. ProcessKViewCMD(reply);
  914. break;
  915. case DebuggerCommand::kVersionMatchedCMD:
  916. MS_LOG(ERROR) << "Received unexpected Version Matched CMD from MindInsight.";
  917. Exit();
  918. break;
  919. default:
  920. MS_LOG(ERROR) << "Received unknown CMD from MindInsight";
  921. Exit();
  922. break;
  923. }
  924. }
  925. }
  926. void Debugger::ProcessRunCMD(const EventReply &reply) {
  927. MS_LOG(INFO) << "RunCMD";
  928. if (GetRunLevel(reply) == "recheck") {
  929. MS_LOG(INFO) << "rechecking all watchpoints";
  930. SendWatchpoints(CheckWatchpoints("", nullptr, true));
  931. } else {
  932. // no longer the initial suspension.
  933. initial_suspend_ = false;
  934. // print run cmd content
  935. // get run_level and node_name
  936. run_level_ = GetRunLevel(reply);
  937. node_name_ = GetNodeName(reply);
  938. MS_LOG(INFO) << "run_level: " << run_level_;
  939. MS_LOG(INFO) << "node_name_: " << node_name_;
  940. }
  941. }
  942. void Debugger::ProcessKSetCMD(const EventReply &reply) {
  943. MS_LOG(INFO) << "SetCMD";
  944. MS_LOG(INFO) << "id: " << GetWatchpointID(reply);
  945. MS_LOG(INFO) << "delete: " << GetWatchpointDelete(reply);
  946. if (GetWatchpointDelete(reply)) {
  947. MS_LOG(INFO) << "Deleting watchpoint";
  948. RemoveWatchpoint(GetWatchpointID(reply));
  949. } else {
  950. MS_LOG(INFO) << "Setting watchpoint";
  951. MS_LOG(INFO) << "condition: " << GetWatchcondition(reply).condition();
  952. ProtoVector<WatchNode> received_nodes = GetWatchnodes(reply);
  953. for (const auto &node : received_nodes) {
  954. MS_LOG(INFO) << "node name: " << node.node_name();
  955. MS_LOG(INFO) << "node type: " << node.node_type();
  956. }
  957. ProtoVector<WatchCondition_Parameter> parameters = GetParameters(reply);
  958. for (const auto &parameter : parameters) {
  959. MS_LOG(INFO) << "parameter name: " << parameter.name();
  960. MS_LOG(INFO) << "parameter is disabled: " << parameter.disabled();
  961. MS_LOG(INFO) << "parameter value: " << parameter.value();
  962. }
  963. SetWatchpoint(GetWatchnodes(reply), GetWatchcondition(reply), GetWatchpointID(reply), GetParameters(reply));
  964. }
  965. }
  966. void Debugger::ProcessKViewCMD(const EventReply &reply) {
  967. MS_LOG(INFO) << "ViewCMD";
  968. // print view cmd content
  969. ProtoVector<TensorProto> received_tensors = GetTensors(reply);
  970. for (auto received_tensor : received_tensors) {
  971. MS_LOG(INFO) << "tensor node name: " << received_tensor.node_name();
  972. MS_LOG(INFO) << "tensor slot: " << received_tensor.slot();
  973. MS_LOG(INFO) << "tensor finished: " << std::boolalpha << received_tensor.finished() << std::noboolalpha;
  974. MS_LOG(INFO) << "tensor iter: " << received_tensor.iter();
  975. MS_LOG(INFO) << "tensor truncate: " << std::boolalpha << received_tensor.truncate() << std::noboolalpha;
  976. }
  977. switch (reply.view_cmd().level()) {
  978. case debugger::ViewCMD_Level::ViewCMD_Level_base:
  979. MS_LOG(INFO) << "Tensor base request.";
  980. ViewBaseLevel(reply);
  981. break;
  982. case debugger::ViewCMD_Level::ViewCMD_Level_statistics:
  983. MS_LOG(INFO) << "Tensor statistics request.";
  984. ViewStatLevel(reply);
  985. break;
  986. case debugger::ViewCMD_Level::ViewCMD_Level_value:
  987. MS_LOG(INFO) << "Tensor value request.";
  988. ViewValueLevel(reply);
  989. break;
  990. default:
  991. MS_LOG(DEBUG) << "Debug: Unknown tensor info level";
  992. break;
  993. }
  994. }
  995. void Debugger::ViewValueLevel(const EventReply &reply) {
  996. MS_LOG(INFO) << "Sending tensors";
  997. std::list<TensorProto> tensors = LoadTensors(GetTensors(reply));
  998. // print view cmd reply
  999. for (auto tensor : tensors) {
  1000. MS_LOG(INFO) << "tensor node name: " << tensor.node_name();
  1001. MS_LOG(INFO) << "tensor slot: " << tensor.slot();
  1002. MS_LOG(INFO) << "tensor finished: " << std::boolalpha << tensor.finished() << std::noboolalpha;
  1003. MS_LOG(INFO) << "tensor iter: " << tensor.iter();
  1004. MS_LOG(INFO) << "tensor truncate: " << std::boolalpha << tensor.truncate() << std::noboolalpha;
  1005. MS_LOG(INFO) << "tensor dims: ";
  1006. for (auto dim : tensor.dims()) {
  1007. MS_LOG(INFO) << dim << ",";
  1008. }
  1009. MS_LOG(INFO) << "tensor dtype: " << tensor.data_type();
  1010. }
  1011. MS_EXCEPTION_IF_NULL(grpc_client_);
  1012. EventReply send_tensors_reply = grpc_client_->SendTensors(tensors);
  1013. if (send_tensors_reply.status() != debugger::EventReply::OK) {
  1014. MS_LOG(ERROR) << "Error: SendTensors failed";
  1015. }
  1016. }
  1017. void Debugger::ViewStatLevel(const EventReply &reply) {
  1018. std::list<TensorSummary> tensor_stats_list = LoadTensorsStat(GetTensors(reply));
  1019. EventReply send_tensors_stat_reply = grpc_client_->SendTensorStats(tensor_stats_list);
  1020. if (send_tensors_stat_reply.status() != debugger::EventReply::OK) {
  1021. MS_LOG(ERROR) << "Error: SendTensorsStats failed.";
  1022. }
  1023. }
  1024. void Debugger::ViewBaseLevel(const EventReply &reply) {
  1025. std::list<TensorBase> tensor_base_list = LoadTensorsBase(GetTensors(reply));
  1026. EventReply send_tensor_base_reply = grpc_client_->SendTensorBase(tensor_base_list);
  1027. if (send_tensor_base_reply.status() != debugger::EventReply::OK) {
  1028. MS_LOG(ERROR) << "Error: SendTensorsBase failed.";
  1029. }
  1030. }
  1031. void AddTensorProtoInfo(TensorProto *tensor_item, const TensorProto &tensor) {
  1032. tensor_item->set_node_name(tensor.node_name());
  1033. tensor_item->set_slot(tensor.slot());
  1034. tensor_item->set_iter(tensor.iter());
  1035. tensor_item->set_truncate(tensor.truncate());
  1036. tensor_item->clear_tensor_content();
  1037. tensor_item->clear_data_type();
  1038. tensor_item->clear_dims();
  1039. }
  1040. void AddTensorStatInfo(const DebugServices::TensorStat &tensor_stat,
  1041. std::list<TensorSummary> *const tensor_summary_list) {
  1042. if (tensor_summary_list == nullptr) {
  1043. MS_LOG(DEBUG) << "tensor_summary_list is nullptr.";
  1044. return;
  1045. }
  1046. TensorSummary tensor_summary_item;
  1047. TensorBase *tensor_base = tensor_summary_item.mutable_tensor_base();
  1048. tensor_base->set_data_type(tensor_stat.dtype);
  1049. tensor_base->set_data_size((int64_t)tensor_stat.data_size);
  1050. for (auto elem : tensor_stat.shape) {
  1051. tensor_base->add_shape(elem);
  1052. }
  1053. Statistics *tensor_statistics = tensor_summary_item.mutable_statistics();
  1054. tensor_statistics->set_is_bool(tensor_stat.is_bool);
  1055. tensor_statistics->set_max_value(static_cast<float>(tensor_stat.max_value));
  1056. tensor_statistics->set_min_value(static_cast<float>(tensor_stat.min_value));
  1057. tensor_statistics->set_avg_value(static_cast<float>(tensor_stat.avg_value));
  1058. tensor_statistics->set_count(SizeToInt(tensor_stat.count));
  1059. tensor_statistics->set_neg_zero_count(SizeToInt(tensor_stat.neg_zero_count));
  1060. tensor_statistics->set_pos_zero_count(SizeToInt(tensor_stat.pos_zero_count));
  1061. tensor_statistics->set_nan_count(SizeToInt(tensor_stat.nan_count));
  1062. tensor_statistics->set_neg_inf_count(SizeToInt(tensor_stat.neg_inf_count));
  1063. tensor_statistics->set_pos_inf_count(SizeToInt(tensor_stat.pos_inf_count));
  1064. tensor_statistics->set_zero_count(SizeToInt(tensor_stat.zero_count));
  1065. tensor_summary_list->push_back(tensor_summary_item);
  1066. }
  1067. void Debugger::SetWatchpoint(const ProtoVector<WatchNode> &nodes, const WatchCondition &condition, const int32_t id,
  1068. const ProtoVector<WatchCondition_Parameter> &parameters) {
  1069. std::vector<std::tuple<std::string, bool>> check_node_list;
  1070. std::vector<DebugServices::parameter_t> parameter_list;
  1071. std::transform(nodes.begin(), nodes.end(), std::back_inserter(check_node_list),
  1072. [](const WatchNode &node) -> std::tuple<std::string, bool> {
  1073. return make_tuple(node.node_name(), node.node_type() == "scope");
  1074. });
  1075. std::transform(
  1076. parameters.begin(), parameters.end(), std::back_inserter(parameter_list),
  1077. [](const WatchCondition_Parameter &parameter) -> DebugServices::parameter_t {
  1078. return DebugServices::parameter_t{parameter.name(), parameter.disabled(), parameter.value(), parameter.hit()};
  1079. });
  1080. debug_services_->AddWatchpoint(id, condition.condition(), condition.value(), check_node_list, parameter_list);
  1081. }
  1082. void Debugger::RemoveWatchpoint(const int32_t id) { debug_services_->RemoveWatchpoint(id); }
  1083. std::list<TensorProto> Debugger::LoadTensors(const ProtoVector<TensorProto> &tensors) const {
  1084. std::vector<std::string> name;
  1085. std::vector<std::string> ret_name;
  1086. std::vector<const char *> data_ptr;
  1087. std::vector<ssize_t> data_size;
  1088. std::vector<unsigned int> dtype;
  1089. std::vector<std::vector<int64_t>> shape;
  1090. std::transform(tensors.begin(), tensors.end(), std::back_inserter(name), GetTensorFullName);
  1091. // ret_name will contain the tensor names that are found in TensorLoader
  1092. // items in ret_name will be in the same order as tensors, if found
  1093. debug_services_->ReadNodesTensors(name, &ret_name, &data_ptr, &data_size, &dtype, &shape);
  1094. std::list<TensorProto> tensor_list;
  1095. size_t result_index = 0;
  1096. for (auto tensor : tensors) {
  1097. ssize_t size_iter = 0;
  1098. if (result_index >= ret_name.size() || ret_name[result_index] != GetTensorFullName(tensor)) {
  1099. TensorProto tensor_item;
  1100. tensor_item.set_finished(true);
  1101. AddTensorProtoInfo(&tensor_item, tensor);
  1102. tensor_list.push_back(tensor_item);
  1103. continue;
  1104. }
  1105. ssize_t tensor_size = data_size[result_index];
  1106. while (size_iter < tensor_size) {
  1107. ssize_t chunk_size = g_chunk_size;
  1108. TensorProto tensor_item;
  1109. tensor_item.set_finished(false);
  1110. if (tensor_size - size_iter <= g_chunk_size) {
  1111. chunk_size = tensor_size - size_iter;
  1112. tensor_item.set_finished(true);
  1113. }
  1114. AddTensorProtoInfo(&tensor_item, tensor);
  1115. // return empty tensor if didn't find the requested tensor
  1116. tensor_item.set_tensor_content(data_ptr[result_index] + size_iter, chunk_size);
  1117. tensor_item.set_data_type((debugger::DataType)dtype[result_index]);
  1118. for (auto &elem : shape[result_index]) {
  1119. tensor_item.add_dims(elem);
  1120. }
  1121. // add tensor to result list and increment result_index to check next item in ret_name
  1122. tensor_list.push_back(tensor_item);
  1123. if (size_iter > INT_MAX - g_chunk_size) {
  1124. MS_EXCEPTION(ValueError) << size_iter << " + " << g_chunk_size << " would lead to integer overflow!";
  1125. }
  1126. size_iter += g_chunk_size;
  1127. }
  1128. result_index++;
  1129. }
  1130. return tensor_list;
  1131. }
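  // Annotation (derived from the loop above): a tensor larger than g_chunk_size (3 MiB) is emitted as
  // several TensorProto items sharing the same node name and slot; every chunk except the last has
  // finished == false, and the content is sliced with set_tensor_content(data_ptr + size_iter, chunk_size),
  // so the value can be reassembled in order (reassembly on the receiving side is an assumption).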
  1132. std::list<TensorBase> Debugger::LoadTensorsBase(const ProtoVector<TensorProto> &tensors) const {
  1133. std::list<TensorBase> tensor_base_list;
  1134. std::vector<std::string> name;
  1135. std::transform(tensors.begin(), tensors.end(), std::back_inserter(name), GetTensorFullName);
  1136. std::vector<std::tuple<std::string, std::shared_ptr<TensorData>>> result_list;
  1137. debug_services_->SearchNodesTensors(name, &result_list);
  1138. for (auto result : result_list) {
  1139. auto tensor = std::get<1>(result);
  1140. if (!tensor || ((cur_root_graph_id_ != tensor->GetRootGraphId()) &&
  1141. MsContext::GetInstance()->get_param<bool>(MS_CTX_ENABLE_MINDRT))) {
  1142. // tensor was not found or tensor's graph was not executed in the current step, creating empty tensor base.
  1143. TensorBase tensor_base_item;
  1144. tensor_base_item.set_data_size(0);
  1145. tensor_base_item.set_data_type(0);
  1146. tensor_base_item.add_shape(0);
  1147. tensor_base_list.push_back(tensor_base_item);
  1148. continue;
  1149. }
  1150. // tensor was found; create a tensor base object.
  1151. TensorBase tensor_base_item;
  1152. tensor_base_item.set_data_size((int64_t)tensor->GetByteSize());
  1153. tensor_base_item.set_data_type((int32_t)tensor->GetType());
  1154. for (auto elem : tensor->GetShape()) {
  1155. tensor_base_item.add_shape(elem);
  1156. }
  1157. tensor_base_list.push_back(tensor_base_item);
  1158. }
  1159. return tensor_base_list;
  1160. }
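// LoadTensorsStat computes per-tensor statistics via DebugServices::GetTensorStatistics for each
// requested tensor that is present for the current step, and appends an empty summary otherwise.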
std::list<TensorSummary> Debugger::LoadTensorsStat(const ProtoVector<TensorProto> &tensors) const {
  std::list<TensorSummary> tensor_summary_list;
  std::vector<std::string> name;
  std::transform(tensors.begin(), tensors.end(), std::back_inserter(name), GetTensorFullName);
  std::vector<std::tuple<std::string, std::shared_ptr<TensorData>>> result_list;
  debug_services_->SearchNodesTensors(name, &result_list);
  for (auto result : result_list) {
    auto tensor = std::get<1>(result);
    if (!tensor || ((cur_root_graph_id_ != tensor->GetRootGraphId()) &&
                    MsContext::GetInstance()->get_param<bool>(MS_CTX_ENABLE_MINDRT))) {
      // tensor was not found or tensor's graph was not executed in the current step; create an empty tensor summary.
      DebugServices::TensorStat tensor_stat;
      AddTensorStatInfo(tensor_stat, &tensor_summary_list);
      continue;
    }
    // tensor was found; create the tensor summary object.
    DebugServices::TensorStat tensor_stat = DebugServices::GetTensorStatistics(tensor);
    AddTensorStatInfo(tensor_stat, &tensor_summary_list);
  }
  return tensor_summary_list;
}

std::shared_ptr<TensorData> Debugger::GetTensor(const std::string &tensor_name) const {
  return debug_services_->GetTensor(tensor_name);
}

void Debugger::Exit(bool exit_success) {
  // debugger will notify the main thread to exit because the main thread can only exit at a step boundary.
  MS_LOG(INFO) << "Exit Debugger";
  SetEnableHeartbeat(false);
  Common::DebugTerminate(true, exit_success);
}

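// CheckWatchpoints evaluates the registered watchpoints either against all loaded tensors (when
// watchnode is empty) or against the tensors of a single kernel, and converts each hit reported
// by DebugServices into a WatchpointHit proto that carries only tensor identifiers, not data.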
std::list<WatchpointHit> Debugger::CheckWatchpoints(const std::string &watchnode, const CNodePtr &kernel,
                                                    bool recheck) {
  std::vector<std::string> name;
  std::vector<std::string> slot;
  std::vector<int> condition;
  std::vector<unsigned int> watchpoint_id;
  std::vector<std::string> overflow_ops;
  std::vector<std::vector<DebugServices::parameter_t>> parameters;
  std::vector<int32_t> error_codes;
  std::vector<std::shared_ptr<TensorData>> tensor_list;
  if (watchnode.empty()) {
    tensor_list = debug_services_->GetTensor();
  } else {
    tensor_list = debug_services_->GetNodeTensor(kernel);
  }
  DebugServices::ProcessedNPYFiles processed_npy_files;
  MS_LOG(INFO) << "CheckWatchpoints call for step " << num_step_;
  debug_services_->CheckWatchpoints(&name, &slot, &condition, &watchpoint_id, &parameters, &error_codes, overflow_ops,
                                    &processed_npy_files, &tensor_list, initial_suspend_, watchnode.empty(), recheck);
  std::list<WatchpointHit> hits;
  for (unsigned int i = 0; i < name.size(); i++) {
    WatchpointHit hit;
    std::vector<DebugServices::parameter_t> &parameter = parameters[i];
    hit.set_id(watchpoint_id[i]);
    hit.set_error_code(error_codes[i]);
    // here TensorProto acts as a tensor indicator; the tensor content is not sent
    TensorProto *tensor_item = hit.mutable_tensor();
    tensor_item->set_node_name(name[i]);
    tensor_item->set_slot(slot[i]);
    tensor_item->set_finished(true);
    WatchCondition *condition_item = hit.mutable_watch_condition();
    condition_item->set_condition(debugger::WatchCondition_Condition(condition[i]));
    for (const auto &p : parameter) {
      auto x = condition_item->mutable_params()->Add();
      x->set_name(p.name);
      x->set_disabled(p.disabled);
      x->set_value(p.value);
      x->set_hit(p.hit);
      x->set_actual_value(p.actual_value);
    }
    hits.push_back(hit);
  }
  return hits;
}

void Debugger::SendWatchpoints(const std::list<WatchpointHit> &points) {
  // send info about watchpoint
  if (!points.empty()) {
    MS_EXCEPTION_IF_NULL(grpc_client_);
    EventReply reply = grpc_client_->SendWatchpointHits(points);
    if (reply.status() != EventReply::OK) {
      MS_LOG(ERROR) << "Error: SendWatchpointHits failed";
    }
  }
}

bool Debugger::DumpTensorToFile(const std::string &filepath, bool trans_flag, const std::string &host_fmt,
                                const std::string &addr_format, const std::string &tensor_name, size_t slot,
                                const std::vector<int64_t> &host_shape, TypeId host_type) const {
  return debug_services_.get()->DumpTensorToFile(filepath, trans_flag, host_fmt, addr_format, tensor_name, slot,
                                                 host_shape, host_type);
}

bool Debugger::LoadNewTensor(const std::shared_ptr<TensorData> &tensor, bool keep_prev) {
  if (debug_services_ != nullptr) {
    return debug_services_.get()->LoadNewTensor(tensor, keep_prev);
  }
  return false;
}

bool Debugger::debugger_enabled() const { return debugger_enabled_; }

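// GetCommand maps the cmd_case of an incoming EventReply to a DebuggerCommand, falling back to
// kUnknownCMD for commands the debugger does not recognize.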
DebuggerCommand GetCommand(const EventReply &reply) {
  DebuggerCommand cmd = DebuggerCommand::kUnknownCMD;
  switch (reply.cmd_case()) {
    case debugger::EventReply::CmdCase::kExit:
      cmd = DebuggerCommand::kExitCMD;
      break;
    case debugger::EventReply::CmdCase::kRunCmd:
      cmd = DebuggerCommand::kRunCMD;
      break;
    case debugger::EventReply::CmdCase::kSetCmd:
      cmd = DebuggerCommand::kSetCMD;
      break;
    case debugger::EventReply::CmdCase::kViewCmd:
      cmd = DebuggerCommand::kViewCMD;
      break;
    case debugger::EventReply::CmdCase::kVersionMatched:
      cmd = DebuggerCommand::kVersionMatchedCMD;
      break;
    default:
      MS_LOG(DEBUG) << "Debug: UnknownCMD";
      break;
  }
  return cmd;
}

ProtoVector<WatchCondition_Parameter> GetParameters(const EventReply &reply) {
  if (!reply.has_set_cmd() || !reply.set_cmd().has_watch_condition()) {
    MS_LOG(ERROR) << "Error: Can not get Parameters from command. Returning default value: ProtoVector<Parameter>().";
    return ProtoVector<WatchCondition_Parameter>();
  }
  return reply.set_cmd().watch_condition().params();
}

ProtoVector<WatchNode> GetWatchnodes(const EventReply &reply) {
  if (!reply.has_set_cmd()) {
    MS_LOG(ERROR) << "Error: Not SetCMD, can not get WatchNodes. Returning default value: ProtoVector<WatchNode>().";
    return ProtoVector<WatchNode>();
  }
  return reply.set_cmd().watch_nodes();
}

std::string GetRunLevel(const EventReply &reply) {
  if (!reply.has_run_cmd()) {
    MS_LOG(ERROR) << "Error: Not RunCMD, can not get RunLevel. Returning an empty string.";
    return "";
  }
  return reply.run_cmd().run_level();
}

std::string GetNodeName(const EventReply &reply) {
  if (!reply.has_run_cmd()) {
    MS_LOG(ERROR) << "Error: Not RunCMD, can not get NodeName. Returning an empty string.";
    return "";
  }
  return reply.run_cmd().node_name();
}

WatchCondition GetWatchcondition(const EventReply &reply) {
  if (!reply.has_set_cmd() || !reply.set_cmd().has_watch_condition()) {
    MS_LOG(ERROR) << "Error: Can not get WatchCondition from command. Returning default value: WatchCondition().";
    return WatchCondition();
  }
  return reply.set_cmd().watch_condition();
}

int32_t GetWatchpointID(const EventReply &reply) {
  if (!reply.has_set_cmd()) {
    MS_LOG(ERROR) << "Error: Not SetCMD, can not get Watchpoint ID. Returning default value: 0.";
    return 0;
  }
  return reply.set_cmd().id();
}

bool GetWatchpointDelete(const EventReply &reply) {
  if (!reply.has_set_cmd()) {
    MS_LOG(ERROR) << "Error: Not SetCMD, can not get Watchpoint delete flag. Returning default value: false.";
    return false;
  }
  return reply.set_cmd().delete_();
}

ProtoVector<TensorProto> GetTensors(const EventReply &reply) {
  if (!reply.has_view_cmd()) {
    MS_LOG(ERROR) << "Error: Not ViewCMD, can not get Tensors. Returning default value: ProtoVector<TensorProto>().";
    return ProtoVector<TensorProto>();
  }
  return reply.view_cmd().tensors();
}

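// GetTensorFullName builds the "<node_name>:<slot>[:<iter>]" key used to look up tensors.
// Illustrative example (node name assumed): a TensorProto with node_name
// "Default/network/Conv2D-op1", slot "0" and an empty iter maps to
// "Default/network/Conv2D-op1:0"; with truncate set, the scope is dropped and the key becomes
// "Conv2D-op1:0".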
std::string GetTensorFullName(const TensorProto &tensor) {
  string node_name = tensor.node_name();
  if (tensor.truncate()) {
    // scopes in the node name are separated by '/';
    // use the name without its scope if truncate is true
    std::size_t found = node_name.find_last_of("/");
    node_name = node_name.substr(found + 1);
  }
  return node_name + ":" + tensor.slot() + (tensor.iter() == "" ? "" : ":" + tensor.iter());
}

bool GetMiVersionMatched(const EventReply &reply) { return reply.version_matched(); }

bool Debugger::partial_memory() const { return partial_memory_; }

void Debugger::SetEnableHeartbeat(bool enabled) { enable_heartbeat_ = enabled; }

void Debugger::SetCurNode(const std::string &cur_name) {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  cur_name_ = cur_name;
}

std::string Debugger::run_level() const { return run_level_; }

void Debugger::SetTrainingDone(bool training_done) { training_done_ = training_done; }

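// CheckPort validates a TCP port given as a decimal string: digits only, no leading zero
// (the single digit "0" passes that check but is still rejected as below the minimum), and a
// value in [1, 65535].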
bool Debugger::CheckPort(const std::string &port) const {
  int num = 0;
  const int min_port_num = 1;
  const int max_port_num = 65535;
  const int decimal = 10;
  if (port[0] == '0' && port[1] != '\0') return false;
  int i = 0;
  while (port[i] != '\0') {
    if (port[i] < '0' || port[i] > '9') return false;
    num = num * decimal + (port[i] - '0');
    if (num > max_port_num) return false;
    i++;
  }
  if (num < min_port_num) return false;
  return true;
}

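// CheckIp validates a dotted-quad IPv4 address with a regular expression. The first and last
// octets only accept values 1-254, while the middle octets accept 0-255, so addresses such as
// 0.x.x.x or x.x.x.255 do not match.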
bool Debugger::CheckIp(const std::string &host) const {
  std::regex reg_ip(
    "(25[0-4]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[1-9])"
    "[.](25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])"
    "[.](25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])"
    "[.](25[0-4]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[1-9])");
  std::smatch smat;
  std::string host_str = host;
  return std::regex_match(host_str, smat, reg_ip);
}

uint32_t Debugger::GetFirstRunGraphId() const { return rungraph_id_list_.front(); }

/*
 * Feature group: Dump.
 * Target device group: Ascend, GPU.
 * Runtime category: Old runtime, MindRT.
 * Description: Load a single parameter or value node.
 */
void Debugger::LoadSingleAnfnode(const AnfNodePtr &anf_node, const size_t output_index, uint32_t root_graph_id) {
  MS_EXCEPTION_IF_NULL(anf_node);
  if (!anf_node->isa<Parameter>() && !anf_node->isa<ValueNode>()) {
    return;
  }
  // When MindRT is used, only ValueNodes and parameter weights can be loaded from device to host.
  if (MsContext::GetInstance()->get_param<bool>(MS_CTX_ENABLE_MINDRT)) {
    if (!anf_node->isa<ValueNode>() &&
        !(anf_node->isa<Parameter>() && common::AnfAlgo::IsParameterWeight(anf_node->cast<ParameterPtr>()))) {
      return;
    }
  }
  // for parameters and value nodes, set their execution order to 0
  int exec_order = 0;
  std::string node_name = GetKernelNodeName(anf_node);
  GetFileKernelName(NOT_NULL(&node_name));
  // check whether the output address exists; if not, return
  if (!AnfAlgo::OutputAddrExist(anf_node, output_index)) {
    return;
  }
  auto addr = AnfAlgo::GetOutputAddr(anf_node, output_index);
  MS_EXCEPTION_IF_NULL(addr);
  auto type = common::AnfAlgo::GetOutputInferDataType(anf_node, output_index);
  if (!IsTypeDebuggerSupported(type)) {
    return;
  }
  auto format = kOpFormat_DEFAULT;
  string tensor_name = node_name + ':' + "0";
  ShapeVector int_shapes = trans::GetRuntimePaddingShape(anf_node, output_index);
  bool keep_prev;
  if (anf_node->isa<Parameter>()) {
    keep_prev = true;
    debug_services_->MoveTensorCurrentToPrev(tensor_name);
  } else {
    keep_prev = false;
  }
  bool ret = addr->LoadMemToHost(tensor_name, exec_order, format, int_shapes, type, 0, keep_prev, root_graph_id, false);
  if (!ret) {
    MS_LOG(ERROR) << "LoadMemToHost failed: tensor_name: " << tensor_name << ", host_format: " << format << ".";
  }
}

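// LoadSingleParameterMindRT copies a single parameter's data from device to host for the current
// root graph under MindRT; nodes whose device address or data type is unavailable or unsupported
// are skipped.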
void Debugger::LoadSingleParameterMindRT(const AnfNodePtr &node) {
  MS_EXCEPTION_IF_NULL(node);
  auto root_graph_id = cur_root_graph_id_;
  // This function is only for loading parameters under MindRT.
  std::string node_name = GetKernelNodeName(node);
  GetFileKernelName(NOT_NULL(&node_name));
  TypeId type;
  TypeId device_type;
  ShapeVector int_shapes;
  auto device_addr = GetParameterInfo(node, NOT_NULL(&int_shapes), NOT_NULL(&type), NOT_NULL(&device_type));
  if (device_addr == nullptr) {
    MS_LOG(DEBUG) << "Skip node: " << node_name << ". Parameter data is not available for MindRT.";
    return;
  }
  if (!IsTypeDebuggerSupported(type)) {
    return;
  }
  auto format = kOpFormat_DEFAULT;
  string tensor_name = node_name + ':' + "0";
  if (debug_services_ != nullptr) {
    debug_services_->MoveTensorCurrentToPrev(tensor_name);
  }
  // keep_prev is true and force update is set for parameters.
  bool ret = device_addr->LoadMemToHost(tensor_name, 0, format, int_shapes, type, 0, true, root_graph_id, true);
  if (!ret) {
    MS_LOG(ERROR) << "LoadMemToHost failed: tensor_name: " << tensor_name << ", host_format: " << format << ".";
  }
}

/*
 * Feature group: Dump, Online debugger.
 * Target device group: Ascend, GPU.
 * Runtime category: Old runtime, MindRT.
 * Description: Load all the parameters and value nodes for the last loaded graph.
 */
void Debugger::LoadParametersAndConst() {
  if (!(debugger_enabled_ || CheckDebuggerDumpEnabled())) return;
  MS_EXCEPTION_IF_NULL(graph_ptr_);
  // load parameters
  MS_LOG(INFO) << "Start to load Parameters for graph " << graph_ptr_->graph_id() << ".";
  auto root_graph_id = graph_ptr_->root_graph_id();
  const auto &parameters = graph_ptr_->inputs();
  for (auto &item : parameters) {
    LoadSingleAnfnode(item, PARAMETER_OUTPUT_INDEX, root_graph_id);
  }
  // load value nodes
  // get all constant values from the graph
  MS_LOG(INFO) << "Start to load value nodes for graph " << graph_ptr_->graph_id() << ".";
  const auto value_nodes = graph_ptr_->graph_value_nodes();
  for (auto &item : value_nodes) {
    LoadSingleAnfnode(item, VALUE_NODE_OUTPUT_INDEX, root_graph_id);
  }
}

/*
 * Feature group: Dump, Online debugger.
 * Target device group: Ascend, GPU.
 * Runtime category: Old runtime, MindRT.
 * Description: Load all the parameters and value nodes for the given graph.
 */
void Debugger::LoadParametersAndConst(const KernelGraphPtr &graph) {
  if (!(debugger_enabled_ || CheckDebuggerDumpEnabled())) return;
  MS_EXCEPTION_IF_NULL(graph);
  // load parameters
  MS_LOG(INFO) << "Start to load Parameters for graph " << graph->graph_id() << ".";
  auto root_graph_id = graph->root_graph_id();
  const auto &parameters = graph->inputs();
  for (auto &item : parameters) {
    LoadSingleAnfnode(item, PARAMETER_OUTPUT_INDEX, root_graph_id);
  }
  // load value nodes
  // get all constant values from the graph
  MS_LOG(INFO) << "Start to load value nodes for graph " << graph->graph_id() << ".";
  const auto value_nodes = graph->graph_value_nodes();
  for (auto &item : value_nodes) {
    LoadSingleAnfnode(item, VALUE_NODE_OUTPUT_INDEX, root_graph_id);
  }
}

/*
 * Feature group: Dump.
 * Target device group: GPU.
 * Runtime category: MindRT.
 * Description: This function loads parameters' data from device to host into tensor_list_map_ for GPU dump.
 * Ascend does not use tensor_list_map_ for dump, so it is not needed for Ascend dump.
 */
void Debugger::LoadParametersAllGraphs() {
  if (!(device_target_ == kGPUDevice && CheckDebuggerDumpEnabled())) {
    return;
  }
  for (auto &node : parameters_mindRT_) {
    LoadSingleParameterMindRT(node);
  }
}

/*
 * Feature group: Dump.
 * Target device group: GPU.
 * Runtime category: MindRT.
 * Description: This function loads constant data from device to host into tensor_list_map_ for GPU dump.
 * Ascend does not use tensor_list_map_ for dump, so it is not needed for Ascend dump.
 */
void Debugger::LoadConstsForGraph(const KernelGraphPtr &graph) {
  if (!(device_target_ == kGPUDevice && CheckDebuggerDumpEnabled())) {
    return;
  }
  // load value nodes
  // get all constant values from the graph
  MS_LOG(INFO) << "Start to load value nodes for graph " << graph->graph_id() << ".";
  auto root_graph_id = graph->root_graph_id();
  const auto value_nodes = graph->graph_value_nodes();
  for (auto &item : value_nodes) {
    LoadSingleAnfnode(item, VALUE_NODE_OUTPUT_INDEX, root_graph_id);
  }
}

/*
 * Feature group: Online debugger.
 * Target device group: Ascend.
 * Runtime category: Old runtime, MindRT.
 * Description: Load all the kernels for the last loaded graph.
 */
void Debugger::LoadGraphOutputs() {
  if (!(debugger_enabled() && device_target_ == kAscendDevice)) return;
  MS_EXCEPTION_IF_NULL(graph_ptr_);
  const auto &apply_kernels = graph_ptr_->execution_order();
  auto root_graph_id = graph_ptr_->root_graph_id();
  // for kernels, execution order starts from 1
  int exec_order = 1;
  for (const auto &node : apply_kernels) {
    MS_EXCEPTION_IF_NULL(node);
    std::string kernel_name = GetKernelNodeName(node);
    auto output_size = common::AnfAlgo::GetOutputTensorNum(node);
    if (partial_memory_) {
      if (!debug_services_->IsWatchPoint(kernel_name, node)) {
        continue;
      }
    }
    for (size_t j = 0; j < output_size; ++j) {
      if (!AnfAlgo::OutputAddrExist(node, j)) {
        MS_LOG(INFO) << "Cannot find output addr for slot " << j << " for " << kernel_name;
        continue;
      }
      auto addr = AnfAlgo::GetOutputAddr(node, j);
      MS_EXCEPTION_IF_NULL(addr);
      auto type = common::AnfAlgo::GetOutputInferDataType(node, j);
      if (!IsTypeDebuggerSupported(type)) {
        continue;
      }
      auto format = kOpFormat_DEFAULT;
      string tensor_name = kernel_name + ':' + std::to_string(j);
      ShapeVector int_shapes = trans::GetRuntimePaddingShape(node, j);
      auto ret = addr->LoadMemToHost(tensor_name, exec_order, format, int_shapes, type, j, false, root_graph_id, false);
      if (!ret) {
        MS_LOG(ERROR) << "LoadMemToHost failed: tensor_name: " << tensor_name << ", host_format: " << format << ".";
      }
    }
    exec_order = exec_order + 1;
  }
}

/*
 * Feature group: Online debugger.
 * Target device group: GPU.
 * Runtime category: Old runtime.
 * Description: Update step number if we are processing the first graph (to support multigraph).
 */
void Debugger::UpdateStepNum(const session::KernelGraph *graph) {
  MS_EXCEPTION_IF_NULL(graph);
  MS_EXCEPTION_IF_NULL(debugger_);
  if (device_target_ == kGPUDevice && (debugger_enabled_ || device::KernelRuntime::DumpDataEnabledIteration()) &&
      (graph->graph_id() == debugger_->GetFirstRunGraphId())) {
    // access lock for public method
    std::lock_guard<std::mutex> a_lock(access_lock_);
    ++num_step_;
  }
}

/*
 * Feature group: Online debugger.
 * Target device group: GPU.
 * Runtime category: MindRT.
 * Description: Update step number when DebugActor::DebugOnStepEnd is called at the end of each step.
 */
void Debugger::UpdateStepNumGPU() {
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (device_target_ == kGPUDevice && (debugger_enabled_ || dump_json_parser.DumpEnabledForIter())) {
    // access lock for public method
    std::lock_guard<std::mutex> a_lock(access_lock_);
    ++num_step_;
    MS_LOG(DEBUG) << "Update step for GPU, current step: " << num_step_;
  }
}

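// ClearCurrentData clears the tensor data collected for the current step (via
// DebugServices::EmptyCurrentTensor) on GPU so the next step starts from an empty tensor loader;
// it is a no-op on other targets or when neither the debugger nor dump is enabled.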
void Debugger::ClearCurrentData() {
  if ((device_target_ == kGPUDevice) && (debugger_enabled_ || device::KernelRuntime::DumpDataEnabledIteration())) {
    if (debug_services_) {
      debug_services_->EmptyCurrentTensor();
    } else {
      MS_LOG(ERROR) << "debug_services_ is nullptr";
    }
  }
}

bool Debugger::TensorExistsInCurrent(const std::string &tensor_name) {
  if (debug_services_ != nullptr) {
    return debug_services_->TensorExistsInCurrent(tensor_name);
  }
  return false;
}

#ifdef ENABLE_D
/*
 * Feature group: Dump.
 * Target device group: Ascend.
 * Runtime category: Old runtime, MindRT.
 * Description: Load the DumpDataBuilder object from dump_data_construct_map_ that tracks the data chunks of
 * node_name. It is used for Ascend A+M dump. If not found, create a new one and add it to
 * dump_data_construct_map_.
 */
std::shared_ptr<DumpDataBuilder> Debugger::LoadDumpDataBuilder(const std::string &node_name) {
  auto iter = dump_data_construct_map_.find(node_name);
  if (iter == dump_data_construct_map_.end()) {
    dump_data_construct_map_[node_name] = std::make_shared<DumpDataBuilder>();
  }
  return dump_data_construct_map_[node_name];
}

void Debugger::ClearDumpDataBuilder(const std::string &node_name) { (void)dump_data_construct_map_.erase(node_name); }

/*
 * Feature group: Dump.
 * Target device group: Ascend.
 * Runtime category: MindRT.
 * Description: This function is used by A+M dump to make sure the training process ends only after all tensor data
 * have been dumped to disk. It checks whether dump_data_construct_map_ is empty to see if any dump task is still
 * alive; if so, it sleeps for 500 ms and checks again, up to a bounded number of retries.
 */
void Debugger::WaitForWriteFileFinished() {
  const int kRetryTimeInMilliseconds = 500;
  const int kMaxRecheckCount = 10;
  int recheck_cnt = 0;
  while (recheck_cnt < kMaxRecheckCount && !dump_data_construct_map_.empty()) {
    MS_LOG(INFO) << "Sleep for " << std::to_string(kRetryTimeInMilliseconds)
                 << " ms to wait for dumping files to finish. Retry count: " << std::to_string(recheck_cnt + 1) << "/"
                 << std::to_string(kMaxRecheckCount);
    std::this_thread::sleep_for(std::chrono::milliseconds(kRetryTimeInMilliseconds));
    recheck_cnt++;
  }
}
#endif
}  // namespace mindspore