/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <dirent.h>
#include <cstdio>
#include <fstream>
#include <tuple>
#include <vector>
#include <algorithm>
#include <iostream>
#include <cstring>
#include <utility>
#include <map>
#include <regex>
#include "debug/debugger/debugger.h"
#include "debug/data_dump/dump_json_parser.h"
#include "pipeline/jit/pipeline.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "runtime/device/kernel_runtime_manager.h"
#include "runtime/device/kernel_runtime.h"
#include "debug/data_dump/e2e_dump.h"
#include "utils/config_manager.h"
#include "debug/env_config_parser.h"
#include "utils/comm_manager.h"
#include "runtime/hardware/device_context_manager.h"
#include "debug/anf_ir_dump.h"
#include "debug/anf_ir_utils.h"
#ifdef ENABLE_DEBUGGER
#include "debug/debugger/proto_exporter.h"
#else
#include "debug/debugger/proto_exporter_stub.h"
#endif
using debugger::Chunk;
using debugger::EventReply;
using debugger::GraphProto;
using debugger::ModelProto;
using debugger::TensorProto;
using debugger::WatchCondition;
using debugger::WatchCondition_Condition_inf;
using debugger::WatchCondition_Condition_nan;
using debugger::WatchCondition_Parameter;
using debugger::WatchNode;
using debugger::WatchpointHit;
namespace mindspore {
static constexpr auto g_chunk_size = 1024 * 1024 * 3;
DebuggerPtr Debugger::debugger_ = nullptr;
std::mutex Debugger::instance_lock_;
Debugger::Debugger()
    : grpc_client_(nullptr),
      debug_services_(nullptr),
      device_id_(0),
      device_target_(""),
      num_step_(0),
      debugger_enabled_(false),
      suspended_at_last_kernel_(false),
      run_level_(""),
      node_name_(""),
      cur_name_(""),
      training_done_(false),
      is_dataset_graph_(false),
      partial_memory_(false),
      last_overflow_bin_(0),
      initial_suspend_(true),
      not_dataset_graph_sum_(0),
      version_("") {
  CheckDebuggerEnabledParam();
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  std::string device_target = ms_context->get_param<std::string>(MS_CTX_DEVICE_TARGET);
  MS_LOG(INFO) << "Debugger got device_target: " << device_target;
  if (device_target == kCPUDevice) {
    MS_LOG(WARNING) << "Not enabling debugger. Debugger does not support CPU.";
  } else if (CheckDebuggerEnabled()) {
    // configure partial memory reuse
    partial_memory_ = CheckDebuggerPartialMemoryEnabled();
    // switch memory reuse on or off
    EnvConfigParser::GetInstance().SetSysMemreuse(partial_memory_);
    // print a message about memory reuse to the user
    if (partial_memory_) {
      MS_LOG(WARNING)
        << "Partial Memory Reuse is enabled. Note: 1. Please only set watchpoints before running the first "
           "step. 2. Tensor values are only available for nodes that are watched by any watchpoint.";
    } else {
      MS_LOG(WARNING)
        << "Memory Reuse is disabled. Set environment variable MS_DEBUGGER_PARTIAL_MEM=1 to reduce memory "
           "usage for large models.";
    }
  }
}
void Debugger::Init(const uint32_t device_id, const std::string device_target) {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  // save device_id
  MS_LOG(INFO) << "Debugger got device_id: " << device_id;
  device_id_ = device_id;
  MS_LOG(INFO) << "Debugger got device_target: " << device_target;
  device_target_ = device_target;
  version_ = "1.3.0";
}
bool IsTypeDebuggerSupported(TypeId type) {
  if (type < TypeId::kNumberTypeEnd && type > TypeId::kNumberTypeBegin && type != kNumberTypeComplex64) {
    return true;
  }
  MS_LOG(INFO) << "Debugger does not support type: " << TypeIdLabel(type);
  return false;
}
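// Illustration only (not part of the original source): IsTypeDebuggerSupported()
// accepts every numeric dtype in the open range (kNumberTypeBegin, kNumberTypeEnd)
// except Complex64, and rejects non-number types. A minimal usage sketch,
// assuming the usual TypeId enumerators:
//
//   if (IsTypeDebuggerSupported(kNumberTypeFloat32)) {
//     // ok: this tensor can be loaded to host for watchpoint checks
//   }
//   // a type such as kObjectTypeString falls outside the numeric range and is skipped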
void Debugger::EnableDebugger() {
  // reset some of the class members
  num_step_ = 0;
  debugger_enabled_ = false;
  partial_memory_ = false;
  grpc_client_ = nullptr;
  debug_services_ = nullptr;
  // see if dump using the debugger backend is enabled
  bool dump_enabled = CheckDebuggerDumpEnabled();
  MS_LOG(INFO) << "dump using debugger backend = " << dump_enabled;
  // check if the debugger is enabled
  debugger_enabled_ = CheckDebuggerEnabled();
  MS_LOG(INFO) << "debugger_enabled_ = " << debugger_enabled_;
  if (!debugger_enabled_ && !dump_enabled) {
    MS_LOG(INFO) << "Not enabling debugger. Set environment variable ENABLE_MS_DEBUGGER=1 to enable debugger.";
    return;
  }
  if (debugger_enabled_) {
    // configure grpc host
    std::string env_host_str = common::GetEnv("MS_DEBUGGER_HOST");
    std::string host;
    if (!env_host_str.empty()) {
      if (CheckIp(env_host_str)) {
        MS_LOG(INFO) << "Getenv MS_DEBUGGER_HOST: " << env_host_str;
        host = env_host_str;
      } else {
        debugger_enabled_ = false;
        MS_EXCEPTION(ValueError) << "Environment variable MS_DEBUGGER_HOST isn't a valid IP address. "
                                    "Please set environment variable MS_DEBUGGER_HOST=x.x.x.x to a valid IP";
      }
    } else {
      MS_LOG(INFO) << "Environment variable MS_DEBUGGER_HOST doesn't exist. Using default debugger host: localhost";
      host = "localhost";
    }
    // configure grpc port
    std::string env_port_str = common::GetEnv("MS_DEBUGGER_PORT");
    std::string port;
    if (!env_port_str.empty()) {
      if (CheckPort(env_port_str)) {
        MS_LOG(INFO) << "Getenv MS_DEBUGGER_PORT: " << env_port_str;
        port = env_port_str;
      } else {
        debugger_enabled_ = false;
        MS_EXCEPTION(ValueError) << "Environment variable MS_DEBUGGER_PORT is not valid. Please set a custom port "
                                    "in the range 1 to 65535";
      }
    } else {
      port = "50051";
      if (!CheckPort(port)) {
        MS_EXCEPTION(ValueError) << "Default MS_DEBUGGER_PORT is not valid. Please set a custom port in the range "
                                    "1 to 65535";
      }
      MS_LOG(INFO) << "Environment variable MS_DEBUGGER_PORT doesn't exist. Using default debugger port: 50051";
    }
    // initialize grpc client
    grpc_client_ = std::make_unique<GrpcClient>(host, port);
  }
  debug_services_ = std::make_unique<DebugServices>();
}
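// Illustration only (not part of the original source): the environment variables
// read above configure the debugger end to end. A typical shell setup before
// launching training might look like the following (the values are examples):
//
//   export ENABLE_MS_DEBUGGER=1          # turn the debugger on (see CheckDebuggerEnabled)
//   export MS_DEBUGGER_HOST=127.0.0.1    # MindInsight debugger server IP (validated by CheckIp)
//   export MS_DEBUGGER_PORT=50051        # server port, 1-65535 (validated by CheckPort)
//   export MS_DEBUGGER_PARTIAL_MEM=1     # optional partial memory reuse (see CheckDebuggerPartialMemoryEnabled)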
void Debugger::CheckDatasetSinkMode() {
  if (CheckDebuggerDumpEnabled() && ConfigManager::GetInstance().dataset_mode() == DS_SINK_MODE) {
    MS_EXCEPTION(NotSupportError)
      << "e2e_dump is not supported on GPU with dataset_sink_mode=True. Please set dataset_sink_mode=False";
  }
  if (CheckDebuggerEnabled() && ConfigManager::GetInstance().dataset_mode() == DS_SINK_MODE) {
    MS_EXCEPTION(NotSupportError)
      << "Debugger is not supported with dataset_sink_mode=True. Please set dataset_sink_mode=False";
  }
}
bool Debugger::CheckDebuggerDumpEnabled() const {
  // see if dump is enabled
  if (device_target_ == kGPUDevice) {
    return device::KernelRuntime::DumpDataEnabled();
  }
  return false;
}
bool Debugger::CheckDebuggerEnabled() const {
  // get the env variables that configure the debugger
  std::string env_enable_str = common::GetEnv("ENABLE_MS_DEBUGGER");
  if (!env_enable_str.empty()) {
    (void)std::transform(env_enable_str.begin(), env_enable_str.end(), env_enable_str.begin(), ::tolower);
    if ((env_enable_str == "1" || env_enable_str == "true") && device_target_ != kCPUDevice) {
      return true;
    }
  }
  return false;
}
void Debugger::CheckDebuggerEnabledParam() const {
  // check the value of env variable ENABLE_MS_DEBUGGER
  std::string env_enable_str = common::GetEnv("ENABLE_MS_DEBUGGER");
  if (!env_enable_str.empty()) {
    (void)std::transform(env_enable_str.begin(), env_enable_str.end(), env_enable_str.begin(), ::tolower);
    if (env_enable_str != "0" && env_enable_str != "1" && env_enable_str != "false" && env_enable_str != "true") {
      MS_LOG(WARNING) << "Env variable ENABLE_MS_DEBUGGER should be True/False/1/0 (case insensitive), but got: "
                      << env_enable_str;
    }
  }
}
bool Debugger::CheckDebuggerPartialMemoryEnabled() const {
  std::string env_partial_mem_str = common::GetEnv("MS_DEBUGGER_PARTIAL_MEM");
  if (!env_partial_mem_str.empty()) {
    MS_LOG(INFO) << "Getenv MS_DEBUGGER_PARTIAL_MEM: " << env_partial_mem_str;
    if (env_partial_mem_str == "1") {
      return true;
    }
  }
  return false;
}
bool Debugger::DebuggerBackendEnabled() const { return CheckDebuggerDumpEnabled() || CheckDebuggerEnabled(); }
void Debugger::Reset() {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  // reset components
  device_id_ = 0;
  device_target_ = "";
  num_step_ = 0;
  debugger_enabled_ = false;
  is_dataset_graph_ = false;
  partial_memory_ = false;
  graph_ptr_ = nullptr;
  grpc_client_ = nullptr;
  debug_services_ = nullptr;
  last_overflow_bin_ = 0;
  overflow_bin_path_.clear();
  stream_task_to_opname_.clear();
  graph_proto_list_.clear();
  graph_ptr_list_.clear();
}
void Debugger::PreExecuteGraphDebugger(const std::vector<KernelGraphPtr> &graphs) {
  // only GPU is supported for MindRTBackend
  if (device_target_ != kGPUDevice) {
    return;
  }
  for (size_t graph_index = 0; graph_index < graphs.size(); ++graph_index) {
    const auto &graph = graphs[graph_index];
    if (debugger_) {
      debugger_->PreExecute(graph);
    }
    DumpSetup(graph);
  }
}
void Debugger::PreExecute(const KernelGraphPtr &graph_ptr) {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  CheckDatasetSinkMode();
  auto graph_id = graph_ptr->graph_id();
  // collect rungraph_ids to update the step number in the multigraph case
  if (rungraph_id_list_.empty()) {
    rungraph_id_list_.push_back(graph_id);
  } else {
    if (std::find(rungraph_id_list_.begin(), rungraph_id_list_.end(), graph_id) == rungraph_id_list_.end()) {
      rungraph_id_list_.push_back(graph_id);
    }
  }
  // multiple graphs
  if (graph_proto_list_.size() > 1) {
    // more than one graph is not a dataset graph
    if (not_dataset_graph_sum_ > 0) {
      // only try to enable the debugger if not all graphs are dataset graphs
      if (!debugger_enabled_) {
        EnableDebugger();
      }
      if (debugger_enabled_) {
        // only send compiled graphs once, at the initial step
        auto dbg_graph_ptr = graph_ptr_;
        // use the current graph ptr to load parameters
        graph_ptr_ = graph_ptr;
        LoadParametersAndConst();
        // revert the graph ptr to its original value
        graph_ptr_ = dbg_graph_ptr;
        SendMultiGraphsAndSuspend(graph_proto_list_);
        graph_proto_list_.clear();
      }
    }
  } else if (graph_proto_list_.size() == 1) {
    // single graph, and not the initial step
    if (device_target_ == kGPUDevice && num_step_ != 0) {
      if (debugger_enabled_ && !(run_level_ == "node" && suspended_at_last_kernel_)) {
        CommandLoop();
      }
      debug_services_->ResetLoadedTensors();
    }
    // in the single graph case, reset graph_ptr_ to nullptr for the initial step
    if (num_step_ == 0) {
      graph_ptr_ = nullptr;
      CheckGraphPtr(graph_ptr);
    }
  } else if (debugger_enabled_ && graph_id == rungraph_id_list_.front() && device_target_ == kGPUDevice) {
    // multiple graphs, and not the initial step:
    // stop only when receiving the first sub run graph for each step;
    // if we have already stopped for the last kernel, there is no need to stop again
    if (pipeline::ExecutorPy::GetDebugTerminate()) {
      return;
    }
    if (!(run_level_ == "node" && suspended_at_last_kernel_)) {
      CommandLoop();
    }
    debug_services_->ResetLoadedTensors();
  }
  // resets for the new graph
  suspended_at_last_kernel_ = false;
}
bool Debugger::DumpDataEnabledIteration() const {
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (!dump_json_parser.e2e_dump_enabled()) {
    return false;
  }
  auto cur_iter = dump_json_parser.cur_dump_iter();
  return dump_json_parser.IsDumpIter(cur_iter);
}
uint32_t Debugger::GetRankID() {
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  std::string device_target = ms_context->get_param<std::string>(MS_CTX_DEVICE_TARGET);
  uint32_t device_id = ms_context->get_param<uint32_t>(MS_CTX_DEVICE_ID);
  const auto &device_context =
    device::DeviceContextManager::GetInstance().GetOrCreateDeviceContext({device_target, device_id});
  uint32_t rank_id = device_context->GetRankID();
  return rank_id;
}
void Debugger::Dump(const KernelGraphPtr &kernel_graph) const {
  uint32_t rank_id = GetRankID();
  if (debugger_->DebuggerBackendEnabled()) {
    MS_EXCEPTION_IF_NULL(kernel_graph);
    (void)E2eDump::DumpParametersAndConstData(kernel_graph.get(), rank_id, debugger_.get());
  } else {
    DumpJsonParser::GetInstance().UpdateDumpIter();
  }
}
void Debugger::DumpSingleNode(const CNodePtr &node, uint32_t graph_id) {
  if (debugger_->DebuggerBackendEnabled()) {
    uint32_t rank_id = GetRankID();
    (void)E2eDump::DumpSingleNodeData(node, graph_id, rank_id, debugger_.get());
  }
}
void Debugger::DumpSetup(const KernelGraphPtr &kernel_graph) const {
  MS_LOG(INFO) << "Start!";
  uint32_t rank_id = GetRankID();
  MS_EXCEPTION_IF_NULL(kernel_graph);
  E2eDump::DumpSetup(kernel_graph.get(), rank_id);
  MS_LOG(INFO) << "Finish!";
}
void Debugger::DumpInGraphCompiler(const KernelGraphPtr &kernel_graph) {
  // this function will be called for the new GPU runtime using MindRTBackend
  auto &json_parser = DumpJsonParser::GetInstance();
  if (json_parser.e2e_dump_enabled()) {
    uint32_t rank_id = GetRankID();
    kernel_graph->set_root_graph_id(kernel_graph->graph_id());
    std::string final_graph = "trace_code_graph_" + std::to_string(kernel_graph->graph_id());
    std::string root_dir = json_parser.path() + "/rank_" + std::to_string(rank_id);
    std::string target_dir = root_dir + "/graphs";
    std::string ir_file_path = target_dir + "/" + "ms_output_" + final_graph + ".ir";
    DumpIRProtoWithSrcInfo(kernel_graph, final_graph, target_dir, kDebugWholeStack);
    DumpIR("trace_code_graph", kernel_graph, true, kWholeStack, ir_file_path);
    DumpGraphExeOrder("ms_execution_order_graph_" + std::to_string(kernel_graph->graph_id()) + ".csv", root_dir,
                      kernel_graph->execution_order());
  }
}
void Debugger::PostExecuteGraphDebugger() {
  // on CPU, only update the dump iteration; parameters and consts are not dumped here
  if (device_target_ == kCPUDevice) {
    DumpJsonParser::GetInstance().UpdateDumpIter();
    return;
  }
  // only GPU is supported for MindRTBackend
  if (device_target_ != kGPUDevice) {
    return;
  }
  // LoadParametersAndConst for all the graphs
  for (auto graph : graph_ptr_list_) {
    debugger_->LoadParametersAndConst(graph);
  }
  // debug used for dump
  if (debugger_ && debugger_->CheckDebuggerDumpEnabled()) {
    // dump parameters and consts
    for (auto graph : graph_ptr_list_) {
      debugger_->Dump(graph);
      if (!debugger_->debugger_enabled()) {
        debugger_->ClearCurrentData();
      }
    }
  }
  if (debugger_) {
    debugger_->PostExecute();
  }
}
void Debugger::PostExecute() {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  if (pipeline::ExecutorPy::GetDebugTerminate()) {
    return;
  }
  if (debugger_->DebuggerBackendEnabled()) {
    // analyze tensor data and send any watchpoints that were hit
    if (debugger_enabled_ && !is_dataset_graph_) {
      if (device_target_ != kGPUDevice) {
        num_step_++;
      }
      SendWatchpoints(CheckWatchpoints());
      // no need to suspend at each graph for GPU; suspension happens in PreExecute
      if (device_target_ != kGPUDevice) {
        CommandLoop();
      }
    }
    // only keep parameters in the current map;
    // on GPU, ResetLoadedTensors happens in PreExecute
    if (device_target_ != kGPUDevice) {
      debug_services_->ResetLoadedTensors();
    }
  }
}
bool Debugger::ReadNodeDataRequired(const CNodePtr &kernel) const {
  if (debugger_enabled_ && !is_dataset_graph_) {
    auto is_watchpoint = debug_services_->IsWatchPoint(cur_name_, kernel);
    // read the kernel tensor data if the node has a watchpoint on it, or is a next_to or continue_to node
    if (is_watchpoint || (run_level_ == "node" && (node_name_ == "" || node_name_ == cur_name_))) {
      return true;
    }
  }
  return false;
}
void Debugger::PostExecuteNode(const CNodePtr &kernel, bool last_kernel) {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  if (pipeline::ExecutorPy::GetDebugTerminate()) {
    return;
  }
  if (debugger_enabled_ && !is_dataset_graph_) {
    auto is_watchpoint = debug_services_->IsWatchPoint(cur_name_, kernel);
    // if the kernel is a watchpoint and it is hit, suspend
    bool hit_empty_flag = true;
    if (is_watchpoint) {
      auto hits = CheckWatchpoints(cur_name_, kernel);
      if (!hits.empty()) {
        SendWatchpoints(hits);
        CommandLoop();
        hit_empty_flag = false;
      }
    }
    if (hit_empty_flag && run_level_ == "node" && (node_name_ == "" || node_name_ == cur_name_)) {
      // if the kernel is not a watchpoint but is a next_to or continue_to node, suspend;
      // set a bool to be checked in PreExecute to avoid stopping twice at the last kernel of the last graph
      if (last_kernel) {
        suspended_at_last_kernel_ = true;
      }
      CommandLoop();
    }
    return;
  }
}
void Debugger::PostDebugOp() {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  // suspend if the debugger is enabled
  if (debugger_enabled_ && !is_dataset_graph_) {
    MS_LOG(INFO) << "Debugger suspend at debug_op";
    CommandLoop();
  }
}
void Debugger::SetStreamTaskToOpnameMap(const std::map<std::pair<uint32_t, uint32_t>, std::string> &mapping) {
  MS_LOG(INFO) << "SetStreamTaskToOpnameMap start";
  for (auto const &item : mapping) {
    MS_LOG(INFO) << "stream = " << item.first.first << ", task = " << item.first.second
                 << ", op_name = " << item.second << std::endl;
  }
  MS_LOG(INFO) << "SetStreamTaskToOpnameMap end";
  stream_task_to_opname_ = mapping;
}
void Debugger::LoadGraphs(const KernelGraphPtr &graph_ptr) {
  if (graph_ptr_ != graph_ptr) {
    MS_LOG(INFO) << "LoadGraphs Debugger got new graph: " << graph_ptr->graph_id();
    // save the new graph_ptr
    graph_ptr_ = graph_ptr;
    CheckDatasetGraph();
    if (!is_dataset_graph_) {
      // get the proto for the new graph_ptr
      auto graph_proto = GetGraphProto(graph_ptr);
      // add the new graph proto to graph_proto_list_
      graph_proto_list_.push_back(graph_proto);
      graph_ptr_list_.push_back(graph_ptr);
      not_dataset_graph_sum_++;
    }
    // reset is_dataset_graph_ to false
    is_dataset_graph_ = false;
  }
}
// In single graph cases, check the single graph ptr
void Debugger::CheckGraphPtr(const KernelGraphPtr &graph_ptr) {
  if (graph_ptr_ != graph_ptr) {
    MS_LOG(INFO) << "CheckGraphPtr Debugger got new graph: " << graph_ptr->graph_id();
    // save the new graph_ptr
    graph_ptr_ = graph_ptr;
    if (!is_dataset_graph_) {
      // only try to enable the debugger if it is not a dataset graph
      EnableDebugger();
      if (debugger_enabled_) {
        LoadParametersAndConst();
        // get the graph proto and send it to MindInsight
        auto graph_proto = graph_proto_list_.front();
        SendGraphAndSuspend(graph_proto);
      }
    }
  }
}
void Debugger::CheckDatasetGraph() {
  // print parameter node names
  const auto &params = graph_ptr_->inputs();
  for (const auto &param : params) {
    MS_LOG(INFO) << "param: " << GetKernelNodeName(param);
  }
  // check if there is a GetNext or InitDataSetQueue node
  const auto &nodes = graph_ptr_->execution_order();
  for (const auto &node : nodes) {
    auto node_name = AnfAlgo::GetCNodeName(node);
    MS_LOG(INFO) << "node: " << GetKernelNodeName(node);
    if (node_name == "GetNext" || node_name == "InitDataSetQueue") {
      MS_LOG(INFO) << "Not enabling debugger for graph " << graph_ptr_->graph_id() << ": found dataset graph node "
                   << node_name;
      is_dataset_graph_ = true;
      return;
    }
  }
  is_dataset_graph_ = false;
}
GraphProto Debugger::GetGraphProto(const KernelGraphPtr &graph_ptr) const {
  // convert the kernel graph to a debugger ModelProto
  ModelProto model = GetDebuggerFuncGraphProto(graph_ptr);
  return model.graph();
}
void Debugger::SendGraphAndSuspend(const GraphProto &graph_proto) {
  if (SendMetadata(true)) {
    // send the graph to the MindInsight server
    EventReply reply = grpc_client_->SendGraph(graph_proto);
    if (reply.status() != reply.OK) {
      MS_LOG(ERROR) << "Error: SendGraph failed";
    }
    // enter the command loop; wait for and process commands
    CommandLoop();
  }
}
bool Debugger::SendMetadata(bool version_check) {
  // prepare metadata
  std::string device_name = std::to_string(device_id_) + ":" + std::to_string(graph_ptr_->graph_id());
  Metadata metadata;
  metadata.set_device_name(device_name);
  metadata.set_cur_step(num_step_);
  metadata.set_backend(device_target_);
  metadata.set_cur_node(cur_name_);
  metadata.set_training_done(training_done_);
  metadata.set_ms_version(version_);
  MS_LOG(INFO) << "Is training done? " << training_done_;
  // set graph number to not_dataset_graph_sum_
  metadata.set_graph_num(not_dataset_graph_sum_);
  EventReply reply_metadata = grpc_client_->SendMetadata(metadata);
  bool ret = false;
  if (reply_metadata.status() == reply_metadata.OK) {
    if (version_check) {
      // get the type of the command in the metadata reply; it should be version matched
      DebuggerCommand cmd = GetCommand(reply_metadata);
      if (cmd != DebuggerCommand::kVersionMatchedCMD) {
        MS_LOG(ERROR) << "MindInsight version is too old, MindSpore version is " << version_;
        Exit();
      } else {
        if (GetMiVersionMatched(reply_metadata)) {
          MS_LOG(INFO) << "MindSpore version " << version_ << " matches the MindInsight version.";
          ret = true;
        } else {
          MS_LOG(ERROR) << "MindSpore version " << version_ << " did not match the MindInsight version.";
          CommandLoop();
        }
      }
    } else {
      // the version check was done before, so we can just return true here
      ret = true;
    }
  } else {
    MS_LOG(ERROR) << "Error: SendMetadata failed";
  }
  return ret;
}
void Debugger::SendMultiGraphsAndSuspend(const std::list<GraphProto> &graph_proto_list) {
  if (!SendMetadata(true)) {
    return;
  }
  // send multiple graphs to the MindInsight server,
  // splitting a graph into chunks if it is larger than the chunk size
  std::list<Chunk> chunked_graph_proto_list;
  Chunk chunk;
  for (auto graph : graph_proto_list) {
    std::string str = graph.SerializeAsString();
    auto graph_size = graph.ByteSize();
    if (graph_size > g_chunk_size) {
      auto sub_graph_str = grpc_client_->ChunkString(str, graph_size);
      for (unsigned int i = 0; i < sub_graph_str.size(); i++) {
        chunk.set_buffer(sub_graph_str[i]);
        // only the last chunk of a graph is marked finished
        chunk.set_finished(i == sub_graph_str.size() - 1);
        chunked_graph_proto_list.push_back(chunk);
      }
    } else {
      chunk.set_buffer(str);
      chunk.set_finished(true);
      chunked_graph_proto_list.push_back(chunk);
    }
  }
  EventReply reply = grpc_client_->SendMultiGraphs(chunked_graph_proto_list);
  if (reply.status() != reply.OK) {
    MS_LOG(ERROR) << "Error: SendMultiGraphs failed";
  }
  // enter the command loop; wait for and process commands
  CommandLoop();
}
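// Illustration only (not part of the original source): with g_chunk_size = 3 MiB,
// a 7 MiB serialized graph is streamed as three chunks. A sketch of the math,
// assuming ChunkString() splits at g_chunk_size boundaries:
//
//   size_t graph_size = 7 * 1024 * 1024;
//   size_t num_chunks = (graph_size + g_chunk_size - 1) / g_chunk_size;  // == 3
//   // chunks 0 and 1 carry finished == false, chunk 2 carries finished == true,
//   // so the receiver knows where one graph ends and the next begins.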
void Debugger::CommandLoop() {
  // prepare metadata
  std::string device_name = std::to_string(device_id_) + ":" + std::to_string(graph_ptr_->graph_id());
  Metadata metadata;
  metadata.set_device_name(device_name);
  metadata.set_cur_step(num_step_);
  metadata.set_backend(device_target_);
  metadata.set_cur_node(cur_name_);
  metadata.set_training_done(training_done_);
  // loop exit flag
  bool run = false;
  int num_wait_fail = 0;
  const int max_num_wait_fail = 5;
  while (!run) {
    // wait for a command
    EventReply reply = grpc_client_->WaitForCommand(metadata);
    if (reply.status() != reply.OK) {
      MS_LOG(ERROR) << "Error: WaitForCommand failed";
      num_wait_fail++;
      if (num_wait_fail > max_num_wait_fail) {
        MS_LOG(ERROR) << "Maximum number of WaitForCommand retries reached: exiting training session.";
        MS_LOG(ERROR) << "Failed to connect to MindInsight debugger server. Please check the config "
                         "of debugger host and port.";
        Exit();
        run = true;
      } else {
        MS_LOG(ERROR) << "Number of consecutive WaitForCommand failures: " << num_wait_fail << "; Retry after "
                      << num_wait_fail << "s";
        std::this_thread::sleep_for(std::chrono::seconds(num_wait_fail));
      }
      continue;
    }
    // get the type of the command in the reply
    DebuggerCommand cmd = GetCommand(reply);
    if (cmd == DebuggerCommand::kUnknownCMD) {
      MS_LOG(DEBUG) << "Debug: debugger received unknown command";
      continue;
    }
    MS_LOG(INFO) << "received command: ";
    switch (cmd) {
      case DebuggerCommand::kUnknownCMD:
        MS_LOG(INFO) << "UnknownCMD";
        break;
      case DebuggerCommand::kExitCMD:
        MS_LOG(INFO) << "ExitCMD";
        Exit();
        // used for debugger termination
        run = true;
        break;
      case DebuggerCommand::kRunCMD:
        ProcessRunCMD(reply);
        if (GetRunLevel(reply) != "recheck") {
          // exit loop
          run = true;
        }
        break;
      case DebuggerCommand::kSetCMD:
        ProcessKSetCMD(reply);
        break;
      case DebuggerCommand::kViewCMD:
        ProcessKViewCMD(reply);
        break;
      case DebuggerCommand::kVersionMatchedCMD:
        MS_LOG(ERROR) << "Received unexpected Version Matched CMD from MindInsight.";
        Exit();
        break;
      default:
        MS_LOG(ERROR) << "Received unknown CMD from MindInsight";
        Exit();
        break;
    }
  }
}
void Debugger::ProcessRunCMD(const EventReply &reply) {
  MS_LOG(INFO) << "RunCMD";
  if (GetRunLevel(reply) == "recheck") {
    MS_LOG(INFO) << "rechecking all watchpoints";
    SendWatchpoints(CheckWatchpoints("", nullptr, true));
  } else {
    // no longer the initial suspension
    initial_suspend_ = false;
    // print run cmd content: get run_level and node_name
    run_level_ = GetRunLevel(reply);
    node_name_ = GetNodeName(reply);
    MS_LOG(INFO) << "run_level: " << run_level_;
    MS_LOG(INFO) << "node_name_: " << node_name_;
  }
}
void Debugger::ProcessKSetCMD(const EventReply &reply) {
  MS_LOG(INFO) << "SetCMD";
  MS_LOG(INFO) << "id: " << GetWatchpointID(reply);
  MS_LOG(INFO) << "delete: " << GetWatchpointDelete(reply);
  if (GetWatchpointDelete(reply)) {
    MS_LOG(INFO) << "Deleting watchpoint";
    RemoveWatchpoint(GetWatchpointID(reply));
  } else {
    MS_LOG(INFO) << "Setting watchpoint";
    MS_LOG(INFO) << "condition: " << GetWatchcondition(reply).condition();
    ProtoVector<WatchNode> received_nodes = GetWatchnodes(reply);
    for (const auto &node : received_nodes) {
      MS_LOG(INFO) << "node name: " << node.node_name();
      MS_LOG(INFO) << "node type: " << node.node_type();
    }
    ProtoVector<WatchCondition_Parameter> parameters = GetParameters(reply);
    for (const auto &parameter : parameters) {
      MS_LOG(INFO) << "parameter name: " << parameter.name();
      MS_LOG(INFO) << "parameter is disabled: " << parameter.disabled();
      MS_LOG(INFO) << "parameter value: " << parameter.value();
    }
    SetWatchpoint(GetWatchnodes(reply), GetWatchcondition(reply), GetWatchpointID(reply), GetParameters(reply));
  }
}
void Debugger::ProcessKViewCMD(const EventReply &reply) {
  MS_LOG(INFO) << "ViewCMD";
  // print view cmd content
  ProtoVector<TensorProto> received_tensors = GetTensors(reply);
  for (auto received_tensor : received_tensors) {
    MS_LOG(INFO) << "tensor node name: " << received_tensor.node_name();
    MS_LOG(INFO) << "tensor slot: " << received_tensor.slot();
    MS_LOG(INFO) << "tensor finished: " << std::boolalpha << received_tensor.finished() << std::noboolalpha;
    MS_LOG(INFO) << "tensor iter: " << received_tensor.iter();
    MS_LOG(INFO) << "tensor truncate: " << std::boolalpha << received_tensor.truncate() << std::noboolalpha;
  }
  MS_LOG(INFO) << "Sending tensors";
  std::list<TensorProto> tensors = LoadTensors(GetTensors(reply));
  // print view cmd reply
  for (auto tensor : tensors) {
    MS_LOG(INFO) << "tensor node name: " << tensor.node_name();
    MS_LOG(INFO) << "tensor slot: " << tensor.slot();
    MS_LOG(INFO) << "tensor finished: " << std::boolalpha << tensor.finished() << std::noboolalpha;
    MS_LOG(INFO) << "tensor iter: " << tensor.iter();
    MS_LOG(INFO) << "tensor truncate: " << std::boolalpha << tensor.truncate() << std::noboolalpha;
    MS_LOG(INFO) << "tensor dims: ";
    for (auto dim : tensor.dims()) {
      MS_LOG(INFO) << dim << ",";
    }
    MS_LOG(INFO) << "tensor dtype: " << tensor.data_type();
  }
  EventReply send_tensors_reply = grpc_client_->SendTensors(tensors);
  if (send_tensors_reply.status() != debugger::EventReply::OK) {
    MS_LOG(ERROR) << "Error: SendTensors failed";
  }
}
void AddTensorProtoInfo(TensorProto *tensor_item, const TensorProto &tensor) {
  tensor_item->set_node_name(tensor.node_name());
  tensor_item->set_slot(tensor.slot());
  tensor_item->set_iter(tensor.iter());
  tensor_item->set_truncate(tensor.truncate());
  tensor_item->clear_tensor_content();
  tensor_item->clear_data_type();
  tensor_item->clear_dims();
}
void Debugger::SetWatchpoint(const ProtoVector<WatchNode> &nodes, const WatchCondition &condition, const int32_t id,
                             const ProtoVector<WatchCondition_Parameter> &parameters) {
  std::vector<std::tuple<std::string, bool>> check_node_list;
  std::vector<DebugServices::parameter_t> parameter_list;
  std::transform(nodes.begin(), nodes.end(), std::back_inserter(check_node_list),
                 [](const WatchNode &node) -> std::tuple<std::string, bool> {
                   return make_tuple(node.node_name(), node.node_type() == "scope");
                 });
  std::transform(
    parameters.begin(), parameters.end(), std::back_inserter(parameter_list),
    [](const WatchCondition_Parameter &parameter) -> DebugServices::parameter_t {
      return DebugServices::parameter_t{parameter.name(), parameter.disabled(), parameter.value(), parameter.hit()};
    });
  debug_services_->AddWatchpoint(id, condition.condition(), condition.value(), check_node_list, parameter_list);
}
void Debugger::RemoveWatchpoint(const int32_t id) { debug_services_->RemoveWatchpoint(id); }
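// Illustration only (not part of the original source): SetWatchpoint() turns the
// proto payload into DebugServices entries. Conceptually, a SetCMD that watches
// scope "Default/network" for NaNs would map to something like (values hypothetical):
//
//   // check_node_list: {("Default/network", /*is_scope=*/true)}
//   // parameter_list:  {}  (a condition such as WatchCondition_Condition_nan
//   //                       carries no extra parameters)
//   // debug_services_->AddWatchpoint(id, condition.condition(), condition.value(), ...)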
std::list<TensorProto> Debugger::LoadTensors(const ProtoVector<TensorProto> &tensors) const {
  std::vector<std::string> name;
  std::vector<std::string> ret_name;
  std::vector<char *> data_ptr;
  std::vector<ssize_t> data_size;
  std::vector<unsigned int> dtype;
  std::vector<std::vector<int64_t>> shape;
  std::transform(tensors.begin(), tensors.end(), std::back_inserter(name), GetTensorFullName);
  // ret_name will contain the tensor names that are found in TensorLoader;
  // items in ret_name will be in the same order as in tensors, if found
  debug_services_->ReadNodesTensors(name, &ret_name, &data_ptr, &data_size, &dtype, &shape);
  std::list<TensorProto> tensor_list;
  unsigned int result_index = 0;
  for (auto tensor : tensors) {
    ssize_t size_iter = 0;
    if (result_index >= ret_name.size() || ret_name[result_index] != GetTensorFullName(tensor)) {
      // return an empty tensor if the requested tensor was not found
      TensorProto tensor_item;
      tensor_item.set_finished(true);
      AddTensorProtoInfo(&tensor_item, tensor);
      tensor_list.push_back(tensor_item);
      continue;
    }
    ssize_t tensor_size = data_size[result_index];
    while (size_iter < tensor_size) {
      ssize_t chunk_size = g_chunk_size;
      TensorProto tensor_item;
      tensor_item.set_finished(false);
      if (tensor_size - size_iter <= g_chunk_size) {
        chunk_size = tensor_size - size_iter;
        tensor_item.set_finished(true);
      }
      AddTensorProtoInfo(&tensor_item, tensor);
      tensor_item.set_tensor_content(data_ptr[result_index] + size_iter, chunk_size);
      tensor_item.set_data_type((debugger::DataType)dtype[result_index]);
      for (auto &elem : shape[result_index]) {
        tensor_item.add_dims(elem);
      }
      // add the chunk to the result list and advance within the tensor
      tensor_list.push_back(tensor_item);
      size_iter += g_chunk_size;
    }
    // increment result_index to check the next item in ret_name
    result_index++;
  }
  return tensor_list;
}
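// Illustration only (not part of the original source): a tensor larger than
// g_chunk_size is streamed in pieces, mirroring the graph chunking above. For a
// 5 MiB tensor and the 3 MiB chunk size, the loop above yields:
//
//   // chunk 0: bytes [0, 3 MiB),     finished == false
//   // chunk 1: bytes [3 MiB, 5 MiB), finished == true (last, partial chunk)
//
// The receiver reassembles the content by concatenating chunks until it sees
// finished == true.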
void Debugger::Exit() {
  // clear resources before exit;
  // the debugger notifies the main thread to exit because the main thread can only exit at a step boundary
  pipeline::ExecutorPy::DebugTerminate(true);
}
std::list<WatchpointHit> Debugger::CheckWatchpoints(const std::string &watchnode, const CNodePtr &kernel,
                                                    bool recheck) {
  std::vector<std::string> name;
  std::vector<std::string> slot;
  std::vector<int> condition;
  std::vector<unsigned int> watchpoint_id;
  std::vector<std::string> overflow_ops;
  std::vector<std::vector<DebugServices::parameter_t>> parameters;
  std::vector<int32_t> error_codes;
#ifdef ENABLE_D
  overflow_ops = CheckOpOverflow();
  for (auto const &item : overflow_ops) {
    MS_LOG(DEBUG) << "overflow_ops item = " << item << std::endl;
  }
#endif
  std::vector<std::shared_ptr<TensorData>> tensor_list;
  if (watchnode.empty()) {
    tensor_list = debug_services_->GetTensor();
  } else {
    tensor_list = debug_services_->GetNodeTensor(kernel);
  }
  std::vector<std::string> file_list;
  MS_LOG(INFO) << "CheckWatchpoints call for step " << num_step_;
  debug_services_->CheckWatchpoints(&name, &slot, &condition, &watchpoint_id, &parameters, &error_codes, overflow_ops,
                                    file_list, &tensor_list, initial_suspend_, watchnode.empty(), recheck);
  std::list<WatchpointHit> hits;
  for (unsigned int i = 0; i < name.size(); i++) {
    WatchpointHit hit;
    std::vector<DebugServices::parameter_t> &parameter = parameters[i];
    hit.set_id(watchpoint_id[i]);
    hit.set_error_code(error_codes[i]);
    // here TensorProto acts as a tensor indicator; tensor content is not sent
    TensorProto *tensor_item = hit.mutable_tensor();
    tensor_item->set_node_name(name[i]);
    tensor_item->set_slot(slot[i]);
    tensor_item->set_finished(true);
    WatchCondition *condition_item = hit.mutable_watch_condition();
    condition_item->set_condition(debugger::WatchCondition_Condition(condition[i]));
    for (const auto &p : parameter) {
      auto x = condition_item->mutable_params()->Add();
      x->set_name(p.name);
      x->set_disabled(p.disabled);
      x->set_value(p.value);
      x->set_hit(p.hit);
      x->set_actual_value(p.actual_value);
    }
    hits.push_back(hit);
  }
  return hits;
}
void Debugger::SendWatchpoints(const std::list<WatchpointHit> &points) {
  // send info about watchpoint hits
  if (!points.empty()) {
    EventReply reply = grpc_client_->SendWatchpointHits(points);
    if (reply.status() != reply.OK) {
      MS_LOG(ERROR) << "Error: SendWatchpointHits failed";
    }
  }
}
bool Debugger::DumpTensorToFile(const std::string &tensor_name, bool trans_flag, const std::string &filepath,
                                const std::string &host_fmt, const std::vector<int64_t> &host_shape, TypeId host_type,
                                TypeId device_type, const std::string &addr_format, size_t slot) const {
  return debug_services_->DumpTensorToFile(tensor_name, trans_flag, filepath, host_fmt, host_shape, host_type,
                                           device_type, addr_format, slot);
}
bool Debugger::DebugServicesIsWatchPoint(const std::string &kernel_name, const CNodePtr &kernel) const {
  return debug_services_->IsWatchPoint(kernel_name, kernel);
}
void Debugger::EmptyTensor() { debug_services_->EmptyTensor(); }
void Debugger::SetTensorLoaderIterNum(uint32_t iter_num) { debug_services_->SetTensorLoaderIterNum(iter_num); }
void Debugger::EmptyPrevTensor() { debug_services_->EmptyPrevTensor(); }
uint32_t Debugger::GetTensorLoaderIterNum() const { return debug_services_->GetTensorLoaderIterNum(); }
bool Debugger::LoadNewTensor(const std::shared_ptr<TensorData> &tensor, bool keep_prev) {
  return debug_services_->LoadNewTensor(tensor, keep_prev);
}
bool Debugger::debugger_enabled() const { return debugger_enabled_; }
DebuggerCommand GetCommand(const EventReply &reply) {
  DebuggerCommand cmd = DebuggerCommand::kUnknownCMD;
  switch (reply.cmd_case()) {
    case debugger::EventReply::CmdCase::kExit:
      cmd = DebuggerCommand::kExitCMD;
      break;
    case debugger::EventReply::CmdCase::kRunCmd:
      cmd = DebuggerCommand::kRunCMD;
      break;
    case debugger::EventReply::CmdCase::kSetCmd:
      cmd = DebuggerCommand::kSetCMD;
      break;
    case debugger::EventReply::CmdCase::kViewCmd:
      cmd = DebuggerCommand::kViewCMD;
      break;
    case debugger::EventReply::CmdCase::kVersionMatched:
      cmd = DebuggerCommand::kVersionMatchedCMD;
      break;
    default:
      MS_LOG(DEBUG) << "Debug: UnknownCMD";
      break;
  }
  return cmd;
}
ProtoVector<WatchCondition_Parameter> GetParameters(const EventReply &reply) {
  if (!reply.has_set_cmd() || !reply.set_cmd().has_watch_condition()) {
    MS_LOG(ERROR) << "Error: Cannot get Parameters from command. Returning default value: ProtoVector<Parameter>().";
    return ProtoVector<WatchCondition_Parameter>();
  }
  return reply.set_cmd().watch_condition().params();
}
ProtoVector<WatchNode> GetWatchnodes(const EventReply &reply) {
  if (!reply.has_set_cmd()) {
    MS_LOG(ERROR) << "Error: Not SetCMD, cannot get WatchNodes. Returning default value: ProtoVector<WatchNode>().";
    return ProtoVector<WatchNode>();
  }
  return reply.set_cmd().watch_nodes();
}
std::string GetRunLevel(const EventReply &reply) {
  if (!reply.has_run_cmd()) {
    MS_LOG(ERROR) << "Error: Not RunCMD, cannot get RunLevel. Returning default value: \"\".";
    return "";
  }
  return reply.run_cmd().run_level();
}
std::string GetNodeName(const EventReply &reply) {
  if (!reply.has_run_cmd()) {
    MS_LOG(ERROR) << "Error: Not RunCMD, cannot get NodeName. Returning default value: \"\".";
    return "";
  }
  return reply.run_cmd().node_name();
}
WatchCondition GetWatchcondition(const EventReply &reply) {
  if (!reply.has_set_cmd() || !reply.set_cmd().has_watch_condition()) {
    MS_LOG(ERROR) << "Error: Cannot get WatchCondition from command. Returning default value: WatchCondition().";
    return WatchCondition();
  }
  return reply.set_cmd().watch_condition();
}
int32_t GetWatchpointID(const EventReply &reply) {
  if (!reply.has_set_cmd()) {
    MS_LOG(ERROR) << "Error: Not SetCMD, cannot get Watchpoint ID. Returning default value: 0.";
    return 0;
  }
  return reply.set_cmd().id();
}
bool GetWatchpointDelete(const EventReply &reply) {
  if (!reply.has_set_cmd()) {
    MS_LOG(ERROR) << "Error: Not SetCMD, cannot get Watchpoint delete flag. Returning default value: false.";
    return false;
  }
  return reply.set_cmd().delete_();
}
ProtoVector<TensorProto> GetTensors(const EventReply &reply) {
  if (!reply.has_view_cmd()) {
    MS_LOG(ERROR) << "Error: Not ViewCMD, cannot get Tensors. Returning default value: ProtoVector<TensorProto>().";
    return ProtoVector<TensorProto>();
  }
  return reply.view_cmd().tensors();
}
std::string GetTensorFullName(const TensorProto &tensor) {
  string node_name = tensor.node_name();
  if (tensor.truncate()) {
    // scopes in the node name are separated by '/';
    // use the name without the scope if truncate is true
    std::size_t found = node_name.find_last_of("/");
    node_name = node_name.substr(found + 1);
  }
  return node_name + ":" + tensor.slot() + (tensor.iter() == "" ? "" : ":" + tensor.iter());
}
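// Illustration only (not part of the original source): examples of the full-name
// format produced above (hypothetical values):
//
//   node_name = "Default/network/Conv2D-op1", slot = "0", iter = ""  -> "Default/network/Conv2D-op1:0"
//   the same tensor with truncate == true                            -> "Conv2D-op1:0"
//   node_name = "Conv2D-op1", slot = "0", iter = "prev"              -> "Conv2D-op1:0:prev"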
bool GetMiVersionMatched(const EventReply &reply) { return reply.version_matched(); }
bool Debugger::partial_memory() const { return partial_memory_; }
void Debugger::SetCurNode(const std::string &cur_name) {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  cur_name_ = cur_name;
}
std::string Debugger::run_level() const { return run_level_; }
void Debugger::SetStepNum(int32_t cur_num_step) {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  num_step_ = cur_num_step;
}
int32_t Debugger::step_num() const { return num_step_; }
uint64_t BytestoUInt64(const std::vector<char> &buffer) {
  // interpret the first 8 bytes of the buffer as a little-endian unsigned integer
  return le64toh(*reinterpret_cast<const uint64_t *>(buffer.data()));
}
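// Illustration only (not part of the original source): BytestoUInt64() decodes a
// little-endian 8-byte field. For example, the byte sequence
//
//   {0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
//
// decodes to 2 on any host, since le64toh() converts from little-endian to host
// byte order.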
std::vector<std::string> Debugger::CheckOpOverflow() {
  std::vector<std::string> op_names;
  std::string overflow_bin_path = DumpJsonParser::GetInstance().GetOpOverflowBinPath(graph_ptr_->graph_id());
  MS_LOG(INFO) << "Processing bin file path " << overflow_bin_path;
  DIR *d = opendir(overflow_bin_path.c_str());
  if (d != nullptr) {
    struct dirent *dir = nullptr;
    while ((dir = readdir(d)) != nullptr) {
      if (dir->d_type == DT_REG) {
        std::string file_path = overflow_bin_path;
        std::string file_name = dir->d_name;
        (void)file_path.append(file_name);
        std::fstream infile;
        infile.open(file_path.c_str(), std::ios::binary | std::ios::in);
        if (!infile.is_open()) {
          MS_LOG(ERROR) << "Failed to open overflow bin file " << file_name;
          continue;
        }
        // start of op overflow data in the bin file
        const uint32_t offset = 321;
        (void)infile.seekg(offset, std::ios::beg);
        std::vector<char> buffer;
        // size of the op overflow info section
        const size_t buf_size = 256;
        buffer.resize(buf_size);
        (void)infile.read(buffer.data(), buf_size);
        const uint8_t stream_id_offset = 16;
        const uint8_t task_id_offset = 24;
        // The stream_id and task_id in the dump file are 8-byte fields for extensibility purposes, but only hold
        // 4-byte values currently.
        uint64_t stream_id = BytestoUInt64(std::vector<char>(buffer.begin() + stream_id_offset, buffer.end()));
        uint64_t task_id = BytestoUInt64(std::vector<char>(buffer.begin() + task_id_offset, buffer.end()));
        MS_LOG(INFO) << "Overflow bin file " << file_name << ", overflow stream_id " << stream_id << ", task_id "
                     << task_id << ".";
        auto op = debugger_->stream_task_to_opname_.find(std::make_pair(stream_id, task_id));
        if (op != debugger_->stream_task_to_opname_.end()) {
          MS_LOG(INFO) << "Overflow detected on node " << op->second << std::endl;
          op_names.push_back(op->second);
        } else {
          MS_LOG(INFO) << "No overflow is detected " << std::endl;
        }
        infile.close();
      }
    }
    // only close the directory stream if it was successfully opened
    closedir(d);
  } else {
    MS_LOG(INFO) << "Overflow bin directory does not exist!";
  }
  if (!op_names.empty()) {
    MS_LOG(INFO) << "These operation overflows are detected " << op_names;
  }
  return op_names;
}
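// Illustration only (not part of the original source): a sketch of the overflow
// bin layout assumed by CheckOpOverflow(), based on the constants above:
//
//   byte 0                 321                                        321+256
//   |---- header ----------|---- op overflow info section -----------------|
//                          |  +16: stream_id (8-byte little-endian field)  |
//                          |  +24: task_id   (8-byte little-endian field)  |
//
// The (stream_id, task_id) pair is then looked up in stream_task_to_opname_
// (populated by SetStreamTaskToOpnameMap) to recover the op name.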
void Debugger::SetTrainingDone(bool training_done) { training_done_ = training_done; }
bool Debugger::CheckPort(const std::string &port) const {
  int num = 0;
  const int min_port_num = 1;
  const int max_port_num = 65535;
  const int decimal = 10;
  // reject leading zeros (e.g. "050051")
  if (port[0] == '0' && port[1] != '\0') return false;
  int i = 0;
  while (port[i] != '\0') {
    // every character must be a decimal digit
    if (port[i] < '0' || port[i] > '9') return false;
    num = num * decimal + (port[i] - '0');
    if (num > max_port_num) return false;
    i++;
  }
  if (num < min_port_num) return false;
  return true;
}
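// Illustration only (not part of the original source): expected behavior of
// CheckPort() on a few inputs:
//
//   CheckPort("50051")  -> true
//   CheckPort("65536")  -> false  (exceeds max_port_num)
//   CheckPort("0")      -> false  (below min_port_num)
//   CheckPort("050051") -> false  (leading zero)
//   CheckPort("50a51")  -> false  (non-digit character)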
bool Debugger::CheckIp(const std::string &host) const {
  // dotted-quad IPv4 address; the first and last octets must not be 0 or 255
  std::regex reg_ip(
    "(25[0-4]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[1-9])"
    "[.](25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])"
    "[.](25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])"
    "[.](25[0-4]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[1-9])");
  std::smatch smat;
  std::string host_str = host;
  return std::regex_match(host_str, smat, reg_ip);
}
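// Illustration only (not part of the original source): examples of what the
// regex above accepts and rejects:
//
//   CheckIp("127.0.0.1")     -> true
//   CheckIp("192.168.0.255") -> false  (last octet may not be 0 or 255)
//   CheckIp("0.1.2.3")       -> false  (first octet may not be 0)
//   CheckIp("localhost")     -> false  (host names are not matched; the
//                                       "localhost" default bypasses this check)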
uint32_t Debugger::GetFirstRunGraphId() const { return rungraph_id_list_.front(); }
void Debugger::LoadSingleAnfnode(const AnfNodePtr &anf_node, const size_t output_index) {
  MS_EXCEPTION_IF_NULL(anf_node);
  if (!anf_node->isa<Parameter>() && !anf_node->isa<ValueNode>()) {
    return;
  }
  // when MindRT is used, only ValueNodes and parameter weights can be loaded from device to host
  if (MsContext::GetInstance()->get_param<bool>(MS_CTX_ENABLE_MINDRT) && (device_target_ == kGPUDevice)) {
    if (!anf_node->isa<ValueNode>() &&
        !(anf_node->isa<Parameter>() && AnfAlgo::IsParameterWeight(anf_node->cast<ParameterPtr>()))) {
      return;
    }
  }
  // for parameters and value nodes, set the execution order to 0
  int exec_order = 0;
  std::string node_name = GetKernelNodeName(anf_node);
  GetFileKernelName(NOT_NULL(&node_name));
  // check whether the output address exists; if not, return
  if (!AnfAlgo::OutputAddrExist(anf_node, output_index)) {
    return;
  }
  auto addr = AnfAlgo::GetOutputAddr(anf_node, output_index);
  MS_EXCEPTION_IF_NULL(addr);
  auto type = AnfAlgo::GetOutputInferDataType(anf_node, output_index);
  if (!IsTypeDebuggerSupported(type)) {
    return;
  }
  auto format = kOpFormat_DEFAULT;
  string tensor_name = node_name + ':' + "0";
  ShapeVector int_shapes = trans::GetRuntimePaddingShape(anf_node, output_index);
  bool keep_prev;
  if (anf_node->isa<Parameter>()) {
    // keep the previous value of a parameter so watchpoints can compare across steps
    keep_prev = true;
    debug_services_->MoveTensorCurrentToPrev(tensor_name);
  } else {
    keep_prev = false;
  }
  bool ret = addr->LoadMemToHost(tensor_name, exec_order, format, int_shapes, type, 0, keep_prev);
  if (!ret) {
    MS_LOG(ERROR) << "LoadMemToHost failed: tensor_name: " << tensor_name << ", host_format: " << format << ".";
  }
}
void Debugger::LoadParametersAndConst() {
  if (!(debugger_enabled_ || CheckDebuggerDumpEnabled())) return;
  MS_EXCEPTION_IF_NULL(graph_ptr_);
  // load parameters
  MS_LOG(INFO) << "Start to load Parameters!";
  const auto &parameters = graph_ptr_->inputs();
  for (auto &item : parameters) {
    LoadSingleAnfnode(item, PARAMETER_OUTPUT_INDEX);
  }
  // load value nodes: get all constant values from the graph
  MS_LOG(INFO) << "Start to load value nodes!";
  const auto value_nodes = graph_ptr_->graph_value_nodes();
  for (auto &item : value_nodes) {
    LoadSingleAnfnode(item, VALUE_NODE_OUTPUT_INDEX);
  }
}
void Debugger::LoadParametersAndConst(const KernelGraphPtr &graph) {
  if (!(debugger_enabled_ || CheckDebuggerDumpEnabled())) return;
  MS_EXCEPTION_IF_NULL(graph);
  // load parameters
  MS_LOG(INFO) << "Start to load Parameters for graph " << graph->graph_id();
  const auto &parameters = graph->inputs();
  for (auto &item : parameters) {
    LoadSingleAnfnode(item, PARAMETER_OUTPUT_INDEX);
  }
  // load value nodes: get all constant values from the graph
  MS_LOG(INFO) << "Start to load value nodes for graph " << graph->graph_id();
  const auto value_nodes = graph->graph_value_nodes();
  for (auto &item : value_nodes) {
    LoadSingleAnfnode(item, VALUE_NODE_OUTPUT_INDEX);
  }
}
void Debugger::LoadGraphOutputs() {
  if (!(debugger_enabled() && device_target_ == kAscendDevice)) return;
  MS_EXCEPTION_IF_NULL(graph_ptr_);
  const auto &apply_kernels = graph_ptr_->execution_order();
  // for kernels, execution order starts from 1
  int exec_order = 1;
  for (const auto &node : apply_kernels) {
    MS_EXCEPTION_IF_NULL(node);
    std::string kernel_name = GetKernelNodeName(node);
    auto output_size = AnfAlgo::GetOutputTensorNum(node);
    if (partial_memory_) {
      if (!debug_services_->IsWatchPoint(kernel_name, node)) {
        continue;
      }
    }
    for (size_t j = 0; j < output_size; ++j) {
      if (!AnfAlgo::OutputAddrExist(node, j)) {
        MS_LOG(INFO) << "Cannot find output addr for slot " << j << " for " << kernel_name;
        continue;
      }
      auto addr = AnfAlgo::GetOutputAddr(node, j);
      MS_EXCEPTION_IF_NULL(addr);
      auto type = AnfAlgo::GetOutputInferDataType(node, j);
      if (!IsTypeDebuggerSupported(type)) {
        continue;
      }
      auto format = kOpFormat_DEFAULT;
      string tensor_name = kernel_name + ':' + std::to_string(j);
      ShapeVector int_shapes = trans::GetRuntimePaddingShape(node, j);
      auto ret = addr->LoadMemToHost(tensor_name, exec_order, format, int_shapes, type, j, false);
      if (!ret) {
        MS_LOG(ERROR) << "LoadMemToHost failed: tensor_name: " << tensor_name << ", host_format: " << format << ".";
      }
    }
    exec_order = exec_order + 1;
  }
}
void Debugger::UpdateStepNum(const session::KernelGraph *graph) {
  // update the step number if we are processing the first graph (to support multigraph)
  if (device_target_ == kGPUDevice && (debugger_enabled_ || device::KernelRuntime::DumpDataEnabledIteration()) &&
      (graph->graph_id() == debugger_->GetFirstRunGraphId())) {
    // access lock for public method
    std::lock_guard<std::mutex> a_lock(access_lock_);
    ++num_step_;
  }
}
void Debugger::UpdateStepNumGPU() {
  // UpdateStepNum with DebugActor::DebugOnStepEnd
  if (device_target_ == kGPUDevice && (debugger_enabled_ || DumpDataEnabledIteration())) {
    // access lock for public method
    std::lock_guard<std::mutex> a_lock(access_lock_);
    ++num_step_;
  }
}
void Debugger::ClearCurrentData() {
  if ((device_target_ == kGPUDevice) && (debugger_enabled_ || device::KernelRuntime::DumpDataEnabledIteration())) {
    if (debug_services_) {
      debug_services_->EmptyCurrentTensor();
    } else {
      MS_LOG(ERROR) << "debug_services_ is nullptr";
    }
  }
}
bool Debugger::TensorExistsInCurrent(const std::string &tensor_name) {
  return debug_services_->TensorExistsInCurrent(tensor_name);
}
}  // namespace mindspore