You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

debugger.cc 23 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
#include <algorithm>
#include <chrono>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <list>
#include <memory>
#include <string>
#include <thread>
#include <tuple>
#include <vector>
#include "debug/debugger/debugger.h"
#include "pipeline/jit/pipeline.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "runtime/device/kernel_runtime_manager.h"
  24. using debugger::EventReply;
  25. using debugger::GraphProto;
  26. using debugger::ModelProto;
  27. using debugger::TensorProto;
  28. using debugger::WatchCondition;
  29. using debugger::WatchCondition_Condition_inf;
  30. using debugger::WatchCondition_Condition_nan;
  31. using debugger::WatchNode;
  32. using debugger::WatchpointHit;
namespace mindspore {
// Singleton instance of the debugger and the lock guarding its lazy creation.
DebuggerPtr Debugger::debugger_ = nullptr;
std::mutex Debugger::instance_lock_;
// Default-construct the debugger with networking disabled and all state cleared.
// Real configuration happens later via Init() and EnableDebugger().
Debugger::Debugger()
    : grpc_client_(nullptr),
      debug_services_(nullptr),
      device_id_(0),
      device_target_(""),
      num_step_(0),
      debugger_enabled_(false),
      run_level_(""),
      node_name_(""),
      cur_name_(""),
      is_dataset_graph_(false),
      partial_memory_(false) {}
  48. void Debugger::Init(const uint32_t device_id, const std::string device_target) {
  49. // access lock for public method
  50. std::lock_guard<std::mutex> a_lock(access_lock_);
  51. // save device_id
  52. MS_LOG(INFO) << "Debugger got device_id: " << device_id;
  53. device_id_ = device_id;
  54. MS_LOG(INFO) << "Debugger got device_target: " << device_target;
  55. device_target_ = device_target;
  56. }
// Decide whether the debugger should be active for the current graph and, if so,
// build the gRPC client and DebugServices from environment-variable configuration.
// Recognized variables: ENABLE_MS_DEBUGGER, MS_DEBUGGER_HOST, MS_DEBUGGER_PORT,
// MS_DEBUGGER_PARTIAL_MEM.
void Debugger::EnableDebugger() {
  // reset some of the class members
  num_step_ = 0;
  debugger_enabled_ = false;
  partial_memory_ = false;
  grpc_client_ = nullptr;
  debug_services_ = nullptr;
  // see if dump is enabled; on GPU the kernel runtime knows
  bool dump_enabled = false;
  if (device_target_ == kGPUDevice) {
    auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
    MS_EXCEPTION_IF_NULL(runtime_instance);
    dump_enabled = runtime_instance->DumpDataEnabled();
  }
  // ENABLE_MS_DEBUGGER=1 switches the debugger on
  const char *env_enable_str = std::getenv("ENABLE_MS_DEBUGGER");
  if (env_enable_str != nullptr) {
    MS_LOG(INFO) << "Getenv ENABLE_MS_DEBUGGER: " << env_enable_str;
    if (std::strcmp(env_enable_str, "1") == 0) {
      debugger_enabled_ = true;
    }
  }
  // DebugServices is still created below when only dump is enabled, so bail out
  // only when neither the debugger nor dump is requested
  if (!debugger_enabled_ && !dump_enabled) {
    MS_LOG(WARNING) << "Not enabling debugger. Set environment variable ENABLE_MS_DEBUGGER=1 to enable debugger.";
    return;
  }
  // configure grpc host (defaults to localhost)
  const char *env_host_str = std::getenv("MS_DEBUGGER_HOST");
  std::string host;
  if (env_host_str != nullptr) {
    MS_LOG(INFO) << "Getenv MS_DEBUGGER_HOST: " << env_host_str;
    host = std::string(env_host_str);
  } else {
    MS_LOG(WARNING) << "Environment variable MS_DEBUGGER_HOST doesn't exist. Using default debugger host: localhost";
    host = "localhost";
  }
  // configure grpc port (defaults to 50051)
  const char *env_port_str = std::getenv("MS_DEBUGGER_PORT");
  std::string port;
  if (env_port_str != nullptr) {
    MS_LOG(INFO) << "Getenv MS_DEBUGGER_PORT: " << env_port_str;
    port = std::string(env_port_str);
  } else {
    MS_LOG(WARNING) << "Environment variable MS_DEBUGGER_PORT doesn't exist. Using default debugger port: 50051";
    port = "50051";
  }
  // configure partial memory reuse (MS_DEBUGGER_PARTIAL_MEM=1 enables it)
  const char *env_partial_mem_str = std::getenv("MS_DEBUGGER_PARTIAL_MEM");
  if (env_partial_mem_str != nullptr) {
    MS_LOG(INFO) << "Getenv MS_DEBUGGER_PARTIAL_MEM: " << env_partial_mem_str;
    if (std::strcmp(env_partial_mem_str, "1") == 0) {
      partial_memory_ = true;
    }
  }
  // switch memory reuse on or off in the global context
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  context_ptr->set_enable_mem_reuse(partial_memory_);
  // print some message about memory reuse to user
  if (partial_memory_) {
    MS_LOG(WARNING) << "Partial Memory Reuse is enabled. Note: 1. Please only set watchpoints before running the first "
                       "step. 2. Tensor values are only available for nodes that are watched by any watchpoint.";
  } else {
    MS_LOG(WARNING) << "Memory Reuse is disabled. Set environment variable MS_DEBUGGER_PARTIAL_MEM=1 to reduce memory "
                       "usage for large models.";
  }
  // initialize grpc client only when the debugger itself (not just dump) is on
  if (debugger_enabled_) {
    grpc_client_ = std::make_unique<GrpcClient>(host, port);
  }
  debug_services_ = std::make_unique<DebugServices>();
}
  129. void Debugger::Reset() {
  130. // access lock for public method
  131. std::lock_guard<std::mutex> a_lock(access_lock_);
  132. // reset components
  133. device_id_ = 0;
  134. device_target_ = "";
  135. num_step_ = 0;
  136. debugger_enabled_ = false;
  137. is_dataset_graph_ = false;
  138. partial_memory_ = false;
  139. graph_ptr_ = nullptr;
  140. grpc_client_ = nullptr;
  141. debug_services_ = nullptr;
  142. }
  143. void Debugger::PreExecute(const KernelGraphPtr &graph_ptr) {
  144. // access lock for public method
  145. std::lock_guard<std::mutex> a_lock(access_lock_);
  146. // check and save graph_ptr, suspend if graph is new
  147. CheckGraphPtr(graph_ptr);
  148. }
// Called after a graph finishes executing. At graph level this is where the
// debugger suspends at the end of a step.
void Debugger::PostExecute() {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  // node-level stepping is handled per kernel in PostExecuteNode, not here
  if (run_level_ == "node") {
    MS_LOG(INFO) << "Debugger is in node level mode ";
    return;
  }
  // analyze tensor data and send the watchpoints been hit
  if (debugger_enabled_ && !is_dataset_graph_) {
    if (device_target_ != kGPUDevice) {
      // non-GPU targets: count the step and check all watchpoints at graph end
      num_step_++;
      MS_LOG(INFO) << "Debugger suspend at end of step; number of steps executed: " << num_step_;
      SendWatchpointsAndSuspend(CheckWatchpoints());
    } else {
      // GPU: watchpoints are presumably checked per node elsewhere; just wait
      // for the next command here
      CommandLoop();
    }
  }
}
  167. bool Debugger::ReadNodeDataRequired() {
  168. if (debugger_enabled_ && !is_dataset_graph_) {
  169. auto watchpoint_table = debug_services_->GetWatchpointTable();
  170. auto is_watchpoint = debug_services_->IsWatchPoint(cur_name_, watchpoint_table);
  171. // if node has a watchpoint on it, is next_to node, or continue_to node then read the kernel tensor data
  172. if (is_watchpoint || (run_level_ == "node" && (node_name_ == "" || node_name_ == cur_name_))) {
  173. return true;
  174. }
  175. }
  176. return false;
  177. }
  178. void Debugger::PostExecuteNode() {
  179. // access lock for public method
  180. std::lock_guard<std::mutex> a_lock(access_lock_);
  181. if (debugger_enabled_ && !is_dataset_graph_) {
  182. auto watchpoint_table = debug_services_->GetWatchpointTable();
  183. auto is_watchpoint = debug_services_->IsWatchPoint(cur_name_, watchpoint_table);
  184. // if kernel is watchpoint,and get hit. suspend.
  185. if (is_watchpoint) {
  186. auto hits = CheckSingleWatchpoint(cur_name_);
  187. if (!hits.empty()) {
  188. SendWatchpointsAndSuspend(hits);
  189. }
  190. }
  191. // if kernel is not watchpoint and is next_to or continue_to node, suspend.
  192. if (run_level_ == "node" && (node_name_ == "" || node_name_ == cur_name_)) {
  193. CommandLoop();
  194. }
  195. return;
  196. }
  197. }
  198. void Debugger::PostDebugOp() {
  199. // access lock for public method
  200. std::lock_guard<std::mutex> a_lock(access_lock_);
  201. // suspend if debugger is enabled
  202. if (debugger_enabled_ && !is_dataset_graph_) {
  203. MS_LOG(INFO) << "Debugger suspend at debug_op";
  204. CommandLoop();
  205. }
  206. }
  207. void Debugger::CheckGraphPtr(const KernelGraphPtr &graph_ptr) {
  208. if (graph_ptr_ != graph_ptr) {
  209. MS_LOG(INFO) << "Debugger got new graph: " << graph_ptr->graph_id();
  210. // save new graph_ptr
  211. graph_ptr_ = graph_ptr;
  212. // check if it is dataset graph
  213. CheckDatasetGraph();
  214. if (!is_dataset_graph_) {
  215. // only try to enable debugger if it is not a dataset graph
  216. EnableDebugger();
  217. if (debugger_enabled_) {
  218. // get graph proto and send to mindinsight
  219. SendGraphAndSuspend(GetGraphProto());
  220. }
  221. }
  222. }
  223. }
  224. void Debugger::CheckDatasetGraph() {
  225. // print parameter node names
  226. const auto &params = graph_ptr_->inputs();
  227. for (const auto &param : params) {
  228. MS_LOG(INFO) << "param: " << param->fullname_with_scope();
  229. }
  230. // check if there is GetNext or InitDataSetQueue node
  231. const auto &nodes = graph_ptr_->execution_order();
  232. for (const auto &node : nodes) {
  233. auto node_name = AnfAlgo::GetCNodeName(node);
  234. MS_LOG(INFO) << "node: " << node->fullname_with_scope();
  235. if (node_name == "GetNext" || node_name == "InitDataSetQueue") {
  236. MS_LOG(WARNING) << "Not enabling debugger for graph " << graph_ptr_->graph_id() << ": found dataset graph node "
  237. << node_name;
  238. is_dataset_graph_ = true;
  239. return;
  240. }
  241. }
  242. is_dataset_graph_ = false;
  243. }
  244. GraphProto Debugger::GetGraphProto() const {
  245. // convert kernel graph to debugger modelproto
  246. ModelProto model = GetDebuggerFuncGraphProto(graph_ptr_);
  247. return model.graph();
  248. }
// Send session metadata and the graph proto to MindInsight, then suspend in the
// command loop until the user resumes.
void Debugger::SendGraphAndSuspend(const GraphProto &graph_proto) {
  // prepare metadata: "<device_id>:<graph_id>" identifies this session
  std::string device_name = std::to_string(device_id_) + ":" + std::to_string(graph_ptr_->graph_id());
  Metadata metadata;
  metadata.set_device_name(device_name);
  metadata.set_cur_step(num_step_);
  metadata.set_backend(device_target_);
  metadata.set_cur_node(cur_name_);
  EventReply reply_metadata = grpc_client_->SendMetadata(metadata);
  if (reply_metadata.status() != reply_metadata.OK) {
    MS_LOG(ERROR) << "Error: SendMetadata failed";
  }
  // send graph to the MindInsight server
  EventReply reply = grpc_client_->SendGraph(graph_proto);
  if (reply.status() != reply.OK) {
    MS_LOG(ERROR) << "Error: SendGraph failed";
  }
  // enter command loop, wait and process commands
  CommandLoop();
}
  269. void Debugger::CommandLoop() {
  270. // prepare metadata
  271. std::string device_name = std::to_string(device_id_) + ":" + std::to_string(graph_ptr_->graph_id());
  272. Metadata metadata;
  273. metadata.set_device_name(device_name);
  274. metadata.set_cur_step(num_step_);
  275. metadata.set_backend(device_target_);
  276. metadata.set_cur_node(cur_name_);
  277. // loop exit flag
  278. bool run = false;
  279. int num_wait_fail = 0;
  280. const int max_num_wait_fail = 5;
  281. while (!run) {
  282. // wait for command
  283. EventReply reply = grpc_client_->WaitForCommand(metadata);
  284. if (reply.status() != reply.OK) {
  285. MS_LOG(ERROR) << "Error: WaitForCommand failed";
  286. num_wait_fail++;
  287. if (num_wait_fail > max_num_wait_fail) {
  288. MS_LOG(ERROR) << "Maximum number of WaitForCommand retry reached: exiting training session";
  289. Exit();
  290. }
  291. MS_LOG(ERROR) << "Number of consecutive WaitForCommand fail:" << num_wait_fail << "; Retry after "
  292. << num_wait_fail << "s";
  293. std::this_thread::sleep_for(std::chrono::milliseconds(1000 * num_wait_fail));
  294. continue;
  295. }
  296. // get type of the command in reply
  297. DebuggerCommand cmd = GetCommand(reply);
  298. if (cmd == DebuggerCommand::kUnknownCMD) {
  299. MS_LOG(ERROR) << "Error: debugger recieved unknown command";
  300. continue;
  301. }
  302. MS_LOG(INFO) << "recieved command: ";
  303. switch (cmd) {
  304. case DebuggerCommand::kUnknownCMD:
  305. MS_LOG(INFO) << "UnknownCMD";
  306. break;
  307. case DebuggerCommand::kExitCMD:
  308. MS_LOG(INFO) << "ExitCMD";
  309. Exit();
  310. break;
  311. case DebuggerCommand::kRunCMD:
  312. MS_LOG(INFO) << "RunCMD";
  313. {
  314. // print run cmd content
  315. // get run_level and node_name
  316. run_level_ = GetRunLevel(reply);
  317. node_name_ = GetNodeName(reply);
  318. MS_LOG(INFO) << "run_level: " << run_level_;
  319. MS_LOG(INFO) << "node_name_: " << node_name_;
  320. }
  321. // exit loop
  322. run = true;
  323. break;
  324. case DebuggerCommand::kSetCMD:
  325. MS_LOG(INFO) << "SetCMD";
  326. {
  327. // print set cmd content
  328. ProtoVector<WatchNode> recieved_nodes = GetWatchnodes(reply);
  329. for (auto node : recieved_nodes) {
  330. MS_LOG(INFO) << "node name: " << node.node_name();
  331. MS_LOG(INFO) << "node type: " << node.node_type();
  332. }
  333. MS_LOG(INFO) << "condition: " << GetWatchcondition(reply).condition();
  334. MS_LOG(INFO) << "id: " << GetWatchpointID(reply);
  335. MS_LOG(INFO) << "delete: " << GetWatchpointDelete(reply);
  336. }
  337. MS_LOG(INFO) << "Setting watchpoint";
  338. if (GetWatchpointDelete(reply)) {
  339. RemoveWatchpoint(GetWatchpointID(reply));
  340. } else {
  341. SetWatchpoint(GetWatchnodes(reply), GetWatchcondition(reply), GetWatchpointID(reply));
  342. }
  343. break;
  344. case DebuggerCommand::kViewCMD:
  345. MS_LOG(INFO) << "ViewCMD";
  346. {
  347. // print view cmd content
  348. ProtoVector<TensorProto> received_tensors = GetTensors(reply);
  349. for (auto tensor : received_tensors) {
  350. MS_LOG(INFO) << "tensor node name: " << tensor.node_name();
  351. MS_LOG(INFO) << "tensor slot: " << tensor.slot();
  352. MS_LOG(INFO) << "tensor finished: " << std::boolalpha << tensor.finished() << std::noboolalpha;
  353. MS_LOG(INFO) << "tensor iter: " << tensor.iter();
  354. MS_LOG(INFO) << "tensor truncate: " << std::boolalpha << tensor.truncate() << std::noboolalpha;
  355. }
  356. }
  357. MS_LOG(INFO) << "Sending tensors";
  358. std::list<TensorProto> tensors = LoadTensors(GetTensors(reply));
  359. {
  360. // print view cmd reply
  361. for (auto tensor : tensors) {
  362. MS_LOG(INFO) << "tensor node name: " << tensor.node_name();
  363. MS_LOG(INFO) << "tensor slot: " << tensor.slot();
  364. MS_LOG(INFO) << "tensor finished: " << std::boolalpha << tensor.finished() << std::noboolalpha;
  365. MS_LOG(INFO) << "tensor iter: " << tensor.iter();
  366. MS_LOG(INFO) << "tensor truncate: " << std::boolalpha << tensor.truncate() << std::noboolalpha;
  367. MS_LOG(INFO) << "tensor dims: ";
  368. for (auto dim : tensor.dims()) {
  369. MS_LOG(INFO) << dim << ",";
  370. }
  371. MS_LOG(INFO) << "tensor dtype: " << tensor.data_type();
  372. }
  373. }
  374. EventReply send_tensors_reply = grpc_client_->SendTensors(tensors);
  375. if (send_tensors_reply.status() != send_tensors_reply.OK) {
  376. MS_LOG(ERROR) << "Error: SendTensors failed";
  377. }
  378. break;
  379. }
  380. }
  381. }
  382. void Debugger::SetWatchpoint(const ProtoVector<WatchNode> &nodes, const WatchCondition &condition, const int32_t id) {
  383. std::vector<std::tuple<std::string, bool>> check_node_list;
  384. std::transform(nodes.begin(), nodes.end(), std::back_inserter(check_node_list),
  385. [](WatchNode node) -> std::tuple<std::string, bool> {
  386. return make_tuple(node.node_name(), node.node_type() == "scope");
  387. });
  388. debug_services_->AddWatchpoint(id, condition.condition(), check_node_list);
  389. }
// Drop the watchpoint with the given id from DebugServices.
void Debugger::RemoveWatchpoint(const int32_t id) { debug_services_->RemoveWatchpoint(id); }
  391. std::list<TensorProto> Debugger::LoadTensors(const ProtoVector<TensorProto> &tensors) const {
  392. std::vector<std::string> name;
  393. std::vector<std::string> ret_name;
  394. std::vector<char *> data_ptr;
  395. std::vector<unsigned int> data_size;
  396. std::vector<TypePtr> dtype;
  397. std::vector<std::vector<int>> shape;
  398. std::transform(tensors.begin(), tensors.end(), std::back_inserter(name), GetTensorFullName);
  399. // ret_name will contain tensor names that are found in TensorLoader
  400. // items in ret_name will be in the same order with tensors if found
  401. debug_services_->ReadNodesTensors(name, &ret_name, &data_ptr, &data_size, &dtype, &shape);
  402. std::list<TensorProto> tensor_list;
  403. unsigned int result_index = 0;
  404. for (auto tensor : tensors) {
  405. TensorProto tensor_item;
  406. tensor_item.set_node_name(tensor.node_name());
  407. tensor_item.set_slot(tensor.slot());
  408. tensor_item.set_iter(tensor.iter());
  409. tensor_item.set_truncate(tensor.truncate());
  410. tensor_item.clear_tensor_content();
  411. tensor_item.clear_data_type();
  412. tensor_item.clear_dims();
  413. // always set finished to true before big tensor splitting is supported
  414. tensor_item.set_finished(true);
  415. // return empty tensor if didn't find the requested tensor
  416. if (result_index >= ret_name.size() || ret_name[result_index] != GetTensorFullName(tensor)) {
  417. tensor_list.push_back(tensor_item);
  418. continue;
  419. }
  420. tensor_item.set_tensor_content(data_ptr[result_index], data_size[result_index]);
  421. tensor_item.set_data_type(GetDebuggerNumberDataType(dtype[result_index]));
  422. for (auto &elem : shape[result_index]) {
  423. tensor_item.add_dims(elem);
  424. }
  425. // add tensor to result list and increment result_index to check next item in ret_name
  426. tensor_list.push_back(tensor_item);
  427. result_index++;
  428. }
  429. return tensor_list;
  430. }
// Terminate the training process on user request or after repeated gRPC
// failures; pipeline resources are released before exiting.
void Debugger::Exit() {
  // clear resource before exit
  pipeline::ClearResAtexit();
  std::exit(EXIT_FAILURE);
}
  436. std::list<WatchpointHit> Debugger::CheckWatchpoints() const {
  437. std::vector<std::string> name;
  438. std::vector<std::string> slot;
  439. std::vector<char *> data_ptr;
  440. std::vector<unsigned int> data_size;
  441. std::vector<int> condition;
  442. std::vector<unsigned int> watchpoint_id;
  443. debug_services_->CheckWatchpoints(&name, &slot, &data_ptr, &data_size, &condition, &watchpoint_id);
  444. std::list<WatchpointHit> hits;
  445. for (unsigned int i = 0; i < name.size(); i++) {
  446. WatchpointHit hit;
  447. hit.set_id(watchpoint_id[i]);
  448. // here TensorProto act as a tensor indicator, not sending tensor content
  449. TensorProto *tensor_item = hit.mutable_tensor();
  450. tensor_item->set_node_name(name[i]);
  451. tensor_item->set_slot(slot[i]);
  452. tensor_item->set_finished(true);
  453. WatchCondition *condition_item = hit.mutable_watch_condition();
  454. condition_item->set_condition(debugger::WatchCondition_Condition(condition[i]));
  455. hits.push_back(hit);
  456. }
  457. return hits;
  458. }
  459. std::list<WatchpointHit> Debugger::CheckSingleWatchpoint(std::string watchnode) const {
  460. auto tensor_loader = debug_services_->tensor_loader();
  461. auto tensors = tensor_loader->GetNodeTensorMap(watchnode);
  462. std::list<WatchpointHit> hits;
  463. for (std::vector<std::shared_ptr<TensorData>>::iterator it = tensors.begin(); it != tensors.end(); ++it) {
  464. auto cur_tensor = *it;
  465. std::string name = "";
  466. std::string slot = "";
  467. char *data_ptr = nullptr;
  468. unsigned int data_size = 0;
  469. int condition = -1;
  470. unsigned int watchpoint_id = -1;
  471. WatchpointHit hit;
  472. debug_services_->CheckSingleWatchpoint(cur_tensor, &name, &slot, &data_ptr, &data_size, &condition, &watchpoint_id);
  473. if (name != "") {
  474. hit.set_id(watchpoint_id);
  475. // here TensorProto act as a tensor indicator, not sending tensor content
  476. TensorProto *tensor_item = hit.mutable_tensor();
  477. tensor_item->set_node_name(name);
  478. tensor_item->set_slot(slot);
  479. tensor_item->set_finished(true);
  480. WatchCondition *condition_item = hit.mutable_watch_condition();
  481. condition_item->set_condition(debugger::WatchCondition_Condition(condition));
  482. hits.push_back(hit);
  483. }
  484. }
  485. return hits;
  486. }
// Report the given watchpoint hits (if any) to MindInsight, then suspend in
// the command loop until the user resumes.
void Debugger::SendWatchpointsAndSuspend(const std::list<WatchpointHit> &points) {
  // send info about watchpoint
  if (!points.empty()) {
    EventReply reply = grpc_client_->SendWatchpointHits(points);
    if (reply.status() != reply.OK) {
      MS_LOG(ERROR) << "Error: SendWatchpointHits failed";
    }
  }
  // enter command loop
  CommandLoop();
}
// Non-owning pointer to DebugServices; nullptr until EnableDebugger() runs.
DebugServices *Debugger::debug_services() const { return debug_services_.get(); }
// Whether the debugger was successfully enabled.
bool Debugger::debugger_enabled() const { return debugger_enabled_; }
  500. DebuggerCommand GetCommand(const EventReply &reply) {
  501. DebuggerCommand cmd = DebuggerCommand::kUnknownCMD;
  502. switch (reply.cmd_case()) {
  503. case debugger::EventReply::CmdCase::kExit:
  504. cmd = DebuggerCommand::kExitCMD;
  505. break;
  506. case debugger::EventReply::CmdCase::kRunCmd:
  507. cmd = DebuggerCommand::kRunCMD;
  508. break;
  509. case debugger::EventReply::CmdCase::kSetCmd:
  510. cmd = DebuggerCommand::kSetCMD;
  511. break;
  512. case debugger::EventReply::CmdCase::kViewCmd:
  513. cmd = DebuggerCommand::kViewCMD;
  514. break;
  515. default:
  516. MS_LOG(ERROR) << "Error: UnknownCMD";
  517. break;
  518. }
  519. return cmd;
  520. }
  521. ProtoVector<WatchNode> GetWatchnodes(const EventReply &reply) {
  522. if (!reply.has_set_cmd()) {
  523. MS_LOG(ERROR) << "Error: Not SetCMD, can not get WatchNodes. Returning default value: ProtoVector<WatchNode>().";
  524. return ProtoVector<WatchNode>();
  525. }
  526. return reply.set_cmd().watch_nodes();
  527. }
  528. std::string GetRunLevel(const EventReply &reply) {
  529. if (!reply.has_run_cmd()) {
  530. MS_LOG(ERROR) << "Error: Not RunCMD, can not get RunLevel. Returning default value: "
  531. "";
  532. return "";
  533. }
  534. return reply.run_cmd().run_level();
  535. }
  536. std::string GetNodeName(const EventReply &reply) {
  537. if (!reply.has_run_cmd()) {
  538. MS_LOG(ERROR) << "Error: Not RunCMD, can not get NodeName. Returning default value: "
  539. "";
  540. return "";
  541. }
  542. return reply.run_cmd().node_name();
  543. }
  544. WatchCondition GetWatchcondition(const EventReply &reply) {
  545. if (!reply.has_set_cmd() || !reply.set_cmd().has_watch_condition()) {
  546. MS_LOG(ERROR) << "Error: Can not get WatchCondition from command. Returning default value: WatchCondition().";
  547. return WatchCondition();
  548. }
  549. return reply.set_cmd().watch_condition();
  550. }
  551. int32_t GetWatchpointID(const EventReply &reply) {
  552. if (!reply.has_set_cmd()) {
  553. MS_LOG(ERROR) << "Error: Not SetCMD, can not get Watchpoint ID. Returning default value: 0.";
  554. return 0;
  555. }
  556. return reply.set_cmd().id();
  557. }
  558. bool GetWatchpointDelete(const EventReply &reply) {
  559. if (!reply.has_set_cmd()) {
  560. MS_LOG(ERROR) << "Error: Not SetCMD, can not get Watchpoint delete flag. Returning default value: false.";
  561. return false;
  562. }
  563. return reply.set_cmd().delete_();
  564. }
  565. ProtoVector<TensorProto> GetTensors(const EventReply &reply) {
  566. if (!reply.has_view_cmd()) {
  567. MS_LOG(ERROR) << "Error: Not ViewCMD, can not get Tensors. Returning default value: ProtoVector<TensorProto>().";
  568. return ProtoVector<TensorProto>();
  569. }
  570. return reply.view_cmd().tensors();
  571. }
  572. std::string GetTensorFullName(const TensorProto &tensor) {
  573. string node_name = tensor.node_name();
  574. if (tensor.truncate()) {
  575. // scopes in node name are seperated by '/'
  576. // use the name without scope if truncate is true
  577. std::size_t found = node_name.find_last_of("/");
  578. node_name = node_name.substr(found + 1);
  579. }
  580. return node_name + ":" + tensor.slot() + (tensor.iter() == "" ? "" : ":" + tensor.iter());
  581. }
// Whether partial memory reuse was requested via MS_DEBUGGER_PARTIAL_MEM.
bool Debugger::partial_memory() { return partial_memory_; }
// Record the kernel currently executing; used for watchpoint lookups.
void Debugger::SetCurNode(std::string cur_name) {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  cur_name_ = cur_name;
}
// Current run level ("node" when single-stepping per node).
std::string Debugger::run_level() const { return run_level_; }
// Overwrite the step counter (e.g. when the runtime tracks steps itself).
void Debugger::SetStepNum(int32_t cur_num_step) {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  num_step_ = cur_num_step;
}
// Number of steps executed so far.
int32_t Debugger::step_num() const { return num_step_; }
}  // namespace mindspore