
debugger.cc 27 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <dirent.h>
#include <stdio.h>
#include <fstream>
#include <tuple>
#include <vector>
#include <algorithm>
#include <iostream>
#include <cstring>
#include <utility>
#include <map>
#include "debug/debugger/debugger.h"
#include "debug/data_dump_parser.h"
#include "pipeline/jit/pipeline.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "runtime/device/kernel_runtime_manager.h"

using debugger::EventReply;
using debugger::GraphProto;
using debugger::ModelProto;
using debugger::TensorProto;
using debugger::WatchCondition;
using debugger::WatchCondition_Condition_inf;
using debugger::WatchCondition_Condition_nan;
using debugger::WatchNode;
using debugger::WatchpointHit;

#define CHUNK_SIZE (1024 * 1024 * 3)  // parenthesized so the macro expands safely inside larger expressions
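// CHUNK_SIZE caps each TensorProto's tensor_content at 3 MB; LoadTensors()
// below splits larger tensors into several protos so that each outgoing gRPC
// message stays under gRPC's default 4 MB message-size limit (the 4 MB figure
// is gRPC's documented default, stated here as context; nothing in this file
// configures it).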

namespace mindspore {
DebuggerPtr Debugger::debugger_ = nullptr;
std::mutex Debugger::instance_lock_;

Debugger::Debugger()
    : grpc_client_(nullptr),
      debug_services_(nullptr),
      device_id_(0),
      device_target_(""),
      num_step_(0),
      debugger_enabled_(false),
      run_level_(""),
      node_name_(""),
      cur_name_(""),
      is_dataset_graph_(false),
      partial_memory_(false),
      last_overflow_bin_(0),
      overflow_bin_path_("") {}

void Debugger::Init(const uint32_t device_id, const std::string device_target) {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  // save device_id
  MS_LOG(INFO) << "Debugger got device_id: " << device_id;
  device_id_ = device_id;
  MS_LOG(INFO) << "Debugger got device_target: " << device_target;
  device_target_ = device_target;
}

void Debugger::EnableDebugger() {
  // reset some of the class members
  num_step_ = 0;
  debugger_enabled_ = false;
  partial_memory_ = false;
  grpc_client_ = nullptr;
  debug_services_ = nullptr;
  // see if dump using debugger backend is enabled
  bool dump_enabled = CheckDebuggerDumpEnabled();
  MS_LOG(INFO) << "dump using debugger backend = " << dump_enabled;
  // check if debugger enabled
  debugger_enabled_ = CheckDebuggerEnabled();
  MS_LOG(INFO) << "debugger_enabled_ = " << debugger_enabled_;
  if (!debugger_enabled_ && !dump_enabled) {
    MS_LOG(INFO) << "Not enabling debugger. Set environment variable ENABLE_MS_DEBUGGER=1 to enable debugger.";
    return;
  }
  // configure grpc host
  const char *env_host_str = std::getenv("MS_DEBUGGER_HOST");
  std::string host;
  if (env_host_str != nullptr) {
    MS_LOG(INFO) << "Getenv MS_DEBUGGER_HOST: " << env_host_str;
    host = std::string(env_host_str);
  } else {
    MS_LOG(INFO) << "Environment variable MS_DEBUGGER_HOST doesn't exist. Using default debugger host: localhost";
    host = "localhost";
  }
  // configure grpc port
  const char *env_port_str = std::getenv("MS_DEBUGGER_PORT");
  std::string port;
  if (env_port_str != nullptr) {
    MS_LOG(INFO) << "Getenv MS_DEBUGGER_PORT: " << env_port_str;
    port = std::string(env_port_str);
  } else {
    MS_LOG(INFO) << "Environment variable MS_DEBUGGER_PORT doesn't exist. Using default debugger port: 50051";
    port = "50051";
  }
  // configure partial memory reuse
  const char *env_partial_mem_str = std::getenv("MS_DEBUGGER_PARTIAL_MEM");
  if (env_partial_mem_str != nullptr) {
    MS_LOG(INFO) << "Getenv MS_DEBUGGER_PARTIAL_MEM: " << env_partial_mem_str;
    if (std::strcmp(env_partial_mem_str, "1") == 0) {
      partial_memory_ = true;
    }
  }
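  // Typical shell configuration for a debugging session (illustrative values;
  // the variable names are exactly the ones read above):
  //   export ENABLE_MS_DEBUGGER=1        # turn the debugger on
  //   export MS_DEBUGGER_HOST=127.0.0.1  # MindInsight host
  //   export MS_DEBUGGER_PORT=50051      # MindInsight port
  //   export MS_DEBUGGER_PARTIAL_MEM=1   # optional: partial memory reuse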
  // switch memory reuse on or off
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  context_ptr->set_param<bool>(MS_CTX_ENABLE_MEM_REUSE, partial_memory_);
  // print some message about memory reuse to user
  if (partial_memory_) {
    MS_LOG(WARNING) << "Partial Memory Reuse is enabled. Note: 1. Please only set watchpoints before running the "
                       "first step. 2. Tensor values are only available for nodes that are watched by any watchpoint.";
  } else {
    MS_LOG(INFO) << "Memory Reuse is disabled. Set environment variable MS_DEBUGGER_PARTIAL_MEM=1 to reduce memory "
                    "usage for large models.";
  }
#ifdef ENABLE_D
  // set operation overflow info
  overflow_bin_path_ = DataDumpParser::GetInstance().GetOpOverflowBinPath(graph_ptr_->graph_id(), device_id_);
  // new overflow dump files will have a timestamp greater than last_overflow_bin_
  last_overflow_bin_ = 0;
  DIR *d;
  d = opendir(overflow_bin_path_.c_str());
  if (d != nullptr) {
    struct dirent *dir;
    while ((dir = readdir(d)) != NULL) {
      if (dir->d_type == DT_REG) {
        std::string file_path = overflow_bin_path_;
        file_path.append(dir->d_name);
        std::size_t found = file_path.find_last_of(".");
        if (found == std::string::npos) {
          continue;
        }
        std::string overflow_time = file_path.substr(found + 1);
        if (stod(overflow_time) <= last_overflow_bin_) {
          MS_LOG(INFO) << "Old op overflow bin file: " << file_path;
          continue;
        }
        last_overflow_bin_ = stod(overflow_time);
      }
    }
    closedir(d);  // close the directory stream once the scan is done
    MS_LOG(INFO) << "Latest op overflow bin timestamp: " << last_overflow_bin_;
  }
#endif
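  // The scan above assumes overflow dump files carry a trailing ".<timestamp>"
  // suffix, so the newest file is found by comparing the numeric value after
  // the last '.'; CheckOpOverflow() below relies on the same naming
  // convention (an assumption about the Ascend dump format, not verified
  // here).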
  // initialize grpc client
  if (debugger_enabled_) {
    grpc_client_ = std::make_unique<GrpcClient>(host, port);
  }
  debug_services_ = std::make_unique<DebugServices>();
}

bool Debugger::CheckDebuggerDumpEnabled() {
  // see if dump is enabled
  if (device_target_ == kGPUDevice) {
    auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
    MS_EXCEPTION_IF_NULL(runtime_instance);
    return runtime_instance->DumpDataEnabled();
  }
  return false;
}

bool Debugger::CheckDebuggerEnabled() {
  // get env variables to configure debugger
  const char *env_enable_str = std::getenv("ENABLE_MS_DEBUGGER");
  if (env_enable_str != nullptr) {
    if (std::strcmp(env_enable_str, "1") == 0) {
      return true;
    }
  }
  return false;
}

bool Debugger::DebuggerBackendEnabled() { return CheckDebuggerDumpEnabled() || CheckDebuggerEnabled(); }

void Debugger::Reset() {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  // reset components
  device_id_ = 0;
  device_target_ = "";
  num_step_ = 0;
  debugger_enabled_ = false;
  is_dataset_graph_ = false;
  partial_memory_ = false;
  graph_ptr_ = nullptr;
  grpc_client_ = nullptr;
  debug_services_ = nullptr;
  last_overflow_bin_ = 0;
  overflow_bin_path_ = "";
  stream_task_to_opname_.clear();
}

void Debugger::PreExecute(const KernelGraphPtr &graph_ptr) {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  if (debugger_->DebuggerBackendEnabled()) {
    // check and save graph_ptr, suspend if graph is new
    CheckGraphPtr(graph_ptr);
  }
}

void Debugger::PostExecute() {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  if (debugger_->DebuggerBackendEnabled()) {
    // analyze tensor data and send any watchpoints that have been hit
    if (run_level_ == "node") {
      MS_LOG(INFO) << "Debugger is in node-level mode";
      return;
    }
    if (debugger_enabled_ && !is_dataset_graph_) {
      if (device_target_ != kGPUDevice) {
        num_step_++;
        MS_LOG(INFO) << "Debugger suspend at end of step; number of steps executed: " << num_step_;
        SendWatchpointsAndSuspend(CheckWatchpoints());
      } else {
        CommandLoop();
      }
    }
  }
}

bool Debugger::ReadNodeDataRequired() {
  if (debugger_enabled_ && !is_dataset_graph_) {
    auto watchpoint_table = debug_services_->GetWatchpointTable();
    auto is_watchpoint = debug_services_->IsWatchPoint(cur_name_, watchpoint_table);
    // if the node has a watchpoint on it, or is a next_to or continue_to node, read the kernel tensor data
    if (is_watchpoint || (run_level_ == "node" && (node_name_ == "" || node_name_ == cur_name_))) {
      return true;
    }
  }
  return false;
}

void Debugger::PostExecuteNode() {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  if (debugger_enabled_ && !is_dataset_graph_) {
    auto watchpoint_table = debug_services_->GetWatchpointTable();
    auto is_watchpoint = debug_services_->IsWatchPoint(cur_name_, watchpoint_table);
    // if the kernel is a watchpoint and the watchpoint was hit, suspend
    if (is_watchpoint) {
      auto hits = CheckWatchpoints(cur_name_);
      if (!hits.empty()) {
        SendWatchpointsAndSuspend(hits);
      }
    }
    // if the kernel is not a watchpoint but is a next_to or continue_to node, suspend
    if (run_level_ == "node" && (node_name_ == "" || node_name_ == cur_name_)) {
      CommandLoop();
    }
    return;
  }
}

void Debugger::PostDebugOp() {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  // suspend if debugger is enabled
  if (debugger_enabled_ && !is_dataset_graph_) {
    MS_LOG(INFO) << "Debugger suspend at debug_op";
    CommandLoop();
  }
}

std::map<std::pair<uint32_t, uint32_t>, std::string> &Debugger::GetStreamTaskToOpnameMap() {
  return stream_task_to_opname_;
}

void Debugger::CheckGraphPtr(const KernelGraphPtr &graph_ptr) {
  if (graph_ptr_ != graph_ptr) {
    MS_LOG(INFO) << "Debugger got new graph: " << graph_ptr->graph_id();
    // save new graph_ptr
    graph_ptr_ = graph_ptr;
    // check if it is dataset graph
    CheckDatasetGraph();
    if (!is_dataset_graph_) {
      // only try to enable debugger if it is not a dataset graph
      EnableDebugger();
      if (debugger_enabled_) {
        // get graph proto and send to MindInsight
        SendGraphAndSuspend(GetGraphProto());
      }
    }
  }
}

void Debugger::CheckDatasetGraph() {
  // print parameter node names
  const auto &params = graph_ptr_->inputs();
  for (const auto &param : params) {
    MS_LOG(INFO) << "param: " << param->fullname_with_scope();
  }
  // check if there is GetNext or InitDataSetQueue node
  const auto &nodes = graph_ptr_->execution_order();
  for (const auto &node : nodes) {
    auto node_name = AnfAlgo::GetCNodeName(node);
    MS_LOG(INFO) << "node: " << node->fullname_with_scope();
    if (node_name == "GetNext" || node_name == "InitDataSetQueue") {
      MS_LOG(INFO) << "Not enabling debugger for graph " << graph_ptr_->graph_id() << ": found dataset graph node "
                   << node_name;
      is_dataset_graph_ = true;
      return;
    }
  }
  is_dataset_graph_ = false;
}

GraphProto Debugger::GetGraphProto() const {
  // convert kernel graph to debugger modelproto
  ModelProto model = GetDebuggerFuncGraphProto(graph_ptr_);
  return model.graph();
}

void Debugger::SendGraphAndSuspend(const GraphProto &graph_proto) {
  // prepare metadata
  std::string device_name = std::to_string(device_id_) + ":" + std::to_string(graph_ptr_->graph_id());
  Metadata metadata;
  metadata.set_device_name(device_name);
  metadata.set_cur_step(num_step_);
  metadata.set_backend(device_target_);
  metadata.set_cur_node(cur_name_);
  EventReply reply_metadata = grpc_client_->SendMetadata(metadata);
  if (reply_metadata.status() != reply_metadata.OK) {
    MS_LOG(ERROR) << "Error: SendMetadata failed";
  }
  // send graph to the MindInsight server
  EventReply reply = grpc_client_->SendGraph(graph_proto);
  if (reply.status() != reply.OK) {
    MS_LOG(ERROR) << "Error: SendGraph failed";
  }
  // enter command loop, wait and process commands
  CommandLoop();
}

void Debugger::CommandLoop() {
  // prepare metadata
  std::string device_name = std::to_string(device_id_) + ":" + std::to_string(graph_ptr_->graph_id());
  Metadata metadata;
  metadata.set_device_name(device_name);
  metadata.set_cur_step(num_step_);
  metadata.set_backend(device_target_);
  metadata.set_cur_node(cur_name_);
  // loop exit flag
  bool run = false;
  int num_wait_fail = 0;
  const int max_num_wait_fail = 5;
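  // On a failed WaitForCommand, retry with a linear backoff (1 s, 2 s, ... up
  // to 5 s) and give up after max_num_wait_fail consecutive failures by
  // tearing the session down via Exit().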
  while (!run) {
    // wait for command
    EventReply reply = grpc_client_->WaitForCommand(metadata);
    if (reply.status() != reply.OK) {
      MS_LOG(ERROR) << "Error: WaitForCommand failed";
      num_wait_fail++;
      if (num_wait_fail > max_num_wait_fail) {
        MS_LOG(ERROR) << "Maximum number of WaitForCommand retries reached: exiting training session";
        Exit();
      }
      MS_LOG(ERROR) << "Number of consecutive WaitForCommand failures: " << num_wait_fail << "; retry after "
                    << num_wait_fail << "s";
      std::this_thread::sleep_for(std::chrono::milliseconds(1000 * num_wait_fail));
      continue;
    }
    // get type of the command in reply
    DebuggerCommand cmd = GetCommand(reply);
    if (cmd == DebuggerCommand::kUnknownCMD) {
      MS_LOG(ERROR) << "Error: debugger received unknown command";
      continue;
    }
    MS_LOG(INFO) << "received command: ";
    switch (cmd) {
      case DebuggerCommand::kUnknownCMD:
        MS_LOG(INFO) << "UnknownCMD";
        break;
      case DebuggerCommand::kExitCMD:
        MS_LOG(INFO) << "ExitCMD";
        Exit();
        break;
      case DebuggerCommand::kRunCMD:
        MS_LOG(INFO) << "RunCMD";
        {
          // print run cmd content: get run_level and node_name
          run_level_ = GetRunLevel(reply);
          node_name_ = GetNodeName(reply);
          MS_LOG(INFO) << "run_level: " << run_level_;
          MS_LOG(INFO) << "node_name_: " << node_name_;
        }
        // exit loop
        run = true;
        break;
      case DebuggerCommand::kSetCMD:
        MS_LOG(INFO) << "SetCMD";
        {
          // print set cmd content
          ProtoVector<WatchNode> received_nodes = GetWatchnodes(reply);
          for (auto node : received_nodes) {
            MS_LOG(INFO) << "node name: " << node.node_name();
            MS_LOG(INFO) << "node type: " << node.node_type();
          }
          MS_LOG(INFO) << "condition: " << GetWatchcondition(reply).condition();
          MS_LOG(INFO) << "id: " << GetWatchpointID(reply);
          MS_LOG(INFO) << "delete: " << GetWatchpointDelete(reply);
        }
        MS_LOG(INFO) << "Setting watchpoint";
        if (GetWatchpointDelete(reply)) {
          RemoveWatchpoint(GetWatchpointID(reply));
        } else {
          SetWatchpoint(GetWatchnodes(reply), GetWatchcondition(reply), GetWatchpointID(reply));
        }
        break;
      case DebuggerCommand::kViewCMD: {
        MS_LOG(INFO) << "ViewCMD";
        {
          // print view cmd content
          ProtoVector<TensorProto> received_tensors = GetTensors(reply);
          for (auto tensor : received_tensors) {
            MS_LOG(INFO) << "tensor node name: " << tensor.node_name();
            MS_LOG(INFO) << "tensor slot: " << tensor.slot();
            MS_LOG(INFO) << "tensor finished: " << std::boolalpha << tensor.finished() << std::noboolalpha;
            MS_LOG(INFO) << "tensor iter: " << tensor.iter();
            MS_LOG(INFO) << "tensor truncate: " << std::boolalpha << tensor.truncate() << std::noboolalpha;
          }
        }
        MS_LOG(INFO) << "Sending tensors";
        std::list<TensorProto> tensors = LoadTensors(GetTensors(reply));
        {
          // print view cmd reply
          for (auto tensor : tensors) {
            MS_LOG(INFO) << "tensor node name: " << tensor.node_name();
            MS_LOG(INFO) << "tensor slot: " << tensor.slot();
            MS_LOG(INFO) << "tensor finished: " << std::boolalpha << tensor.finished() << std::noboolalpha;
            MS_LOG(INFO) << "tensor iter: " << tensor.iter();
            MS_LOG(INFO) << "tensor truncate: " << std::boolalpha << tensor.truncate() << std::noboolalpha;
            MS_LOG(INFO) << "tensor dims: ";
            for (auto dim : tensor.dims()) {
              MS_LOG(INFO) << dim << ",";
            }
            MS_LOG(INFO) << "tensor dtype: " << tensor.data_type();
          }
        }
        EventReply send_tensors_reply = grpc_client_->SendTensors(tensors);
        if (send_tensors_reply.status() != send_tensors_reply.OK) {
          MS_LOG(ERROR) << "Error: SendTensors failed";
        }
        break;
      }
    }
  }
}

void AddTensorProtoInfo(TensorProto *tensor_item, TensorProto tensor) {
  tensor_item->set_node_name(tensor.node_name());
  tensor_item->set_slot(tensor.slot());
  tensor_item->set_iter(tensor.iter());
  tensor_item->set_truncate(tensor.truncate());
  tensor_item->clear_tensor_content();
  tensor_item->clear_data_type();
  tensor_item->clear_dims();
}

void Debugger::SetWatchpoint(const ProtoVector<WatchNode> &nodes, const WatchCondition &condition, const int32_t id) {
  std::vector<std::tuple<std::string, bool>> check_node_list;
  std::transform(nodes.begin(), nodes.end(), std::back_inserter(check_node_list),
                 [](WatchNode node) -> std::tuple<std::string, bool> {
                   return make_tuple(node.node_name(), node.node_type() == "scope");
                 });
  debug_services_->AddWatchpoint(id, condition.condition(), condition.value(), check_node_list);
}
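// The boolean in each tuple flags nodes whose node_type is "scope"; the
// presumable intent is that such an entry matches every node under that scope
// rather than a single node, though the exact matching rule lives in
// DebugServices::AddWatchpoint, not here.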

void Debugger::RemoveWatchpoint(const int32_t id) { debug_services_->RemoveWatchpoint(id); }

std::list<TensorProto> Debugger::LoadTensors(const ProtoVector<TensorProto> &tensors) const {
  std::vector<std::string> name;
  std::vector<std::string> ret_name;
  std::vector<char *> data_ptr;
  std::vector<unsigned int> data_size;
  std::vector<TypePtr> dtype;
  std::vector<std::vector<int>> shape;
  std::transform(tensors.begin(), tensors.end(), std::back_inserter(name), GetTensorFullName);
  // ret_name will contain tensor names that are found in TensorLoader
  // items in ret_name will be in the same order as tensors, if found
  debug_services_->ReadNodesTensors(name, &ret_name, &data_ptr, &data_size, &dtype, &shape);
  std::list<TensorProto> tensor_list;
  unsigned int result_index = 0;
  for (auto tensor : tensors) {
    int size_iter = 0;
    if (result_index >= ret_name.size() || ret_name[result_index] != GetTensorFullName(tensor)) {
      // return an empty tensor if the requested tensor was not found
      TensorProto tensor_item;
      tensor_item.set_finished(true);
      AddTensorProtoInfo(&tensor_item, tensor);
      tensor_list.push_back(tensor_item);
      continue;
    }
    int tensor_size = data_size[result_index];
    while (size_iter < tensor_size) {
      int chunk_size = CHUNK_SIZE;
      TensorProto tensor_item;
      tensor_item.set_finished(false);
      if (tensor_size - size_iter <= CHUNK_SIZE) {
        chunk_size = tensor_size - size_iter;
        tensor_item.set_finished(true);
      }
      AddTensorProtoInfo(&tensor_item, tensor);
      tensor_item.set_tensor_content(data_ptr[result_index] + size_iter, chunk_size);
      tensor_item.set_data_type(GetDebuggerNumberDataType(dtype[result_index]));
      for (auto &elem : shape[result_index]) {
        tensor_item.add_dims(elem);
      }
      // add tensor to result list and increment result_index to check next item in ret_name
      tensor_list.push_back(tensor_item);
      size_iter += CHUNK_SIZE;
    }
    result_index++;
  }
  return tensor_list;
}
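// A tensor of S bytes therefore leaves LoadTensors as ceil(S / CHUNK_SIZE)
// TensorProto messages carrying identical metadata, where only the last one
// has finished == true; the receiver is expected to concatenate
// tensor_content in arrival order (an expectation inferred from the chunking
// above, not documented elsewhere in this file).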

void Debugger::Exit() {
  // clear resource before exit
  pipeline::ClearResAtexit();
  std::exit(EXIT_FAILURE);
}

std::list<WatchpointHit> Debugger::CheckWatchpoints(const std::string &watchnode) {
  std::vector<std::string> name;
  std::vector<std::string> slot;
  std::vector<int> condition;
  std::vector<unsigned int> watchpoint_id;
  std::vector<std::string> overflow_ops;
#ifdef ENABLE_D
  overflow_ops = CheckOpOverflow();
#endif
  auto tensor_loader = debug_services_->tensor_loader();
  std::vector<std::shared_ptr<TensorData>> tensor_list;
  if (watchnode.empty()) {
    tensor_list = tensor_loader->GetTensor();
  } else {
    tensor_list = tensor_loader->GetNodeTensorMap(watchnode);
  }
  debug_services_->CheckWatchpoints(&name, &slot, &condition, &watchpoint_id, overflow_ops, tensor_list);
  std::list<WatchpointHit> hits;
  for (unsigned int i = 0; i < name.size(); i++) {
    WatchpointHit hit;
    hit.set_id(watchpoint_id[i]);
    // here TensorProto acts as a tensor indicator, not sending tensor content
    TensorProto *tensor_item = hit.mutable_tensor();
    tensor_item->set_node_name(name[i]);
    tensor_item->set_slot(slot[i]);
    tensor_item->set_finished(true);
    WatchCondition *condition_item = hit.mutable_watch_condition();
    condition_item->set_condition(debugger::WatchCondition_Condition(condition[i]));
    hits.push_back(hit);
  }
  return hits;
}

void Debugger::SendWatchpointsAndSuspend(const std::list<WatchpointHit> &points) {
  // send info about watchpoint
  if (!points.empty()) {
    EventReply reply = grpc_client_->SendWatchpointHits(points);
    if (reply.status() != reply.OK) {
      MS_LOG(ERROR) << "Error: SendWatchpointHits failed";
    }
  }
  // enter command loop
  CommandLoop();
}

DebugServices *Debugger::debug_services() const { return debug_services_.get(); }

bool Debugger::debugger_enabled() const { return debugger_enabled_; }

DebuggerCommand GetCommand(const EventReply &reply) {
  DebuggerCommand cmd = DebuggerCommand::kUnknownCMD;
  switch (reply.cmd_case()) {
    case debugger::EventReply::CmdCase::kExit:
      cmd = DebuggerCommand::kExitCMD;
      break;
    case debugger::EventReply::CmdCase::kRunCmd:
      cmd = DebuggerCommand::kRunCMD;
      break;
    case debugger::EventReply::CmdCase::kSetCmd:
      cmd = DebuggerCommand::kSetCMD;
      break;
    case debugger::EventReply::CmdCase::kViewCmd:
      cmd = DebuggerCommand::kViewCMD;
      break;
    default:
      MS_LOG(DEBUG) << "Debug: UnknownCMD";
      break;
  }
  return cmd;
}

ProtoVector<WatchNode> GetWatchnodes(const EventReply &reply) {
  if (!reply.has_set_cmd()) {
    MS_LOG(ERROR) << "Error: Not SetCMD, cannot get WatchNodes. Returning default value: ProtoVector<WatchNode>().";
    return ProtoVector<WatchNode>();
  }
  return reply.set_cmd().watch_nodes();
}

std::string GetRunLevel(const EventReply &reply) {
  if (!reply.has_run_cmd()) {
    MS_LOG(ERROR) << "Error: Not RunCMD, cannot get RunLevel. Returning default value: empty string.";
    return "";
  }
  return reply.run_cmd().run_level();
}

std::string GetNodeName(const EventReply &reply) {
  if (!reply.has_run_cmd()) {
    MS_LOG(ERROR) << "Error: Not RunCMD, cannot get NodeName. Returning default value: empty string.";
    return "";
  }
  return reply.run_cmd().node_name();
}

WatchCondition GetWatchcondition(const EventReply &reply) {
  if (!reply.has_set_cmd() || !reply.set_cmd().has_watch_condition()) {
    MS_LOG(ERROR) << "Error: Cannot get WatchCondition from command. Returning default value: WatchCondition().";
    return WatchCondition();
  }
  return reply.set_cmd().watch_condition();
}

int32_t GetWatchpointID(const EventReply &reply) {
  if (!reply.has_set_cmd()) {
    MS_LOG(ERROR) << "Error: Not SetCMD, cannot get Watchpoint ID. Returning default value: 0.";
    return 0;
  }
  return reply.set_cmd().id();
}

bool GetWatchpointDelete(const EventReply &reply) {
  if (!reply.has_set_cmd()) {
    MS_LOG(ERROR) << "Error: Not SetCMD, cannot get Watchpoint delete flag. Returning default value: false.";
    return false;
  }
  return reply.set_cmd().delete_();
}

ProtoVector<TensorProto> GetTensors(const EventReply &reply) {
  if (!reply.has_view_cmd()) {
    MS_LOG(ERROR) << "Error: Not ViewCMD, cannot get Tensors. Returning default value: ProtoVector<TensorProto>().";
    return ProtoVector<TensorProto>();
  }
  return reply.view_cmd().tensors();
}

std::string GetTensorFullName(const TensorProto &tensor) {
  string node_name = tensor.node_name();
  if (tensor.truncate()) {
    // scopes in the node name are separated by '/'
    // use the name without scope if truncate is true
    std::size_t found = node_name.find_last_of("/");
    node_name = node_name.substr(found + 1);
  }
  return node_name + ":" + tensor.slot() + (tensor.iter() == "" ? "" : ":" + tensor.iter());
}

bool Debugger::partial_memory() { return partial_memory_; }

void Debugger::SetCurNode(std::string cur_name) {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  cur_name_ = cur_name;
}

std::string Debugger::run_level() const { return run_level_; }

void Debugger::SetStepNum(int32_t cur_num_step) {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  num_step_ = cur_num_step;
}

int32_t Debugger::step_num() const { return num_step_; }

uint64_t BytestoInt64(const std::vector<char> &buffer) {
  // cast each byte through uint8_t so that negative char values do not
  // sign-extend when widened to 64 bits and shifted
  uint64_t ret;
  ret = ((uint64_t)(uint8_t)buffer[7] << 56) | ((uint64_t)(uint8_t)buffer[6] << 48) |
        ((uint64_t)(uint8_t)buffer[5] << 40) | ((uint64_t)(uint8_t)buffer[4] << 32) |
        ((uint64_t)(uint8_t)buffer[3] << 24) | ((uint64_t)(uint8_t)buffer[2] << 16) |
        ((uint64_t)(uint8_t)buffer[1] << 8) | (uint64_t)(uint8_t)buffer[0];
  return ret;
}
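// Example: a buffer beginning {0x2A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
// decodes to 42, i.e. the overflow dump header is read as little-endian (an
// assumption about the Ascend dump layout implied by this decoder).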

#define BUF_SIZ 256

std::vector<std::string> Debugger::CheckOpOverflow() {
  std::vector<double> bin_list;
  std::vector<std::string> op_names;
  DIR *d;
  struct dirent *dir = nullptr;
  d = opendir(overflow_bin_path_.c_str());
  if (d != nullptr) {
    while ((dir = readdir(d)) != NULL) {
      if (dir->d_type == DT_REG) {
        std::string file_path = overflow_bin_path_;
        file_path.append(dir->d_name);
        std::string file_name = dir->d_name;
        std::size_t found = file_name.find_last_of(".");
        if (found == std::string::npos) {
          continue;
        }
        std::string overflow_time = file_name.substr(found + 1);
        if (stod(overflow_time) <= last_overflow_bin_) {
          MS_LOG(INFO) << "File already processed " << file_name;
          continue;
        }
        bin_list.push_back(stod(overflow_time));
        std::fstream infile;
        infile.open(file_path.c_str(), std::ios::binary | std::ios::in);
        infile.seekg(313, std::ios::beg);
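        // The 313-byte seek and the 8- and 16-byte offsets below are magic
        // numbers tied to the Ascend overflow dump header layout; they are
        // assumed to locate the stream_id and task_id fields and are not
        // derived from anything in this file.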
        std::vector<char> buffer;
        buffer.resize(BUF_SIZ);
        infile.read(buffer.data(), BUF_SIZ);
        uint64_t stream_id = BytestoInt64(std::vector<char>(buffer.begin() + 8, buffer.end()));
        uint64_t task_id = BytestoInt64(std::vector<char>(buffer.begin() + 16, buffer.end()));
        MS_LOG(INFO) << "Overflow stream_id " << stream_id << ", task_id " << task_id << ".";
        auto op = debugger_->stream_task_to_opname_.find(std::make_pair(stream_id, task_id));
        if (op != debugger_->stream_task_to_opname_.end()) {
          MS_LOG(ERROR) << "Overflow detected on node " << op->second << std::endl;
          op_names.push_back(op->second);
        } else {
          MS_LOG(INFO) << "No matching op found for this overflow file" << std::endl;
        }
        infile.close();
      }
    }
    // close the directory stream here; closedir on a null handle is undefined,
    // so this stays inside the successful-opendir branch
    closedir(d);
  } else {
    MS_LOG(INFO) << "Overflow bin directory does not exist!";
  }
  if (!op_names.empty()) {
    MS_LOG(ERROR) << "Overflow detected in these operations: " << op_names;
  }
  for (auto &i : bin_list) {
    if (i > last_overflow_bin_) {
      last_overflow_bin_ = i;
    }
  }
  return op_names;
}
}  // namespace mindspore