You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

debugger.cc 17 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <fstream>
  17. #include <tuple>
  18. #include <vector>
  19. #include <algorithm>
  20. #include "debug/debugger/debugger.h"
  21. #include "pipeline/pipeline.h"
  22. #include "session/anf_runtime_algorithm.h"
  23. using debugger::EventReply;
  24. using debugger::GraphProto;
  25. using debugger::ModelProto;
  26. using debugger::TensorProto;
  27. using debugger::WatchCondition;
  28. using debugger::WatchCondition_Condition_inf;
  29. using debugger::WatchCondition_Condition_nan;
  30. using debugger::WatchNode;
  31. using debugger::WatchpointHit;
  32. namespace mindspore {
// Singleton storage for the global Debugger instance and the lock guarding
// its lazy construction (instance accessor lives in the header).
DebuggerPtr Debugger::debugger_ = nullptr;
std::mutex Debugger::instance_lock_;
// Default-construct with the debugger disabled; members are (re)initialized
// by Init()/EnableDebugger() before actual use.
Debugger::Debugger()
    : grpc_client_(nullptr),
      debug_services_(nullptr),
      device_id_(0),
      num_step_(0),
      debugger_enabled_(false),
      is_dataset_graph_(false) {}
  42. void Debugger::Init(const uint32_t device_id) {
  43. // access lock for public method
  44. std::lock_guard<std::mutex> a_lock(access_lock_);
  45. // save device_id
  46. MS_LOG(INFO) << "Debugger got device_id: " << device_id;
  47. device_id_ = device_id;
  48. }
  49. void Debugger::EnableDebugger() {
  50. // reset some of the class members
  51. num_step_ = 0;
  52. debugger_enabled_ = false;
  53. grpc_client_ = nullptr;
  54. debug_services_ = nullptr;
  55. // get env variables to configure debugger
  56. const char *env_enable_str = std::getenv("ENABLE_MS_DEBUGGER");
  57. if (env_enable_str != nullptr) {
  58. MS_LOG(INFO) << "Getenv ENABLE_MS_DEBUGGER: " << env_enable_str;
  59. if (std::strcmp(env_enable_str, "1") == 0) {
  60. debugger_enabled_ = true;
  61. }
  62. }
  63. if (!debugger_enabled_) {
  64. MS_LOG(WARNING) << "Not enabling debugger. Set environment variable ENABLE_MS_DEBUGGER=1 to enable debugger.";
  65. return;
  66. }
  67. // configure host
  68. const char *env_host_str = std::getenv("MS_DEBUGGER_HOST");
  69. std::string host;
  70. if (env_host_str != nullptr) {
  71. MS_LOG(INFO) << "Getenv MS_DEBUGGER_HOST: " << env_host_str;
  72. host = std::string(env_host_str);
  73. } else {
  74. MS_LOG(WARNING) << "Environment variable MS_DEBUGGER_HOST doesn't exist. Using default debugger host: localhost";
  75. host = "localhost";
  76. }
  77. // configure port
  78. const char *env_port_str = std::getenv("MS_DEBUGGER_PORT");
  79. std::string port;
  80. if (env_port_str != nullptr) {
  81. MS_LOG(INFO) << "Getenv MS_DEBUGGER_PORT: " << env_port_str;
  82. port = std::string(env_port_str);
  83. } else {
  84. MS_LOG(WARNING) << "Environment variable MS_DEBUGGER_PORT doesn't exist. Using default debugger port: 50051";
  85. port = "50051";
  86. }
  87. // initialize grpc client
  88. grpc_client_ = std::make_unique<GrpcClient>(host, port);
  89. debug_services_ = std::make_unique<DebugServices>();
  90. }
  91. void Debugger::Reset() {
  92. // access lock for public method
  93. std::lock_guard<std::mutex> a_lock(access_lock_);
  94. // reset components
  95. device_id_ = 0;
  96. num_step_ = 0;
  97. debugger_enabled_ = false;
  98. is_dataset_graph_ = false;
  99. graph_ptr_ = nullptr;
  100. grpc_client_ = nullptr;
  101. debug_services_ = nullptr;
  102. }
  103. void Debugger::PreExecute(const KernelGraphPtr &graph_ptr) {
  104. // access lock for public method
  105. std::lock_guard<std::mutex> a_lock(access_lock_);
  106. // check and save graph_ptr, suspend if graph is new
  107. CheckGraphPtr(graph_ptr);
  108. }
  109. void Debugger::PostExecute() {
  110. // access lock for public method
  111. std::lock_guard<std::mutex> a_lock(access_lock_);
  112. // analyze tensor data and send the watchpoints been hit
  113. if (debugger_enabled_ && !is_dataset_graph_) {
  114. num_step_++;
  115. MS_LOG(INFO) << "Debugger suspend at end of step; number of steps executed: " << num_step_;
  116. SendWatchpointsAndSuspend(CheckWatchpoints());
  117. }
  118. }
  119. void Debugger::PostDebugOp() {
  120. // access lock for public method
  121. std::lock_guard<std::mutex> a_lock(access_lock_);
  122. // suspend if debugger is enabled
  123. if (debugger_enabled_ && !is_dataset_graph_) {
  124. MS_LOG(INFO) << "Debugger suspend at debug_op";
  125. CommandLoop();
  126. }
  127. }
  128. void Debugger::CheckGraphPtr(const KernelGraphPtr &graph_ptr) {
  129. if (graph_ptr_ != graph_ptr) {
  130. MS_LOG(INFO) << "Debugger got new graph: " << graph_ptr->graph_id();
  131. // save new graph_ptr
  132. graph_ptr_ = graph_ptr;
  133. // check if it is dataset graph
  134. CheckDatasetGraph();
  135. if (!is_dataset_graph_) {
  136. // only try to enable debugger if it is not a dataset graph
  137. EnableDebugger();
  138. if (debugger_enabled_) {
  139. // get graph proto and send to mindinsight
  140. SendGraphAndSuspend(GetGraphProto());
  141. }
  142. }
  143. }
  144. }
  145. void Debugger::CheckDatasetGraph() {
  146. // print parameter node names
  147. const auto &params = graph_ptr_->inputs();
  148. for (const auto &param : params) {
  149. MS_LOG(INFO) << "param: " << param->fullname_with_scope();
  150. }
  151. // check if there is GetNext or InitDataSetQueue node
  152. const auto &nodes = graph_ptr_->execution_order();
  153. for (const auto &node : nodes) {
  154. auto node_name = AnfAlgo::GetCNodeName(node);
  155. MS_LOG(INFO) << "node: " << node->fullname_with_scope();
  156. if (node_name == "GetNext" || node_name == "InitDataSetQueue") {
  157. MS_LOG(WARNING) << "Not enabling debugger for graph " << graph_ptr_->graph_id() << ": found dataset graph node "
  158. << node_name;
  159. is_dataset_graph_ = true;
  160. return;
  161. }
  162. }
  163. is_dataset_graph_ = false;
  164. }
  165. GraphProto Debugger::GetGraphProto() {
  166. // convert kernel graph to debugger modelproto
  167. ModelProto model = GetDebuggerFuncGraphProto(graph_ptr_);
  168. return model.graph();
  169. }
  170. void Debugger::SendGraphAndSuspend(const GraphProto &graph_proto) {
  171. // prepare metadata
  172. std::string device_name = std::to_string(device_id_) + ":" + std::to_string(graph_ptr_->graph_id());
  173. Metadata metadata;
  174. metadata.set_device_name(device_name);
  175. metadata.set_cur_step(num_step_);
  176. EventReply reply_metadata = grpc_client_->SendMetadata(metadata);
  177. if (reply_metadata.status() != reply_metadata.OK) {
  178. MS_LOG(ERROR) << "Error: SendMetadata failed";
  179. }
  180. // send graph to mindinght server
  181. EventReply reply = grpc_client_->SendGraph(graph_proto);
  182. if (reply.status() != reply.OK) {
  183. MS_LOG(ERROR) << "Error: SendGraph failed";
  184. }
  185. // enter command loop, wait and process commands
  186. CommandLoop();
  187. }
  188. void Debugger::CommandLoop() {
  189. // prepare metadata
  190. std::string device_name = std::to_string(device_id_) + ":" + std::to_string(graph_ptr_->graph_id());
  191. Metadata metadata;
  192. metadata.set_device_name(device_name);
  193. metadata.set_cur_step(num_step_);
  194. // loop exit flag
  195. bool run = false;
  196. int num_wait_fail = 0;
  197. const int max_num_wait_fail = 5;
  198. while (!run) {
  199. // wait for command
  200. EventReply reply = grpc_client_->WaitForCommand(metadata);
  201. if (reply.status() != reply.OK) {
  202. MS_LOG(ERROR) << "Error: WaitForCommand failed";
  203. num_wait_fail++;
  204. if (num_wait_fail > max_num_wait_fail) {
  205. MS_LOG(ERROR) << "Maximum number of WaitForCommand retry reached: exiting training session";
  206. Exit();
  207. }
  208. MS_LOG(ERROR) << "Number of consecutive WaitForCommand fail:" << num_wait_fail << "; Retry after "
  209. << num_wait_fail << "s";
  210. std::this_thread::sleep_for(std::chrono::milliseconds(1000 * num_wait_fail));
  211. continue;
  212. }
  213. // get type of the command in reply
  214. DebuggerCommand cmd = GetCommand(reply);
  215. if (cmd == DebuggerCommand::kUnknownCMD) {
  216. MS_LOG(ERROR) << "Error: debugger recieved unknown command";
  217. continue;
  218. }
  219. MS_LOG(INFO) << "recieved command: ";
  220. switch (cmd) {
  221. case DebuggerCommand::kUnknownCMD:
  222. MS_LOG(INFO) << "UnknownCMD";
  223. break;
  224. case DebuggerCommand::kExitCMD:
  225. MS_LOG(INFO) << "ExitCMD";
  226. Exit();
  227. break;
  228. case DebuggerCommand::kRunCMD:
  229. MS_LOG(INFO) << "RunCMD";
  230. // exit loop
  231. run = true;
  232. break;
  233. case DebuggerCommand::kSetCMD:
  234. MS_LOG(INFO) << "SetCMD";
  235. {
  236. // print set cmd content
  237. ProtoVector<WatchNode> recieved_nodes = GetWatchnodes(reply);
  238. for (auto node : recieved_nodes) {
  239. MS_LOG(INFO) << "node name: " << node.node_name();
  240. MS_LOG(INFO) << "node type: " << node.node_type();
  241. }
  242. WatchCondition recieved_condition = GetWatchcondition(reply);
  243. MS_LOG(INFO) << "condition: " << recieved_condition.condition();
  244. int32_t id = GetWatchpointID(reply);
  245. MS_LOG(INFO) << "id: " << id;
  246. bool delete_ = GetWatchpointDelete(reply);
  247. MS_LOG(INFO) << "delete: " << delete_;
  248. }
  249. MS_LOG(INFO) << "Setting watchpoint";
  250. if (GetWatchpointDelete(reply)) {
  251. RemoveWatchpoint(GetWatchpointID(reply));
  252. } else {
  253. SetWatchpoint(GetWatchnodes(reply), GetWatchcondition(reply), GetWatchpointID(reply));
  254. }
  255. break;
  256. case DebuggerCommand::kViewCMD:
  257. MS_LOG(INFO) << "ViewCMD";
  258. {
  259. // print view cmd content
  260. ProtoVector<TensorProto> received_tensors = GetTensors(reply);
  261. for (auto tensor : received_tensors) {
  262. MS_LOG(INFO) << "tensor node name: " << tensor.node_name();
  263. MS_LOG(INFO) << "tensor slot: " << tensor.slot();
  264. MS_LOG(INFO) << "tensor finished: " << std::boolalpha << tensor.finished() << std::noboolalpha;
  265. }
  266. }
  267. MS_LOG(INFO) << "Sending tensors";
  268. std::list<TensorProto> tensors = LoadTensors(GetTensors(reply));
  269. {
  270. for (auto tensor : tensors) {
  271. MS_LOG(INFO) << "tensor node name: " << tensor.node_name();
  272. MS_LOG(INFO) << "tensor slot: " << tensor.slot();
  273. MS_LOG(INFO) << "tensor finished: " << std::boolalpha << tensor.finished() << std::noboolalpha;
  274. MS_LOG(INFO) << "tensor dims: ";
  275. for (auto dim : tensor.dims()) {
  276. MS_LOG(INFO) << dim << ",";
  277. }
  278. MS_LOG(INFO) << "tensor dtype: " << tensor.data_type();
  279. }
  280. }
  281. EventReply send_tensors_reply = grpc_client_->SendTensors(tensors);
  282. if (send_tensors_reply.status() != send_tensors_reply.OK) {
  283. MS_LOG(ERROR) << "Error: SendTensors failed";
  284. }
  285. break;
  286. }
  287. }
  288. }
  289. DebuggerCommand Debugger::GetCommand(const EventReply &reply) {
  290. DebuggerCommand cmd = DebuggerCommand::kUnknownCMD;
  291. switch (reply.cmd_case()) {
  292. case debugger::EventReply::CmdCase::kExit:
  293. cmd = DebuggerCommand::kExitCMD;
  294. break;
  295. case debugger::EventReply::CmdCase::kRunCmd:
  296. cmd = DebuggerCommand::kRunCMD;
  297. break;
  298. case debugger::EventReply::CmdCase::kSetCmd:
  299. cmd = DebuggerCommand::kSetCMD;
  300. break;
  301. case debugger::EventReply::CmdCase::kViewCmd:
  302. cmd = DebuggerCommand::kViewCMD;
  303. break;
  304. default:
  305. MS_LOG(ERROR) << "Error: UnknownCMD";
  306. break;
  307. }
  308. return cmd;
  309. }
  310. ProtoVector<WatchNode> Debugger::GetWatchnodes(const EventReply &reply) {
  311. if (!reply.has_set_cmd()) {
  312. MS_LOG(ERROR) << "Error: Not SetCMD, can not get WatchNodes. Returning default value: ProtoVector<WatchNode>().";
  313. return ProtoVector<WatchNode>();
  314. }
  315. return reply.set_cmd().watch_nodes();
  316. }
  317. WatchCondition Debugger::GetWatchcondition(const EventReply &reply) {
  318. if (!reply.has_set_cmd() || !reply.set_cmd().has_watch_condition()) {
  319. MS_LOG(ERROR) << "Error: Can not get WatchCondition from command. Returning default value: WatchCondition().";
  320. return WatchCondition();
  321. }
  322. return reply.set_cmd().watch_condition();
  323. }
  324. int32_t Debugger::GetWatchpointID(const EventReply &reply) {
  325. if (!reply.has_set_cmd()) {
  326. MS_LOG(ERROR) << "Error: Not SetCMD, can not get Watchpoint ID. Returning default value: 0.";
  327. return 0;
  328. }
  329. return reply.set_cmd().id();
  330. }
  331. bool Debugger::GetWatchpointDelete(const EventReply &reply) {
  332. if (!reply.has_set_cmd()) {
  333. MS_LOG(ERROR) << "Error: Not SetCMD, can not get Watchpoint delete flag. Returning default value: false.";
  334. return false;
  335. }
  336. return reply.set_cmd().delete_();
  337. }
  338. ProtoVector<TensorProto> Debugger::GetTensors(const EventReply &reply) {
  339. if (!reply.has_view_cmd()) {
  340. MS_LOG(ERROR) << "Error: Not ViewCMD, can not get Tensors. Returning default value: ProtoVector<TensorProto>().";
  341. return ProtoVector<TensorProto>();
  342. }
  343. return reply.view_cmd().tensors();
  344. }
  345. void Debugger::SetWatchpoint(const ProtoVector<WatchNode> &nodes, const WatchCondition &condition, const int32_t id) {
  346. std::vector<std::tuple<std::string, bool>> check_node_list;
  347. std::transform(nodes.begin(), nodes.end(), std::back_inserter(check_node_list),
  348. [](WatchNode node) -> std::tuple<std::string, bool> {
  349. return make_tuple(node.node_name(), node.node_type() == "scope");
  350. });
  351. debug_services_->add_watchpoint(id, condition.condition(), check_node_list);
  352. }
// Delete the watchpoint with the given id from the debug services backend.
void Debugger::RemoveWatchpoint(const int32_t id) { debug_services_->remove_watchpoint(id); }
  354. std::list<TensorProto> Debugger::LoadTensors(const ProtoVector<TensorProto> &tensors) {
  355. std::vector<std::string> name;
  356. std::vector<std::string> ret_name;
  357. std::vector<char *> data_ptr;
  358. std::vector<unsigned int> data_size;
  359. std::vector<TypePtr> dtype;
  360. std::vector<std::vector<int>> shape;
  361. std::transform(tensors.begin(), tensors.end(), std::back_inserter(name),
  362. [](TensorProto tensor) -> std::string { return tensor.node_name() + ":" + tensor.slot(); });
  363. debug_services_->read_nodes_tensors(name, &ret_name, &data_ptr, &data_size, &dtype, &shape);
  364. std::list<TensorProto> tensor_list;
  365. unsigned int result_index = 0;
  366. TensorProto tensor_item;
  367. for (auto tensor : tensors) {
  368. tensor_item.set_node_name(tensor.node_name());
  369. tensor_item.set_slot(tensor.slot());
  370. tensor_item.set_finished(true);
  371. // return empty tensor if didn't find the requested tensor
  372. if (result_index >= ret_name.size() || ret_name[result_index] != tensor.node_name() + ":" + tensor.slot()) {
  373. tensor_list.push_back(tensor_item);
  374. continue;
  375. }
  376. tensor_item.set_tensor_content(data_ptr[result_index], data_size[result_index]);
  377. tensor_item.set_data_type(GetDebuggerNumberDataType(dtype[result_index]));
  378. tensor_item.clear_dims();
  379. for (auto &elem : shape[result_index]) {
  380. tensor_item.add_dims(elem);
  381. }
  382. tensor_list.push_back(tensor_item);
  383. result_index++;
  384. }
  385. return tensor_list;
  386. }
// Tear down pipeline resources and terminate the process. Called when the
// server requests an exit or when WaitForCommand keeps failing; never returns.
void Debugger::Exit() {
  // clear resource before exit
  pipeline::ClearResAtexit();
  std::exit(EXIT_FAILURE);
}
  392. std::list<WatchpointHit> Debugger::CheckWatchpoints() {
  393. std::vector<std::string> name;
  394. std::vector<std::string> slot;
  395. std::vector<char *> data_ptr;
  396. std::vector<unsigned int> data_size;
  397. std::vector<int> condition;
  398. std::vector<unsigned int> watchpoint_id;
  399. debug_services_->check_watchpoints(&name, &slot, &data_ptr, &data_size, &condition, &watchpoint_id);
  400. std::list<WatchpointHit> points;
  401. for (unsigned int i = 0; i < name.size(); i++) {
  402. TensorProto *tensor_item;
  403. tensor_item = new TensorProto();
  404. tensor_item->set_node_name(name[i]);
  405. tensor_item->set_slot(slot[i]);
  406. tensor_item->set_tensor_content(data_ptr[i], data_size[i]);
  407. // finished in TensorProto will always be true before we implement big tensor splitting
  408. tensor_item->set_finished(true);
  409. WatchCondition *condition_item;
  410. condition_item = new WatchCondition();
  411. condition_item->set_condition(debugger::WatchCondition_Condition(condition[i]));
  412. WatchpointHit point;
  413. point.set_allocated_tensor(tensor_item);
  414. point.set_allocated_watch_condition(condition_item);
  415. point.set_id(watchpoint_id[i]);
  416. points.push_back(point);
  417. }
  418. return points;
  419. }
  420. void Debugger::SendWatchpointsAndSuspend(const std::list<WatchpointHit> &points) {
  421. // send info about watchpoint
  422. if (!points.empty()) {
  423. EventReply reply = grpc_client_->SendWatchpointHits(points);
  424. if (reply.status() != reply.OK) {
  425. MS_LOG(ERROR) << "Error: SendWatchpointHits failed";
  426. }
  427. }
  428. // enter command loop
  429. CommandLoop();
  430. }
// Non-owning pointer to the debug services backend (nullptr until EnableDebugger() creates it).
DebugServices *Debugger::get_debug_services() { return debug_services_.get(); }
// Whether the debugger was switched on (via ENABLE_MS_DEBUGGER=1) for the current graph.
bool Debugger::debugger_enabled() { return debugger_enabled_; }
  433. } // namespace mindspore