
debugger.cc

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <fstream>
#include <tuple>
#include <vector>
#include <algorithm>

#include "debug/debugger/debugger.h"
#include "pipeline/pipeline.h"
#include "session/anf_runtime_algorithm.h"

using debugger::EventReply;
using debugger::GraphProto;
using debugger::ModelProto;
using debugger::TensorProto;
using debugger::WatchCondition;
using debugger::WatchCondition_Condition_inf;
using debugger::WatchCondition_Condition_nan;
using debugger::WatchNode;
using debugger::WatchpointHit;

namespace mindspore {

DebuggerPtr Debugger::debugger_ = nullptr;
std::mutex Debugger::instance_lock_;

Debugger::Debugger()
    : grpc_client_(nullptr),
      debug_services_(nullptr),
      device_id_(0),
      num_step_(0),
      debugger_enabled_(false),
      is_dataset_graph_(false) {}

void Debugger::Init(const uint32_t device_id) {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  // save device_id
  MS_LOG(INFO) << "Debugger got device_id: " << device_id;
  device_id_ = device_id;
}
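
// read ENABLE_MS_DEBUGGER, MS_DEBUGGER_HOST and MS_DEBUGGER_PORT from the environment and,
// if the debugger is enabled, create the gRPC client and debug services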
void Debugger::EnableDebugger() {
  // reset some of the class members
  num_step_ = 0;
  debugger_enabled_ = false;
  grpc_client_ = nullptr;
  debug_services_ = nullptr;
  // get env variables to configure debugger
  const char *env_enable_str = std::getenv("ENABLE_MS_DEBUGGER");
  if (env_enable_str != nullptr) {
    MS_LOG(INFO) << "Getenv ENABLE_MS_DEBUGGER: " << env_enable_str;
    if (std::strcmp(env_enable_str, "1") == 0) {
      debugger_enabled_ = true;
    }
  }
  if (!debugger_enabled_) {
    MS_LOG(WARNING) << "Not enabling debugger. Set environment variable ENABLE_MS_DEBUGGER=1 to enable debugger.";
    return;
  }
  // configure host
  const char *env_host_str = std::getenv("MS_DEBUGGER_HOST");
  std::string host;
  if (env_host_str != nullptr) {
    MS_LOG(INFO) << "Getenv MS_DEBUGGER_HOST: " << env_host_str;
    host = std::string(env_host_str);
  } else {
    MS_LOG(WARNING) << "Environment variable MS_DEBUGGER_HOST doesn't exist. Using default debugger host: localhost";
    host = "localhost";
  }
  // configure port
  const char *env_port_str = std::getenv("MS_DEBUGGER_PORT");
  std::string port;
  if (env_port_str != nullptr) {
    MS_LOG(INFO) << "Getenv MS_DEBUGGER_PORT: " << env_port_str;
    port = std::string(env_port_str);
  } else {
    MS_LOG(WARNING) << "Environment variable MS_DEBUGGER_PORT doesn't exist. Using default debugger port: 50051";
    port = "50051";
  }
  // initialize grpc client
  grpc_client_ = std::make_unique<GrpcClient>(host, port);
  debug_services_ = std::make_unique<DebugServices>();
}

void Debugger::Reset() {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  // reset components
  device_id_ = 0;
  num_step_ = 0;
  debugger_enabled_ = false;
  is_dataset_graph_ = false;
  graph_ptr_ = nullptr;
  grpc_client_ = nullptr;
  debug_services_ = nullptr;
}

void Debugger::PreExecute(const KernelGraphPtr &graph_ptr) {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  // check and save graph_ptr, suspend if graph is new
  CheckGraphPtr(graph_ptr);
}

void Debugger::PostExecute() {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  // analyze tensor data and send the watchpoints that have been hit
  if (debugger_enabled_ && !is_dataset_graph_) {
    num_step_++;
    MS_LOG(INFO) << "Debugger suspend at end of step; number of steps executed: " << num_step_;
    SendWatchpointsAndSuspend(CheckWatchpoints());
  }
}

void Debugger::PostDebugOp() {
  // access lock for public method
  std::lock_guard<std::mutex> a_lock(access_lock_);
  // suspend if debugger is enabled
  if (debugger_enabled_ && !is_dataset_graph_) {
    MS_LOG(INFO) << "Debugger suspend at debug_op";
    CommandLoop();
  }
}

void Debugger::CheckGraphPtr(const KernelGraphPtr &graph_ptr) {
  if (graph_ptr_ != graph_ptr) {
    MS_LOG(INFO) << "Debugger got new graph: " << graph_ptr->graph_id();
    // save new graph_ptr
    graph_ptr_ = graph_ptr;
    // check if it is a dataset graph
    CheckDatasetGraph();
    if (!is_dataset_graph_) {
      // only try to enable debugger if it is not a dataset graph
      EnableDebugger();
      if (debugger_enabled_) {
        // get graph proto and send to mindinsight
        SendGraphAndSuspend(GetGraphProto());
      }
    }
  }
}

void Debugger::CheckDatasetGraph() {
  // print parameter node names
  const auto &params = graph_ptr_->inputs();
  for (const auto &param : params) {
    MS_LOG(INFO) << "param: " << param->fullname_with_scope();
  }
  // check if there is a GetNext or InitDataSetQueue node
  const auto &nodes = graph_ptr_->execution_order();
  for (const auto &node : nodes) {
    auto node_name = AnfAlgo::GetCNodeName(node);
    MS_LOG(INFO) << "node: " << node->fullname_with_scope();
    if (node_name == "GetNext" || node_name == "InitDataSetQueue") {
      MS_LOG(WARNING) << "Not enabling debugger for graph " << graph_ptr_->graph_id() << ": found dataset graph node "
                      << node_name;
      is_dataset_graph_ = true;
      return;
    }
  }
  is_dataset_graph_ = false;
}

GraphProto Debugger::GetGraphProto() const {
  // convert kernel graph to debugger modelproto
  ModelProto model = GetDebuggerFuncGraphProto(graph_ptr_);
  return model.graph();
}

void Debugger::SendGraphAndSuspend(const GraphProto &graph_proto) {
  // prepare metadata
  std::string device_name = std::to_string(device_id_) + ":" + std::to_string(graph_ptr_->graph_id());
  Metadata metadata;
  metadata.set_device_name(device_name);
  metadata.set_cur_step(num_step_);
  EventReply reply_metadata = grpc_client_->SendMetadata(metadata);
  if (reply_metadata.status() != reply_metadata.OK) {
    MS_LOG(ERROR) << "Error: SendMetadata failed";
  }
  // send graph to mindinsight server
  EventReply reply = grpc_client_->SendGraph(graph_proto);
  if (reply.status() != reply.OK) {
    MS_LOG(ERROR) << "Error: SendGraph failed";
  }
  // enter command loop, wait for and process commands
  CommandLoop();
}
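
// wait for commands from the debugger client and process them until a RunCMD resumes execution
// or an ExitCMD terminates the training session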
void Debugger::CommandLoop() {
  // prepare metadata
  std::string device_name = std::to_string(device_id_) + ":" + std::to_string(graph_ptr_->graph_id());
  Metadata metadata;
  metadata.set_device_name(device_name);
  metadata.set_cur_step(num_step_);
  // loop exit flag
  bool run = false;
  int num_wait_fail = 0;
  const int max_num_wait_fail = 5;
  while (!run) {
    // wait for command
    EventReply reply = grpc_client_->WaitForCommand(metadata);
    if (reply.status() != reply.OK) {
      MS_LOG(ERROR) << "Error: WaitForCommand failed";
      num_wait_fail++;
      if (num_wait_fail > max_num_wait_fail) {
        MS_LOG(ERROR) << "Maximum number of WaitForCommand retries reached: exiting training session";
        Exit();
      }
      MS_LOG(ERROR) << "Number of consecutive WaitForCommand failures: " << num_wait_fail << "; Retry after "
                    << num_wait_fail << "s";
      std::this_thread::sleep_for(std::chrono::milliseconds(1000 * num_wait_fail));
      continue;
    }
    // get type of the command in reply
    DebuggerCommand cmd = GetCommand(reply);
    if (cmd == DebuggerCommand::kUnknownCMD) {
      MS_LOG(ERROR) << "Error: debugger received unknown command";
      continue;
    }
    MS_LOG(INFO) << "received command: ";
    switch (cmd) {
      case DebuggerCommand::kUnknownCMD:
        MS_LOG(INFO) << "UnknownCMD";
        break;
      case DebuggerCommand::kExitCMD:
        MS_LOG(INFO) << "ExitCMD";
        Exit();
        break;
      case DebuggerCommand::kRunCMD:
        MS_LOG(INFO) << "RunCMD";
        // exit loop
        run = true;
        break;
      case DebuggerCommand::kSetCMD:
        MS_LOG(INFO) << "SetCMD";
        {
          // print set cmd content
          ProtoVector<WatchNode> received_nodes = GetWatchnodes(reply);
          for (auto node : received_nodes) {
            MS_LOG(INFO) << "node name: " << node.node_name();
            MS_LOG(INFO) << "node type: " << node.node_type();
          }
          MS_LOG(INFO) << "condition: " << GetWatchcondition(reply).condition();
          MS_LOG(INFO) << "id: " << GetWatchpointID(reply);
          MS_LOG(INFO) << "delete: " << GetWatchpointDelete(reply);
        }
        MS_LOG(INFO) << "Setting watchpoint";
        if (GetWatchpointDelete(reply)) {
          RemoveWatchpoint(GetWatchpointID(reply));
        } else {
          SetWatchpoint(GetWatchnodes(reply), GetWatchcondition(reply), GetWatchpointID(reply));
        }
        break;
      case DebuggerCommand::kViewCMD:
        MS_LOG(INFO) << "ViewCMD";
        {
          // print view cmd content
          ProtoVector<TensorProto> received_tensors = GetTensors(reply);
          for (auto tensor : received_tensors) {
            MS_LOG(INFO) << "tensor node name: " << tensor.node_name();
            MS_LOG(INFO) << "tensor slot: " << tensor.slot();
            MS_LOG(INFO) << "tensor finished: " << std::boolalpha << tensor.finished() << std::noboolalpha;
            MS_LOG(INFO) << "tensor iter: " << tensor.iter();
            MS_LOG(INFO) << "tensor truncate: " << std::boolalpha << tensor.truncate() << std::noboolalpha;
          }
        }
        MS_LOG(INFO) << "Sending tensors";
        std::list<TensorProto> tensors = LoadTensors(GetTensors(reply));
        {
          // print view cmd reply
          for (auto tensor : tensors) {
            MS_LOG(INFO) << "tensor node name: " << tensor.node_name();
            MS_LOG(INFO) << "tensor slot: " << tensor.slot();
            MS_LOG(INFO) << "tensor finished: " << std::boolalpha << tensor.finished() << std::noboolalpha;
            MS_LOG(INFO) << "tensor iter: " << tensor.iter();
            MS_LOG(INFO) << "tensor truncate: " << std::boolalpha << tensor.truncate() << std::noboolalpha;
            MS_LOG(INFO) << "tensor dims: ";
            for (auto dim : tensor.dims()) {
              MS_LOG(INFO) << dim << ",";
            }
            MS_LOG(INFO) << "tensor dtype: " << tensor.data_type();
          }
        }
        EventReply send_tensors_reply = grpc_client_->SendTensors(tensors);
        if (send_tensors_reply.status() != send_tensors_reply.OK) {
          MS_LOG(ERROR) << "Error: SendTensors failed";
        }
        break;
    }
  }
}

void Debugger::SetWatchpoint(const ProtoVector<WatchNode> &nodes, const WatchCondition &condition, const int32_t id) {
  std::vector<std::tuple<std::string, bool>> check_node_list;
  std::transform(nodes.begin(), nodes.end(), std::back_inserter(check_node_list),
                 [](WatchNode node) -> std::tuple<std::string, bool> {
                   return make_tuple(node.node_name(), node.node_type() == "scope");
                 });
  debug_services_->add_watchpoint(id, condition.condition(), check_node_list);
}

void Debugger::RemoveWatchpoint(const int32_t id) { debug_services_->remove_watchpoint(id); }
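
// look up the requested tensors in DebugServices and build TensorProto replies;
// tensors that are not found are returned with empty content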
std::list<TensorProto> Debugger::LoadTensors(const ProtoVector<TensorProto> &tensors) const {
  std::vector<std::string> name;
  std::vector<std::string> ret_name;
  std::vector<char *> data_ptr;
  std::vector<unsigned int> data_size;
  std::vector<TypePtr> dtype;
  std::vector<std::vector<int>> shape;
  std::transform(tensors.begin(), tensors.end(), std::back_inserter(name), GetTensorFullName);
  // ret_name will contain the tensor names that are found in the TensorLoader;
  // items in ret_name are in the same order as in tensors, if found
  debug_services_->read_nodes_tensors(name, &ret_name, &data_ptr, &data_size, &dtype, &shape);
  std::list<TensorProto> tensor_list;
  unsigned int result_index = 0;
  for (auto tensor : tensors) {
    TensorProto tensor_item;
    tensor_item.set_node_name(tensor.node_name());
    tensor_item.set_slot(tensor.slot());
    tensor_item.set_iter(tensor.iter());
    tensor_item.set_truncate(tensor.truncate());
    tensor_item.clear_tensor_content();
    tensor_item.clear_data_type();
    tensor_item.clear_dims();
    // always set finished to true until big tensor splitting is supported
    tensor_item.set_finished(true);
    // return an empty tensor if the requested tensor was not found
    if (result_index >= ret_name.size() || ret_name[result_index] != GetTensorFullName(tensor)) {
      tensor_list.push_back(tensor_item);
      continue;
    }
    tensor_item.set_tensor_content(data_ptr[result_index], data_size[result_index]);
    tensor_item.set_data_type(GetDebuggerNumberDataType(dtype[result_index]));
    for (auto &elem : shape[result_index]) {
      tensor_item.add_dims(elem);
    }
    // add tensor to result list and increment result_index to check the next item in ret_name
    tensor_list.push_back(tensor_item);
    result_index++;
  }
  return tensor_list;
}

void Debugger::Exit() {
  // clear resources before exiting
  pipeline::ClearResAtexit();
  std::exit(EXIT_FAILURE);
}
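
// query DebugServices for watchpoints hit in the current step and convert the results to WatchpointHit protos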
std::list<WatchpointHit> Debugger::CheckWatchpoints() const {
  std::vector<std::string> name;
  std::vector<std::string> slot;
  std::vector<char *> data_ptr;
  std::vector<unsigned int> data_size;
  std::vector<int> condition;
  std::vector<unsigned int> watchpoint_id;
  debug_services_->check_watchpoints(&name, &slot, &data_ptr, &data_size, &condition, &watchpoint_id);
  std::list<WatchpointHit> hits;
  for (unsigned int i = 0; i < name.size(); i++) {
    WatchpointHit hit;
    hit.set_id(watchpoint_id[i]);
    // here TensorProto acts as a tensor indicator; the tensor content is not sent
    TensorProto *tensor_item = hit.mutable_tensor();
    tensor_item->set_node_name(name[i]);
    tensor_item->set_slot(slot[i]);
    tensor_item->set_finished(true);
    WatchCondition *condition_item = hit.mutable_watch_condition();
    condition_item->set_condition(debugger::WatchCondition_Condition(condition[i]));
    hits.push_back(hit);
  }
  return hits;
}

void Debugger::SendWatchpointsAndSuspend(const std::list<WatchpointHit> &points) {
  // send info about watchpoint hits
  if (!points.empty()) {
    EventReply reply = grpc_client_->SendWatchpointHits(points);
    if (reply.status() != reply.OK) {
      MS_LOG(ERROR) << "Error: SendWatchpointHits failed";
    }
  }
  // enter command loop
  CommandLoop();
}

DebugServices *Debugger::debug_services() const { return debug_services_.get(); }

bool Debugger::debugger_enabled() const { return debugger_enabled_; }
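
// extract the type of command carried in an EventReply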
DebuggerCommand GetCommand(const EventReply &reply) {
  DebuggerCommand cmd = DebuggerCommand::kUnknownCMD;
  switch (reply.cmd_case()) {
    case debugger::EventReply::CmdCase::kExit:
      cmd = DebuggerCommand::kExitCMD;
      break;
    case debugger::EventReply::CmdCase::kRunCmd:
      cmd = DebuggerCommand::kRunCMD;
      break;
    case debugger::EventReply::CmdCase::kSetCmd:
      cmd = DebuggerCommand::kSetCMD;
      break;
    case debugger::EventReply::CmdCase::kViewCmd:
      cmd = DebuggerCommand::kViewCMD;
      break;
    default:
      MS_LOG(ERROR) << "Error: UnknownCMD";
      break;
  }
  return cmd;
}

ProtoVector<WatchNode> GetWatchnodes(const EventReply &reply) {
  if (!reply.has_set_cmd()) {
    MS_LOG(ERROR) << "Error: Not SetCMD, can not get WatchNodes. Returning default value: ProtoVector<WatchNode>().";
    return ProtoVector<WatchNode>();
  }
  return reply.set_cmd().watch_nodes();
}

WatchCondition GetWatchcondition(const EventReply &reply) {
  if (!reply.has_set_cmd() || !reply.set_cmd().has_watch_condition()) {
    MS_LOG(ERROR) << "Error: Can not get WatchCondition from command. Returning default value: WatchCondition().";
    return WatchCondition();
  }
  return reply.set_cmd().watch_condition();
}

int32_t GetWatchpointID(const EventReply &reply) {
  if (!reply.has_set_cmd()) {
    MS_LOG(ERROR) << "Error: Not SetCMD, can not get Watchpoint ID. Returning default value: 0.";
    return 0;
  }
  return reply.set_cmd().id();
}

bool GetWatchpointDelete(const EventReply &reply) {
  if (!reply.has_set_cmd()) {
    MS_LOG(ERROR) << "Error: Not SetCMD, can not get Watchpoint delete flag. Returning default value: false.";
    return false;
  }
  return reply.set_cmd().delete_();
}

ProtoVector<TensorProto> GetTensors(const EventReply &reply) {
  if (!reply.has_view_cmd()) {
    MS_LOG(ERROR) << "Error: Not ViewCMD, can not get Tensors. Returning default value: ProtoVector<TensorProto>().";
    return ProtoVector<TensorProto>();
  }
  return reply.view_cmd().tensors();
}
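
// build the full tensor name used to query the TensorLoader: node_name:slot, optionally followed by :iter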
std::string GetTensorFullName(const TensorProto &tensor) {
  string node_name = tensor.node_name();
  if (tensor.truncate()) {
    // scopes in the node name are separated by '/'
    // use the name without scope if truncate is true
    std::size_t found = node_name.find_last_of("/");
    node_name = node_name.substr(found + 1);
  }
  return node_name + ":" + tensor.slot() + (tensor.iter() == "" ? "" : ":" + tensor.iter());
}
}  // namespace mindspore