You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

debugger.h 9.8 kB

5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef MINDSPORE_CCSRC_DEBUG_DEBUGGER_DEBUGGER_H_
  17. #define MINDSPORE_CCSRC_DEBUG_DEBUGGER_DEBUGGER_H_
  18. #include <list>
  19. #include <memory>
  20. #include <string>
  21. #include <utility>
  22. #include <vector>
  23. #include <map>
  24. #include "backend/session/kernel_graph.h"
  25. #include "debug/debugger/grpc_client.h"
  26. #include "debug/debug_services.h"
  27. #include "common/trans.h"
  28. using debugger::Chunk;
  29. using debugger::DataType;
  30. using debugger::EventReply;
  31. using debugger::GraphProto;
  32. using debugger::ModelProto;
  33. using debugger::TensorProto;
  34. using debugger::WatchCondition;
  35. using debugger::WatchCondition_Parameter;
  36. using debugger::WatchNode;
  37. using debugger::WatchpointHit;
// Convenience alias for protobuf repeated message fields, used for the
// watch-node / watchpoint-parameter / tensor lists exchanged via EventReply.
template <class T>
using ProtoVector = google::protobuf::RepeatedPtrField<T>;
  40. namespace mindspore {
// Different types of command received by the debugger backend.
// NOTE: the numeric values must stay in sync with the client-side and
// server-side proto definitions — do not renumber.
enum class DebuggerCommand {
  kExitCMD = 2,            // terminate the training process (see Exit())
  kRunCMD = 3,             // resume execution (see ProcessRunCMD())
  kSetCMD = 4,             // set/remove a watchpoint (see ProcessKSetCMD())
  kViewCMD = 5,            // view tensor values (see ProcessKViewCMD())
  kVersionMatchedCMD = 6,  // version-handshake result (see SendMetadata())
  kUnknownCMD = -1         // reply did not carry a recognized command
};
// Singleton debugger backend: serializes kernel graphs to proto, talks to the
// debugger front end over gRPC, and drives DebugServices for tensor loading,
// watchpoint checking and dump support.
class Debugger : public std::enable_shared_from_this<Debugger> {
 public:
  // Lazily creates and returns the process-wide singleton instance.
  // Thread-safe via instance_lock_. NOTE: uses new(std::nothrow), so the
  // returned pointer may be null if allocation failed — callers should check.
  static std::shared_ptr<Debugger> GetInstance() {
    std::lock_guard<std::mutex> i_lock(instance_lock_);
    if (debugger_ == nullptr) {
      debugger_ = std::shared_ptr<Debugger>(new (std::nothrow) Debugger());
    }
    return debugger_;
  }
  // Destructor.
  ~Debugger() = default;
  // Initialize: only records device_id and device_target; the heavy setup
  // (grpc client, debug services) happens later in EnableDebugger().
  void Init(const uint32_t device_id, const std::string device_target);
  // Reset debugger state.
  void Reset();
  // Pre-run hook covering a whole set of graphs (graph-debugger mode).
  void PreExecuteGraphDebugger(const std::vector<KernelGraphPtr> &graphs);
  // Enable debugger: send graph and wait for command.
  // Does nothing if the graph is set already.
  void PreExecute(const KernelGraphPtr &graph_ptr);
  // Analyze tensors and wait for command.
  // Doesn't need a graph_ptr because it was saved during PreExecute.
  void PostExecute();
  // Whether dump is enabled for the current iteration.
  bool DumpDataEnabledIteration() const;
  static uint32_t GetRankID();
  // Dump the whole kernel graph / a single node (e2e dump support).
  void Dump(const KernelGraphPtr &kernel_graph) const;
  void DumpSingleNode(const CNodePtr &node, uint32_t graph_id);
  void DumpSetup(const KernelGraphPtr &kernel_graph) const;
  void DumpInGraphCompiler(const KernelGraphPtr &kernel_graph);
  void PostExecuteGraphDebugger();
  // True if the debugger needs this kernel's output data (e.g. for a
  // node-level suspension or watchpoint re-check).
  bool ReadNodeDataRequired(const CNodePtr &kernel) const;
  void PostExecuteNode(const CNodePtr &kernel, bool last_kernel);
  // Suspend the execution after a debug_op.
  void PostDebugOp();
  // Write one tensor slot to a file; trans_flag requests host-format
  // translation (see common/trans.h). Returns success.
  bool DumpTensorToFile(const std::string &tensor_name, bool trans_flag, const std::string &filepath,
                        const std::string &host_fmt, const std::vector<int64_t> &host_shape, TypeId host_type,
                        TypeId device_type, const std::string &addr_format, size_t slot) const;
  // Delegates to DebugServices: is a watchpoint set on this kernel?
  bool DebugServicesIsWatchPoint(const std::string &kernel_name, const CNodePtr &kernel = nullptr) const;
  // Tensor-loader pass-throughs: clear tensors, manage iteration number,
  // drop previous-iteration tensors, and load a newly read tensor.
  void EmptyTensor();
  void SetTensorLoaderIterNum(uint32_t iter_num);
  void EmptyPrevTensor();
  uint32_t GetTensorLoaderIterNum() const;
  bool LoadNewTensor(const std::shared_ptr<TensorData> &tensor, bool keep_prev);
  // Simple state accessors (lowercase names follow accessor convention).
  bool debugger_enabled() const;
  bool partial_memory() const;
  void SetCurNode(const std::string &cur_name);
  std::string run_level() const;
  void SetStepNum(int32_t cur_num_step);
  int32_t step_num() const;
  // Maps (stream_id, task_id) -> op name; used to resolve overflow ops.
  void SetStreamTaskToOpnameMap(const std::map<std::pair<uint32_t, uint32_t>, std::string> &mapping);
  // Check if any feature that uses the debugger backend is enabled.
  bool DebuggerBackendEnabled() const;
  void SetTrainingDone(bool training_done);
  // Returns true if a reply was received and the MindSpore version matched the
  // MindInsight version. Pass version_check=true to perform the backend
  // compatibility check with MindInsight.
  bool SendMetadata(bool version_check);
  // Load parameter and constant tensors, for the saved graph or a given one.
  void LoadParametersAndConst();
  void LoadParametersAndConst(const KernelGraphPtr &graph);
  // Step-counter maintenance (raw pointer overload for Ascend, GPU variant).
  void UpdateStepNum(const session::KernelGraph *graph);
  void UpdateStepNumGPU();
  void ClearCurrentData();
  void LoadGraphOutputs();
  void CheckDatasetSinkMode();
  void LoadGraphs(const KernelGraphPtr &graph_ptr);
  uint32_t GetFirstRunGraphId() const;
  void SetGraphPtr(const KernelGraphPtr &graph_ptr) { graph_ptr_ = graph_ptr; }
  // Returns a copy of the tracked graph list.
  std::list<KernelGraphPtr> GetGraphPtrList() const { return graph_ptr_list_; }
  bool TensorExistsInCurrent(const std::string &tensor_name);
  // Check if dump using the debugger backend is enabled.
  bool CheckDebuggerDumpEnabled() const;

 private:
  // Private constructor for singleton.
  Debugger();
  // Enable debugger: instantiate class members and read env variables for
  // the grpc client (host/port etc.).
  void EnableDebugger();
  // Enablement / configuration checks.
  bool CheckDebuggerEnabled() const;
  void CheckDebuggerEnabledParam() const;
  bool CheckDebuggerPartialMemoryEnabled() const;
  // Check and save the graph pointer.
  void CheckGraphPtr(const KernelGraphPtr &graph_ptr);
  // Check if the graph is a dataset graph.
  void CheckDatasetGraph();
  // Serialize a graph and get its proto.
  GraphProto GetGraphProto(const KernelGraphPtr &graph_ptr) const;
  // Send graph(s) to the front end and enter the command wait loop.
  void SendGraphAndSuspend(const GraphProto &graph_proto);
  void SendMultiGraphsAndSuspend(const std::list<GraphProto> &graph_proto_list);
  // Wait for a command and process it: sends a command request and processes
  // the reply in a loop; breaks on RunCMD.
  void CommandLoop();
  // Per-command reply handlers.
  void ProcessRunCMD(const EventReply &reply);
  void ProcessKSetCMD(const EventReply &reply);
  void ProcessKViewCMD(const EventReply &reply);
  // Set which nodes and conditions to watch.
  void SetWatchpoint(const ProtoVector<WatchNode> &nodes, const WatchCondition &condition, const int32_t id,
                     const ProtoVector<WatchCondition_Parameter> &parameters);
  // Remove the watchpoint with the given id.
  void RemoveWatchpoint(const int32_t id);
  // Load tensors for the view command.
  std::list<TensorProto> LoadTensors(const ProtoVector<TensorProto> &tensors) const;
  // Terminate the training process.
  void Exit();
  // Analyze tensors and check watchpoint conditions.
  // Returns the names of tensors and which condition they hit.
  std::list<WatchpointHit> CheckWatchpoints(const std::string &watchnode = std::string(),
                                            const CNodePtr &kernel = nullptr, bool recheck = false);
  // Send watchpoints that hit to the front end.
  void SendWatchpoints(const std::list<WatchpointHit> &points);
  // Find whether any operation overflow happened and return the op names.
  std::vector<std::string> CheckOpOverflow();
  // Validate the grpc endpoint read from the environment.
  bool CheckPort(const std::string &port) const;
  bool CheckIp(const std::string &host) const;
  void LoadSingleAnfnode(const AnfNodePtr &anf_node, const size_t output_index);

  // -- class members --
  std::unique_ptr<GrpcClient> grpc_client_;      // channel to the debugger front end
  std::unique_ptr<DebugServices> debug_services_;  // tensor loader + watchpoint engine
  KernelGraphPtr graph_ptr_;                     // graph saved by PreExecute
  uint32_t device_id_;
  std::string device_target_;
  int32_t num_step_;                             // current step counter
  bool debugger_enabled_;
  bool suspended_at_last_kernel_;
  std::string run_level_;                        // granularity of RunCMD (see GetRunLevel)
  std::string node_name_;
  std::string cur_name_;
  bool training_done_;
  bool is_dataset_graph_;
  bool partial_memory_;
  std::mutex access_lock_;                       // guards mutable state above
  // (stream_id, task_id) -> op name; filled via SetStreamTaskToOpnameMap.
  std::map<std::pair<uint32_t, uint32_t>, std::string> stream_task_to_opname_;
  // Per-graph overflowed op names; presumably keyed by graph id — confirm in .cc.
  std::map<uint32_t, std::vector<std::string>> overflow_ops_;
  double last_overflow_bin_;
  std::map<uint32_t, std::string> overflow_bin_path_;
  // Flag to keep track of the very first suspension of the debugger.
  bool initial_suspend_;
  std::list<GraphProto> graph_proto_list_;
  std::list<KernelGraphPtr> graph_ptr_list_;
  // Singleton storage.
  static std::mutex instance_lock_;
  static std::shared_ptr<Debugger> debugger_;
  uint32_t not_dataset_graph_sum_;
  std::list<uint32_t> rungraph_id_list_;
  std::string version_;
};
using DebuggerPtr = std::shared_ptr<Debugger>;
// Serialize a FuncGraph to the debugger ModelProto (string form or proto form).
std::string GetDebuggerFuncGraphProtoString(const FuncGraphPtr &func_graph);
ModelProto GetDebuggerFuncGraphProto(const FuncGraphPtr &func_graph);
// Map a tensor's Type to the proto DataType.
DataType GetDebuggerNumberDataType(const TypePtr &type);
// Extract the command type from a reply.
DebuggerCommand GetCommand(const EventReply &reply);
// Helpers that parse individual fields out of an EventReply.
ProtoVector<WatchCondition_Parameter> GetParameters(const EventReply &reply);
ProtoVector<WatchNode> GetWatchnodes(const EventReply &reply);
std::string GetNodeName(const EventReply &reply);
std::string GetRunLevel(const EventReply &reply);
WatchCondition GetWatchcondition(const EventReply &reply);
int32_t GetWatchpointID(const EventReply &reply);
bool GetWatchpointDelete(const EventReply &reply);
ProtoVector<TensorProto> GetTensors(const EventReply &reply);
// Whether the reply reports that the MindInsight version matched.
bool GetMiVersionMatched(const EventReply &reply);
// Get the full name of a tensor, which is the name used in TensorLoader.
std::string GetTensorFullName(const TensorProto &tensor);
// Fold a byte buffer into a uint64 — endianness/width handling lives in the .cc.
uint64_t BytestoUInt64(const std::vector<char> &buffer);
  226. } // namespace mindspore
  227. #endif // MINDSPORE_CCSRC_DEBUG_DEBUGGER_DEBUGGER_H_