You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

debugger.h 12 kB

5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368
  1. /**
  2. * Copyright 2020-2022 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef MINDSPORE_CCSRC_DEBUG_DEBUGGER_DEBUGGER_H_
  17. #define MINDSPORE_CCSRC_DEBUG_DEBUGGER_DEBUGGER_H_
  18. #include <list>
  19. #include <memory>
  20. #include <string>
  21. #include <utility>
  22. #include <vector>
  23. #include <map>
  24. #include "backend/common/session/kernel_graph.h"
  25. #include "debug/debugger/grpc_client.h"
  26. #include "debug/debug_services.h"
  27. #include "runtime/device/ms_device_shape_transfer.h"
  28. #ifdef ENABLE_D
  29. #include "debug/dump_data_builder.h"
  30. #endif
  31. #include "runtime/device/device_address.h"
  32. #include "include/backend/visible.h"
  33. using debugger::Chunk;
  34. using debugger::DataType;
  35. using debugger::EventReply;
  36. using debugger::GraphProto;
  37. using debugger::ModelProto;
  38. using debugger::Statistics;
  39. using debugger::TensorProto;
  40. using debugger::WatchCondition;
  41. using debugger::WatchCondition_Parameter;
  42. using debugger::WatchNode;
  43. using debugger::WatchpointHit;
  44. using DeviceTensor = mindspore::device::DeviceAddress;
  45. using DeviceTensorPtr = std::shared_ptr<DeviceTensor>;
  46. using mindspore::kernel::KernelLaunchInfo;
  47. template <class T>
  48. using ProtoVector = google::protobuf::RepeatedPtrField<T>;
  49. namespace mindspore {
// Different types of command received by the debugger from the client.
// NOTE: values must be kept in sync with the client-side proto and server-side proto.
enum class DebuggerCommand {
  kExitCMD = 2,            // terminate the debugging session (handled via Exit)
  kRunCMD = 3,             // resume execution; breaks the CommandLoop (see ProcessRunCMD)
  kSetCMD = 4,             // set or remove a watchpoint (see ProcessKSetCMD)
  kViewCMD = 5,            // view tensor data at base/statistics/value level (see ProcessKViewCMD)
  kVersionMatchedCMD = 6,  // version handshake reply (see SendMetadata/CheckSendMetadata)
  kUnknownCMD = -1         // unrecognized or malformed command
};
// Debugger is the singleton backend for online debugging and dump support.
// It serializes kernel graphs to proto, exchanges commands/tensor data with a
// client over gRPC, evaluates watchpoints, and coordinates per-step dumping.
// Inherits enable_shared_from_this so members can hand out shared_ptr<Debugger>
// aliases of the singleton.
class BACKEND_EXPORT Debugger : public std::enable_shared_from_this<Debugger> {
 public:
  // Thread-safe lazy accessor for the singleton instance.
  // new (std::nothrow) is used directly (instead of make_shared) because the
  // constructor is private; on allocation failure the stored pointer stays null.
  static std::shared_ptr<Debugger> GetInstance() {
    std::lock_guard<std::mutex> i_lock(instance_lock_);
    if (debugger_ == nullptr) {
      debugger_ = std::shared_ptr<Debugger>(new (std::nothrow) Debugger());
    }
    return debugger_;
  }

  // destructor
  ~Debugger() = default;

  // Initialize the debugger; only saves device_id and device_target.
  void Init(const uint32_t device_id, const std::string device_target);

  // Reset the debugger back to its uninitialized state.
  void Reset();

  // Per-step setup before executing a list of graphs (mindRT path); also
  // records the parameters in their original order for later loading.
  void PreExecuteGraphDebugger(const std::vector<KernelGraphPtr> &graphs,
                               const std::vector<AnfNodePtr> &origin_parameters_order);

  // Enable the debugger for a graph: send the graph and wait for a command.
  // Does nothing if this graph has been set already.
  void PreExecute(const KernelGraphPtr &graph_ptr);

  // Track the current root graph id and remember the previous one.
  void SetCurrentAndPrevRootGraph(uint32_t root_graph_id);

  // Flag for Ascend kernel-by-kernel (single-op) execution mode.
  void SetAscendKernelByKernelFlag(bool value) { ascend_kernel_by_kernel_ = value; }
  bool GetAscendKernelByKernelFlag() const { return ascend_kernel_by_kernel_; }

  // Append graph_id to the list of graphs run so far (rungraph_id_list_).
  void StoreRunGraphIdList(uintint32_t graph_id);

  // Analyze tensors and wait for a command after a step.
  // Doesn't need a graph_ptr because it was saved during PreExecute.
  void PostExecute();

  // True if data dumping is enabled for the current iteration.
  bool DumpDataEnabledIteration() const;

  // Rank id of this process in the distributed job.
  static uint32_t GetRankID();

  // Dump the graph's constant data (Ascend backend).
  void DumpConstantDataAscend(const KernelGraphPtr &graph);

  // Dump a single kernel's inputs/outputs; launch_info supplies device
  // addresses when available (may be null).
  void DumpSingleNode(const CNodePtr &node, uint32_t graph_id, const KernelLaunchInfo *launch_info = nullptr);

  // Dump data at graph-compile time for the given kernel graph.
  void DumpInGraphCompiler(const KernelGraphPtr &kernel_graph);

  // Per-step teardown counterpart of PreExecuteGraphDebugger.
  void PostExecuteGraphDebugger();

  // True if this kernel's output data must be read back (e.g. for watchpoints).
  bool ReadNodeDataRequired(const CNodePtr &kernel) const;

  // Hook run after each kernel; last_kernel marks the final kernel of the step.
  void PostExecuteNode(const CNodePtr &kernel, bool last_kernel);

  // Write one tensor slot to filepath; trans_flag requests converting from the
  // device format (addr_format) to host format (host_fmt) first.
  bool DumpTensorToFile(const std::string &filepath, bool trans_flag, const std::string &host_fmt,
                        const std::string &addr_format, const std::string &tensor_name, size_t slot,
                        const std::vector<int64_t> &host_shape, TypeId host_type) const;

  // Hand a tensor to the tensor loader; keep_prev keeps the previous-step copy.
  bool LoadNewTensor(const std::shared_ptr<TensorData> &tensor, bool keep_prev);

  // Look up a loaded tensor by full name; null if absent.
  std::shared_ptr<TensorData> GetTensor(const std::string &tensor_name) const;

  // simple state accessors / mutators
  bool debugger_enabled() const;
  bool partial_memory() const;
  void SetEnableHeartbeat(bool enabled);
  void SetCurNode(const std::string &cur_name);
  std::string run_level() const;

  // Check if any feature that uses the debugger backend is enabled.
  bool DebuggerBackendEnabled() const;

  void SetTrainingDone(bool training_done);

  // Returns true if a reply was received and the MindSpore version matched the
  // MindInsight version. Pass version_check=true to perform the backend
  // compatibility check with MindInsight.
  bool SendMetadata(bool version_check);
  bool CheckSendMetadata();

  // Load parameters and constants into the tensor loader (all graphs / one graph).
  void LoadParametersAndConst();
  void LoadParametersAndConst(const KernelGraphPtr &graph);
  void LoadParametersAllGraphs();
  void LoadConstsForGraph(const KernelGraphPtr &graph);

  // Dump parameters, constants and execution history for the step.
  void DumpParamsAndConstAndHistory();

  // Step-counter maintenance (graph mode / GPU mindRT mode).
  void UpdateStepNum(const session::KernelGraph *graph);
  void UpdateStepNumGPU();

  // Drop tensor data collected for the current step.
  void ClearCurrentData();

  // Load the outputs of the current graph into the tensor loader.
  void LoadGraphOutputs();

  // Detect dataset-sink-mode for the graph and update related bookkeeping.
  void CheckDatasetSinkMode(const KernelGraphPtr &graph_ptr);

  // Register a compiled graph with the debugger.
  void LoadGraphs(const KernelGraphPtr &graph_ptr);

  // Id of the first graph run in this session (from rungraph_id_list_).
  uint32_t GetFirstRunGraphId() const;

  // root-graph tracking accessors
  uint32_t GetCurrentRootGraphId() const { return cur_root_graph_id_; }
  uint32_t GetPrevRootGraphId() const { return prev_root_graph_id_; }

  // Graphs executed in the current step (mindRT).
  std::vector<KernelGraphPtr> GetStepGraphPtrList() const { return graph_ptr_step_vec_; }

  // current-graph accessors
  void SetGraphPtr(const KernelGraphPtr &graph_ptr) { graph_ptr_ = graph_ptr; }
  const KernelGraphPtr GetGraphPtr() const { return graph_ptr_; }
  const std::list<KernelGraphPtr> GetGraphPtrList() const { return graph_ptr_list_; }

  // True if tensor_name exists in the current step's loaded tensors.
  bool TensorExistsInCurrent(const std::string &tensor_name);

  // Check if dump using the debugger backend is enabled.
  bool CheckDebuggerDumpEnabled() const;

  // Check if the debugger is enabled.
  bool CheckDebuggerEnabled() const;

  // Map of graph id -> iterations per epoch (dataset sink mode).
  std::map<uint32_t, int32_t> GetGraphIterMap() { return graph_iter_num_map_; }
  void UpdateGraphIterMap(uint32_t graph_id, int32_t iter_num);

  // Parameters collected for the current step (mindRT).
  std::vector<AnfNodePtr> GetParametersMindRT() const { return parameters_mindRT_; }

#ifdef ENABLE_D
  // Async-dump support (Ascend): builders keyed by node name.
  std::shared_ptr<DumpDataBuilder> LoadDumpDataBuilder(const std::string &node_name);
  void ClearDumpDataBuilder(const std::string &node_name);
  void WaitForWriteFileFinished();
#endif

 private:
  // private constructor for singleton
  Debugger();

  // Enable the debugger: instantiate class members and read the env variables
  // needed for the grpc client.
  void EnableDebugger();
  void CheckDebuggerEnabledParam() const;
  bool CheckDebuggerPartialMemoryEnabled() const;

  // Check and save the graph pointer.
  void CheckGraphPtr(const KernelGraphPtr &graph_ptr);

  // Check if the graph is a dataset graph.
  void CheckDatasetGraph();

  // Serialize a graph and get its proto.
  GraphProto GetGraphProto(const KernelGraphPtr &graph_ptr) const;

  // Send a heartbeat message to the UI every `period` seconds
  // (once per 30 seconds by default).
  void SendHeartbeat(int32_t period);

  // Send graph(s) to the client and enter the command wait loop.
  void SendGraphAndSuspend(const GraphProto &graph_proto);
  void SendMultiGraphsAndSuspend(const std::list<GraphProto> &graph_proto_list);

  // Send multi_graphs and clear graph_proto_list_.
  void SendMultiGraphsAndClear(const KernelGraphPtr &graph_ptr);

  // Wait for a command and process it: send a command request and process the
  // reply in a loop; break on RunCMD.
  void CommandLoop();

  // per-command handlers
  void ProcessRunCMD(const EventReply &reply);
  void ProcessKSetCMD(const EventReply &reply);
  void ProcessKViewCMD(const EventReply &reply);

  // ViewCMD levels: base info / statistics / full values.
  void ViewBaseLevel(const EventReply &reply);
  void ViewStatLevel(const EventReply &reply);
  void ViewValueLevel(const EventReply &reply);

  // Set what nodes and conditions to watch.
  void SetWatchpoint(const ProtoVector<WatchNode> &nodes, const WatchCondition &condition, const int32_t id,
                     const ProtoVector<WatchCondition_Parameter> &parameters);

  // Remove the watchpoint with the given id.
  void RemoveWatchpoint(const int32_t id);

  // Load tensor values / base info / statistics for the view command.
  std::list<TensorProto> LoadTensors(const ProtoVector<TensorProto> &tensors) const;
  std::list<TensorBase> LoadTensorsBase(const ProtoVector<TensorProto> &tensors) const;
  std::list<TensorSummary> LoadTensorsStat(const ProtoVector<TensorProto> &tensors) const;

  // Terminate the training process.
  void Exit(bool exit_success = false);

  // Analyze tensors and check watchpoint conditions; returns the names of
  // tensors and which condition they hit. With no arguments, checks all
  // watchpoints; recheck re-evaluates previously hit ones.
  std::list<WatchpointHit> CheckWatchpoints(const std::string &watchnode = std::string(),
                                            const CNodePtr &kernel = nullptr, bool recheck = false);

  // Send the watchpoints that hit to the client.
  void SendWatchpoints(const std::list<WatchpointHit> &points);

  // Validate the configured port / IP strings.
  bool CheckPort(const std::string &port) const;
  bool CheckIp(const std::string &host) const;

  // Load a single node's output tensor (graph mode / mindRT parameter path).
  void LoadSingleAnfnode(const AnfNodePtr &anf_node, const size_t output_index, uint32_t root_graph_id);
  void LoadSingleParameterMindRT(const AnfNodePtr &anf_node);

  // ---- class members ----
  std::unique_ptr<GrpcClient> grpc_client_;        // channel to the debugger client
  std::unique_ptr<DebugServices> debug_services_;  // tensor loading + watchpoint engine
  std::unique_ptr<std::thread> heartbeat_thread_;  // runs SendHeartbeat
  KernelGraphPtr graph_ptr_;                       // graph saved by PreExecute
  uint32_t device_id_;
  std::string device_target_;
  int32_t num_step_;
  bool debugger_enabled_;
  bool suspended_at_last_kernel_;
  std::string run_level_;  // granularity of the last run command
  std::string node_name_;
  std::string cur_name_;
  bool training_done_;
  bool send_metadata_done_;
  bool received_new_graph_;
  bool is_dataset_graph_;
  bool partial_memory_;
  std::mutex access_lock_;
  // UINT32_MAX marks "no root graph seen yet".
  uint32_t cur_root_graph_id_ = UINT32_MAX;
  uint32_t prev_root_graph_id_ = UINT32_MAX;
  // Flag to keep track of the very first suspension of the debugger.
  bool initial_suspend_;
  bool enable_heartbeat_;
  std::list<GraphProto> graph_proto_list_;
  std::list<KernelGraphPtr> graph_ptr_list_;
  // The vector of graph pointers that have been run in the current step.
  std::vector<KernelGraphPtr> graph_ptr_step_vec_;
  // The vector of all the parameters for the current step for mindRT.
  std::vector<AnfNodePtr> parameters_mindRT_;
  std::vector<uint32_t> visited_root_graph_ids_;
  // Map to store iter num in each epoch when dataset_sink_mode is true.
  std::map<uint32_t, int32_t> graph_iter_num_map_;
#ifdef ENABLE_D
  // To construct kernel data for async dump; key is the dump path to the node.
  std::map<std::string, std::shared_ptr<DumpDataBuilder>> dump_data_construct_map_;
#endif
  // singleton storage (guarded by instance_lock_)
  inline static std::mutex instance_lock_ = {};
  inline static std::shared_ptr<Debugger> debugger_ = nullptr;
  uint32_t not_dataset_graph_sum_;
  std::list<uint32_t> rungraph_id_list_;
  bool ascend_kernel_by_kernel_;
  std::string version_;
};
using DebuggerPtr = std::shared_ptr<Debugger>;
// Serialize a FuncGraph into the debugger ModelProto representation.
ModelProto GetDebuggerFuncGraphProto(const FuncGraphPtr &func_graph);
// For getting proto DataType from the Type of a Tensor.
DataType GetDebuggerNumberDataType(const TypePtr &type);
// Process a reply and extract its command type.
DebuggerCommand GetCommand(const EventReply &reply);
// Parsers that pull individual payload fields out of an EventReply:
// watchpoint parameters, watch nodes, node name, run level, watch condition,
// watchpoint id, delete flag, tensor list, and the version-matched flag.
ProtoVector<WatchCondition_Parameter> GetParameters(const EventReply &reply);
ProtoVector<WatchNode> GetWatchnodes(const EventReply &reply);
std::string GetNodeName(const EventReply &reply);
std::string GetRunLevel(const EventReply &reply);
WatchCondition GetWatchcondition(const EventReply &reply);
int32_t GetWatchpointID(const EventReply &reply);
bool GetWatchpointDelete(const EventReply &reply);
ProtoVector<TensorProto> GetTensors(const EventReply &reply);
bool GetMiVersionMatched(const EventReply &reply);
// Get the full name of a tensor, which is the name used in TensorLoader.
std::string GetTensorFullName(const TensorProto &tensor);
  272. } // namespace mindspore
  273. #endif // MINDSPORE_CCSRC_DEBUG_DEBUGGER_DEBUGGER_H_