
e2e_dump.cc

/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "debug/data_dump/e2e_dump.h"

#include <unistd.h>
#include <algorithm>
#include <map>
#include <set>
#include <utility>
#include <vector>
#include "debug/data_dump/dump_json_parser.h"
#include "common/trans.h"
#include "debug/anf_ir_utils.h"
#include "debug/common.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "utils/ms_context.h"
#include "runtime/device/kernel_runtime_manager.h"
#include "utils/config_manager.h"
#include "utils/file_utils.h"
#include "debug/data_dump/tensor_stat_dump.h"
#include "abstract/utils.h"
#ifdef ENABLE_DEBUGGER
#include "debug/debug_services.h"
#include "debug/tensor_load.h"
#include "debug/debugger/debugger.h"
#endif

namespace mindspore {
#ifdef ENABLE_D
using ProtoFormat = debugger::dump::OutputFormat;
using ProtoDataType = debugger::dump::OutputDataType;

constexpr int kDhaAtomicAddInfoSize = 128;
constexpr int kL2AtomicAddInfoSize = 128;
constexpr int kAiCoreInfoSize = 256;
constexpr int kDhaAtomicAddStatusSize = 256;
constexpr int kL2AtomicAddStatusSize = 256;
constexpr int kUint64Size = sizeof(uint64_t);

const std::set<std::pair<std::string, std::string>> kSuppTransFormatPair = {
  // {device format, host format}
  {kOpFormat_FRAC_Z, kOpFormat_NCHW},      {kOpFormat_FRAC_NZ, kOpFormat_NCHW},
  {kOpFormat_NC1HWC0, kOpFormat_NCHW},     {kOpFormat_C1HWNCoC0, kOpFormat_NCHW},
  {kOpFormat_NC1HWC0_C04, kOpFormat_NCHW}, {kOpFormat_NDC1HWC0, kOpFormat_NCHW},
  {kOpFormat_FRACTAL_Z_3D, kOpFormat_NCHW}};

const std::map<ProtoFormat, std::string> kFormatToStringMap = {
  {ProtoFormat::FORMAT_NCHW, kOpFormat_NCHW},
  {ProtoFormat::FORMAT_NHWC, kOpFormat_NHWC},
  {ProtoFormat::FORMAT_ND, kOpFormat_ND},
  {ProtoFormat::FORMAT_NC1HWC0, kOpFormat_NC1HWC0},
  {ProtoFormat::FORMAT_FRACTAL_Z, kOpFormat_FRAC_Z},
  {ProtoFormat::FORMAT_NC1HWC0_C04, kOpFormat_NC1HWC0_C04},
  {ProtoFormat::FORMAT_FRACTAL_Z_C04, kOpFormat_FRACTAL_Z_C04},
  {ProtoFormat::FORMAT_NC1KHKWHWC0, kOpFormat_NC1KHKWHWC0},
  {ProtoFormat::FORMAT_HWCN, kOpFormat_HWCN},
  {ProtoFormat::FORMAT_NDHWC, kOpFormat_NDHWC},
  {ProtoFormat::FORMAT_NCDHW, kOpFormat_NCDHW},
  {ProtoFormat::FORMAT_DHWCN, kOpFormat_DHWCN},
  {ProtoFormat::FORMAT_DHWNC, kOpFormat_DHWNC},
  {ProtoFormat::FORMAT_NDC1HWC0, kOpFormat_NDC1HWC0},
  {ProtoFormat::FORMAT_FRACTAL_Z_3D, kOpFormat_FRACTAL_Z_3D},
  {ProtoFormat::FORMAT_C1HWNCoC0, kOpFormat_C1HWNCoC0},
  {ProtoFormat::FORMAT_FRACTAL_NZ, kOpFormat_FRAC_NZ},
  {ProtoFormat::FORMAT_FRACTAL_ZN_LSTM, kOpFormat_FRACTAL_ZN_LSTM}};

const std::map<ProtoDataType, mindspore::TypeId> kDataTypetoMSTypeMap = {
  {ProtoDataType::DT_UNDEFINED, mindspore::TypeId::kTypeUnknown},
  {ProtoDataType::DT_FLOAT, mindspore::TypeId::kNumberTypeFloat32},
  {ProtoDataType::DT_FLOAT16, mindspore::TypeId::kNumberTypeFloat16},
  {ProtoDataType::DT_INT8, mindspore::TypeId::kNumberTypeInt8},
  {ProtoDataType::DT_UINT8, mindspore::TypeId::kNumberTypeUInt8},
  {ProtoDataType::DT_INT16, mindspore::TypeId::kNumberTypeInt16},
  {ProtoDataType::DT_UINT16, mindspore::TypeId::kNumberTypeUInt16},
  {ProtoDataType::DT_INT32, mindspore::TypeId::kNumberTypeInt32},
  {ProtoDataType::DT_INT64, mindspore::TypeId::kNumberTypeInt64},
  {ProtoDataType::DT_UINT32, mindspore::TypeId::kNumberTypeUInt32},
  {ProtoDataType::DT_UINT64, mindspore::TypeId::kNumberTypeUInt64},
  {ProtoDataType::DT_BOOL, mindspore::TypeId::kNumberTypeBool},
  {ProtoDataType::DT_DOUBLE, mindspore::TypeId::kNumberTypeFloat64},
  {ProtoDataType::DT_STRING, mindspore::TypeId::kObjectTypeString}};
#endif
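// Returns true when the configured device target is GPU; used throughout this file to choose
// between the GPU (debugger-based) dump path and the direct device-memory dump path.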
bool E2eDump::IsDeviceTargetGPU() {
  auto context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context);
  return context->get_param<std::string>(MS_CTX_DEVICE_TARGET) == kGPUDevice;
}
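// Dumps one tensor slot on the GPU backend by delegating to the debugger's tensor cache;
// compiles to a no-op when ENABLE_DEBUGGER is not defined.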
void E2eDump::DumpGPUMemToFile(const std::string &file_path, const std::string &original_kernel_name,
                               const device::DeviceAddress &addr, const ShapeVector &int_shapes,
                               const TypeId &host_type, const TypeId &device_type, bool trans_flag, size_t slot,
                               const Debugger *debugger) {
#ifdef ENABLE_DEBUGGER
  auto format = kOpFormat_DEFAULT;
  MS_EXCEPTION_IF_NULL(debugger);
  auto ret = debugger->DumpTensorToFile(original_kernel_name, trans_flag, file_path, format, int_shapes, host_type,
                                        device_type, addr.format(), slot);
  if (!ret) {
    MS_LOG(INFO) << "DumpTensorToFile Failed: flag:" << trans_flag << ", path:" << file_path
                 << ", host_format:" << format;
  }
#endif
}
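// Dumps the outputs of every kernel in the graph's execution order that matches the dump config.
// DumpOutputSingleNode below is the single-kernel variant of the same filtering and dispatch.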
void E2eDump::DumpOutput(const session::KernelGraph *graph, const std::string &dump_path, const Debugger *debugger) {
  MS_EXCEPTION_IF_NULL(graph);
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (!dump_json_parser.OutputNeedDump()) {
    return;
  }
  MS_LOG(INFO) << "Start e2e dump output";
  bool trans_flag = dump_json_parser.trans_flag();
  const auto &apply_kernels = graph->execution_order();
  for (const auto &node : apply_kernels) {
    MS_EXCEPTION_IF_NULL(node);
    std::string kernel_name = GetKernelNodeName(node);
    if (!dump_json_parser.NeedDump(kernel_name)) {
      continue;
    }
    DumpJsonParser::GetInstance().MatchKernel(kernel_name);
    DumpOutputImpl(node, trans_flag, dump_path, &kernel_name, debugger);
  }
}

void E2eDump::DumpOutputSingleNode(const CNodePtr &node, const std::string &dump_path, const Debugger *debugger) {
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (!dump_json_parser.OutputNeedDump()) {
    return;
  }
  bool trans_flag = dump_json_parser.trans_flag();
  MS_EXCEPTION_IF_NULL(node);
  std::string kernel_name = GetKernelNodeName(node);
  if (!dump_json_parser.NeedDump(kernel_name)) {
    return;
  }
  DumpJsonParser::GetInstance().MatchKernel(kernel_name);
  DumpOutputImpl(node, trans_flag, dump_path, &kernel_name, debugger);
}
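// Shared output-dump body: for each output slot that has a device address, builds a file name of
// the form <op_type>.<op_name>.<task_id>.<stream_id>.<timestamp>.output.<slot> and dumps statistics
// and/or tensor data (via the debugger on GPU, directly from device memory otherwise).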
void E2eDump::DumpOutputImpl(const CNodePtr &node, bool trans_flag, const std::string &dump_path,
                             std::string *kernel_name, const Debugger *debugger) {
  MS_EXCEPTION_IF_NULL(node);
  GetFileKernelName(NOT_NULL(kernel_name));
  auto output_size = AnfAlgo::GetOutputTensorNum(node);
  for (size_t j = 0; j < output_size; ++j) {
    if (!AnfAlgo::OutputAddrExist(node, j)) {
      continue;
    }
    auto addr = AnfAlgo::GetOutputAddr(node, j);
    MS_EXCEPTION_IF_NULL(addr);
    ShapeVector int_shapes;
    GetDumpIntShape(node, j, NOT_NULL(&int_shapes), trans_flag);
    auto type = AnfAlgo::GetOutputInferDataType(node, j);
    auto device_type = AnfAlgo::GetOutputDeviceDataType(node, j);
    std::string op_type = AnfAlgo::GetCNodeName(node);
    std::string op_name = GetOpNameWithoutScope(*kernel_name);
    uint32_t task_id = 0;
    uint32_t stream_id = 0;
    uint64_t timestamp = GetTimeStamp();
    std::string file_path = dump_path + '/' + op_type + '.' + op_name + '.' + std::to_string(task_id) + '.' +
                            std::to_string(stream_id) + '.' + std::to_string(timestamp) + ".output." +
                            std::to_string(j);
    if (IsDeviceTargetGPU()) {
      if (DumpJsonParser::GetInstance().IsStatisticDump()) {
        TensorStatDump stat_dump(op_type, op_name, task_id, stream_id, timestamp, false, j, j);
        stat_dump.DumpTensorStatsToFile(GetKernelNodeName(node), dump_path, debugger);
      }
      if (DumpJsonParser::GetInstance().IsTensorDump()) {
        DumpGPUMemToFile(file_path, GetKernelNodeName(node), *addr, int_shapes, type, device_type, trans_flag, j,
                         debugger);
      }
    } else {
      DumpMemToFile(file_path, *addr, int_shapes, type, trans_flag);
    }
  }
}
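// Dumps the inputs of every kernel in the graph's execution order that matches the dump config;
// DumpInputSingleNode below is the single-kernel variant.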
void E2eDump::DumpInput(const session::KernelGraph *graph, const std::string &dump_path, const Debugger *debugger) {
  MS_EXCEPTION_IF_NULL(graph);
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (!dump_json_parser.InputNeedDump()) {
    return;
  }
  MS_LOG(INFO) << "Start e2e dump input";
  bool trans_flag = dump_json_parser.trans_flag();
  const auto &apply_kernels = graph->execution_order();
  for (const auto &node : apply_kernels) {
    MS_EXCEPTION_IF_NULL(node);
    std::string kernel_name = GetKernelNodeName(node);
    if (!dump_json_parser.NeedDump(kernel_name)) {
      continue;
    }
    DumpJsonParser::GetInstance().MatchKernel(kernel_name);
    DumpInputImpl(node, trans_flag, dump_path, &kernel_name, debugger);
  }
}

void E2eDump::DumpInputSingleNode(const CNodePtr &node, const std::string &dump_path, const Debugger *debugger) {
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (!dump_json_parser.InputNeedDump()) {
    return;
  }
  bool trans_flag = dump_json_parser.trans_flag();
  MS_EXCEPTION_IF_NULL(node);
  std::string kernel_name = GetKernelNodeName(node);
  if (!dump_json_parser.NeedDump(kernel_name)) {
    return;
  }
  DumpJsonParser::GetInstance().MatchKernel(kernel_name);
  DumpInputImpl(node, trans_flag, dump_path, &kernel_name, debugger);
}
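// Shared input-dump body: each input is resolved to the producing node's output address. On GPU
// the tensor is looked up in the debugger cache under the producing kernel's name (slot 0);
// elsewhere device memory is dumped directly.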
void E2eDump::DumpInputImpl(const CNodePtr &node, bool trans_flag, const std::string &dump_path,
                            std::string *kernel_name, const Debugger *debugger) {
  MS_EXCEPTION_IF_NULL(node);
  GetFileKernelName(NOT_NULL(kernel_name));
  auto input_size = AnfAlgo::GetInputTensorNum(node);
  for (size_t j = 0; j < input_size; ++j) {
    auto kernel_with_index = AnfAlgo::GetPrevNodeOutput(node, j);
    auto input = kernel_with_index.first;
    auto index = kernel_with_index.second;
    if (!AnfAlgo::OutputAddrExist(input, index)) {
      continue;
    }
    auto addr = AnfAlgo::GetOutputAddr(input, index);
    MS_EXCEPTION_IF_NULL(addr);
    std::string tensor_name = GetKernelNodeName(node);
    size_t slot = j;
    if (IsDeviceTargetGPU()) {
      auto input_kernel = node->input(j + 1);
      std::string input_kernel_name = GetKernelNodeName(input_kernel);
      tensor_name = input_kernel_name;
      slot = 0;
    }
    ShapeVector int_shapes;
    GetDumpIntShape(input, index, NOT_NULL(&int_shapes), trans_flag);
    auto type = AnfAlgo::GetOutputInferDataType(input, index);
    auto device_type = AnfAlgo::GetOutputDeviceDataType(input, index);
    std::string op_type = AnfAlgo::GetCNodeName(node);
    std::string op_name = GetOpNameWithoutScope(*kernel_name);
    uint64_t timestamp = GetTimeStamp();
    uint32_t task_id = 0;
    uint32_t stream_id = 0;
    std::string file_path = dump_path + '/' + op_type + '.' + op_name + '.' + std::to_string(task_id) + '.' +
                            std::to_string(stream_id) + '.' + std::to_string(timestamp) + ".input." +
                            std::to_string(j);
    if (IsDeviceTargetGPU()) {
      if (DumpJsonParser::GetInstance().IsStatisticDump()) {
        TensorStatDump stat_dump(op_type, op_name, task_id, stream_id, timestamp, true, j, slot);
        stat_dump.DumpTensorStatsToFile(tensor_name, dump_path, debugger);
      }
      if (DumpJsonParser::GetInstance().IsTensorDump()) {
        DumpGPUMemToFile(file_path, tensor_name, *addr, int_shapes, type, device_type, trans_flag, slot, debugger);
      }
    } else {
      DumpMemToFile(file_path, *addr, int_shapes, type, trans_flag);
    }
  }
}
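// Dumps one parameter or value node. Value nodes are renamed cst<N> using the id recorded in
// const_map; nodes filtered out by the dump config or without an output address are skipped.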
void E2eDump::DumpSingleAnfNode(const AnfNodePtr &anf_node, const size_t output_index, const std::string &dump_path,
                                bool trans_flag, std::map<std::string, size_t> *const_map, const Debugger *debugger) {
  MS_EXCEPTION_IF_NULL(anf_node);
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if ((!anf_node->isa<Parameter>() && !anf_node->isa<ValueNode>()) || IsValueNode<StringImm>(anf_node)) {
    return;
  }
  std::string node_name = GetKernelNodeName(anf_node);
  std::string dump_name = node_name;
  if (anf_node->isa<ValueNode>()) {
    MS_EXCEPTION_IF_NULL(const_map);
    auto iter = const_map->find(node_name);
    if (iter == const_map->end()) {
      return;
    }
    dump_name = std::string("cst") + std::to_string(iter->second);
  }
  if (!dump_json_parser.NeedDump(node_name)) {
    return;
  }
  DumpJsonParser::GetInstance().MatchKernel(node_name);
  GetFileKernelName(NOT_NULL(&node_name));
  // check if output address exists; if not, return
  if (!AnfAlgo::OutputAddrExist(anf_node, output_index)) {
    return;
  }
  auto addr = AnfAlgo::GetOutputAddr(anf_node, output_index);
  MS_EXCEPTION_IF_NULL(addr);
  ShapeVector int_shapes;
  GetDumpIntShape(anf_node, output_index, NOT_NULL(&int_shapes), trans_flag);
  auto type = AnfAlgo::GetOutputInferDataType(anf_node, output_index);
  auto device_type = AnfAlgo::GetOutputDeviceDataType(anf_node, output_index);
  uint64_t timestamp = GetTimeStamp();
  uint32_t task_id = 0;
  uint32_t stream_id = 0;
  std::string file_path = dump_path + "/Parameter." + dump_name + '.' + std::to_string(task_id) + '.' +
                          std::to_string(stream_id) + '.' + std::to_string(timestamp) + ".output.0";
  if (IsDeviceTargetGPU()) {
    if (dump_json_parser.IsStatisticDump()) {
      TensorStatDump stat_dump("Parameter", dump_name, task_id, stream_id, timestamp, false, 0, 0);
      stat_dump.DumpTensorStatsToFile(node_name, dump_path, debugger);
    }
    if (dump_json_parser.IsTensorDump()) {
      DumpGPUMemToFile(file_path, node_name, *addr, int_shapes, type, device_type, trans_flag, 0, debugger);
    }
  } else {
    DumpMemToFile(file_path, *addr, int_shapes, type, trans_flag);
  }
}
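// Dumps all graph inputs (parameters) and all graph value nodes (constants) into dump_path.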
void E2eDump::DumpParametersAndConst(const session::KernelGraph *graph, const std::string &dump_path,
                                     const Debugger *debugger) {
  MS_EXCEPTION_IF_NULL(graph);
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (!dump_json_parser.OutputNeedDump()) {
    return;
  }
  MS_LOG(INFO) << "Start e2e dump parameters and Const values";
  bool trans_flag = dump_json_parser.trans_flag();
  std::map<std::string, size_t> const_map;
  GetConstantId(graph, &const_map);
  // dump parameters
  const auto &parameters = graph->inputs();
  for (auto &item : parameters) {
    DumpSingleAnfNode(item, PARAMETER_OUTPUT_INDEX, dump_path, trans_flag, &const_map, debugger);
  }
  // dump const values
  auto value_nodes = graph->graph_value_nodes();
  for (const auto &value_node : value_nodes) {
    DumpSingleAnfNode(value_node, VALUE_NODE_OUTPUT_INDEX, dump_path, trans_flag, &const_map, debugger);
  }
}
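// Advances the dump iteration counter once per executed iteration. On GPU the counter is only
// advanced when the starting graph is seen again (old runtime); on Ascend, dataset graphs in sink
// mode are skipped.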
void E2eDump::UpdateIterDumpSetup(const session::KernelGraph *graph, bool sink_mode) {
  uint32_t graph_id = graph->graph_id();
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (IsDeviceTargetGPU()) {
    if (starting_graph_id == INT32_MAX) {
      starting_graph_id = graph_id;
    } else if (starting_graph_id == graph_id && !MsContext::GetInstance()->get_param<bool>(MS_CTX_ENABLE_MINDRT)) {
      // For the mindrt runtime, the dump iteration is updated in UpdateIterGPUDump().
      // Here, update the dump iteration for the GPU old runtime.
      dump_json_parser.UpdateDumpIter();
    }
    return;
  }
  // If device target is Ascend
  if (sink_mode && graph->IsDatasetGraph()) {
    MS_LOG(INFO) << "No need to update iteration for dataset graph.";
    return;
  }
  // In multi-network scripts, the dump iteration equals the number of networks executed so far.
  dump_json_parser.UpdateDumpIter();
}

void E2eDump::DumpSetup(const session::KernelGraph *graph) {
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  bool sink_mode = (ConfigManager::GetInstance().dataset_mode() || E2eDump::isDatasetGraph(graph));
  if (dump_json_parser.async_dump_enabled() || dump_json_parser.e2e_dump_enabled()) {
    UpdateIterDumpSetup(graph, sink_mode);
  }
}

void E2eDump::UpdateIterGPUDump() { DumpJsonParser::GetInstance().UpdateDumpIter(); }
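// Appends the executed global iteration number(s) to
// rank_<id>/execution_order/ms_global_execution_order_graph_<graph_id>.csv, so dumped iterations
// can be mapped back to training steps.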
void E2eDump::DumpRunIter(const KernelGraphPtr &graph, uint32_t rank_id) {
  auto &json_parser = DumpJsonParser::GetInstance();
  if (!(json_parser.async_dump_enabled() || json_parser.e2e_dump_enabled())) {
    return;
  }
  bool sink_mode = (ConfigManager::GetInstance().dataset_mode() || graph->IsDatasetGraph());
  auto iter_num = SizeToInt(LongToSize(ConfigManager::GetInstance().iter_num()));
  if (graph->IsDatasetGraph()) {
    MS_LOG(INFO) << "graph: " << graph->graph_id() << " is dataset graph, not creating graph history file.";
    return;
  }
  std::string execution_order_path = json_parser.path() + "/rank_" + std::to_string(rank_id) + "/execution_order/";
  std::string file_name_to_check =
    execution_order_path + "/ms_global_execution_order_graph_" + std::to_string(graph->graph_id()) + ".csv";
  auto real_path = Common::CreatePrefixPath(file_name_to_check);
  if (!real_path.has_value()) {
    MS_LOG(WARNING) << "Check file path: " << file_name_to_check << " failed.";
    return;
  }
  std::string file_name = real_path.value();
  ChangeFileMode(file_name, S_IWUSR);
  std::ofstream fout(file_name, std::ofstream::app);
  if (!fout.is_open()) {
    MS_LOG(WARNING) << "Open file for saving graph global execution order failed.";
    return;
  }
  if (sink_mode && json_parser.async_dump_enabled()) {
    // for async dump when sink_mode = true, cur_dump_iter() = current_epoch
    // dump history for all iterations in the epoch
    for (int i = 0; i < iter_num; i++) {
      fout << std::to_string(json_parser.cur_dump_iter() * iter_num + i) + "\n";
    }
  } else {
    fout << std::to_string(json_parser.cur_dump_iter()) + "\n";
  }
  fout.close();
  ChangeFileMode(file_name, S_IRUSR);
}
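// Entry point for whole-graph e2e dump of the current iteration: dumps inputs, outputs, then
// parameters and constants, with an optional statistics CSV opened around the whole pass.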
void E2eDump::DumpData(const session::KernelGraph *graph, uint32_t rank_id, const Debugger *debugger) {
  MS_EXCEPTION_IF_NULL(graph);
  bool success = false;
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  uint32_t graph_id = graph->graph_id();
  if (dump_json_parser.GetIterDumpFlag()) {
    MS_LOG(INFO) << "Start e2e dump. Current iteration is " << dump_json_parser.cur_dump_iter();
    MS_LOG(INFO) << "Current graph id is " << graph_id;
    std::string dump_path = GenerateDumpPath(graph_id, rank_id);
    if (dump_json_parser.IsStatisticDump()) {
      TensorStatDump::OpenStatisticsFile(dump_path);
    }
    DumpInput(graph, dump_path, debugger);
    DumpOutput(graph, dump_path, debugger);
    DumpParametersAndConst(graph, dump_path, debugger);
    if (dump_json_parser.IsStatisticDump()) {
      CsvWriter::GetInstance().CloseFile();
    }
    success = true;
  }
  if (success) {
    MS_LOG(DEBUG) << "E2eDump Dump Data completed!";
  } else {
    MS_LOG(DEBUG) << "E2eDump Dump has not occurred!";
  }
}
bool E2eDump::DumpSingleNodeData(const CNodePtr &node, uint32_t graph_id, uint32_t rank_id, const Debugger *debugger) {
  bool success = false;
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (dump_json_parser.GetIterDumpFlag()) {
    std::string dump_path = GenerateDumpPath(graph_id, rank_id);
    DumpInputSingleNode(node, dump_path, debugger);
    DumpOutputSingleNode(node, dump_path, debugger);
    success = true;
  }
  return success;
}

bool E2eDump::DumpParametersAndConstData(const session::KernelGraph *graph, uint32_t rank_id,
                                         const Debugger *debugger) {
  bool success = false;
  uint32_t graph_id = graph->graph_id();
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (dump_json_parser.GetIterDumpFlag()) {
    MS_LOG(INFO) << "DumpParametersAndConst. Current iteration is " << dump_json_parser.cur_dump_iter();
    MS_LOG(INFO) << "Current graph id is " << graph_id;
    std::string dump_path = GenerateDumpPath(graph_id, rank_id);
    DumpParametersAndConst(graph, dump_path, debugger);
    success = true;
  }
  return success;
}

bool E2eDump::isDatasetGraph(const session::KernelGraph *graph) {
  // check if there is a GetNext or InitDataSetQueue node
  const auto &nodes = graph->execution_order();
  for (const auto &node : nodes) {
    auto node_name = AnfAlgo::GetCNodeName(node);
    if (node_name == prim::kPrimGetNext->name() || node_name == prim::kPrimInitDataSetQueue->name()) {
      return true;
    }
  }
  return false;
}

bool E2eDump::DumpDirExists(const std::string &dump_path) {
  DIR *dir = opendir(dump_path.c_str());
  if (dir != nullptr) {
    MS_LOG(INFO) << "Dump dir " << dump_path << " exists";
    if (closedir(dir) == -1) {
      MS_LOG(WARNING) << "Dump dir " << dump_path << " close failed!";
    }
    return true;
  }
  return false;
}

#ifdef ENABLE_D
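// Ascend path: walks the input and output tensors described by the DumpData proto, which are laid
// out back to back in data_ptr (inputs first, then outputs), and dumps each slot.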
void E2eDump::DumpTensorToFile(const std::string &dump_path, const debugger::dump::DumpData &dump_data,
                               char *data_ptr) {
  // dump input tensors
  std::vector<debugger::dump::OpInput> input_tensors(dump_data.input().begin(), dump_data.input().end());
  uint64_t offset = 0;
  std::string in_path = dump_path + ".input.";
  for (uint32_t slot = 0; slot < input_tensors.size(); slot++) {
    auto in_tensor = input_tensors[slot];
    std::string in_slot_path = in_path + std::to_string(slot) + ".";
    auto succ = ConvertFormatForTensorAndDump(in_slot_path, in_tensor, data_ptr + offset);
    if (!succ) {
      MS_LOG(INFO) << "Failed to convert format for tensor " << in_slot_path;
    }
    offset += in_tensor.size();
  }
  // dump output tensors
  std::vector<debugger::dump::OpOutput> output_tensors(dump_data.output().begin(), dump_data.output().end());
  std::string out_path = dump_path + ".output.";
  for (uint32_t slot = 0; slot < output_tensors.size(); slot++) {
    auto out_tensor = output_tensors[slot];
    std::string out_slot_path = out_path + std::to_string(slot) + ".";
    auto succ = ConvertFormatForTensorAndDump(out_slot_path, out_tensor, data_ptr + offset);
    if (!succ) {
      MS_LOG(INFO) << "Failed to convert format for tensor " << out_slot_path;
    }
    offset += out_tensor.size();
  }
}
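// Maps the proto format and data type to their MindSpore equivalents, transforms the device-format
// tensor to host format (NCHW for 4-D shapes, ND otherwise) when a supported conversion exists,
// and writes the result to an npy file; falls back to dumping in device format.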
template <typename T>
bool E2eDump::ConvertFormatForTensorAndDump(std::string dump_path, const T &tensor, char *data_ptr) {
  // get format
  auto iter_fmt = kFormatToStringMap.find(tensor.format());
  if (iter_fmt == kFormatToStringMap.end()) {
    // do not dereference the end() iterator; log the raw proto value instead
    MS_LOG(INFO) << "Unsupported tensor format " << tensor.format() << " for tensor " << dump_path;
    return false;
  }
  std::string device_format = iter_fmt->second;
  // get data type
  auto iter_dtype = kDataTypetoMSTypeMap.find(tensor.data_type());
  if (iter_dtype == kDataTypetoMSTypeMap.end()) {
    MS_LOG(INFO) << "Unsupported tensor type " << tensor.data_type() << " for tensor " << dump_path;
    return false;
  }
  auto src_type = iter_dtype->second;
  // get host shape
  std::vector<size_t> device_shape;
  (void)std::copy(tensor.shape().dim().begin(), tensor.shape().dim().end(), std::back_inserter(device_shape));
  std::vector<size_t> host_shape;
  (void)std::copy(tensor.original_shape().dim().begin(), tensor.original_shape().dim().end(),
                  std::back_inserter(host_shape));
  ShapeVector shape_to;
  (void)std::transform(host_shape.begin(), host_shape.end(), std::back_inserter(shape_to), SizeToLong);
  size_t data_size = (size_t)tensor.size();
  bool trans_success = false;
  auto trans_buf = std::vector<uint8_t>(data_size);
  // convert to host format: either NCHW (4-dimensional) or ND (any other rank)
  const uint8_t kNumFourDim = 4;
  std::string host_format;
  if (host_shape.size() == kNumFourDim) {
    host_format = kOpFormat_NCHW;
  } else {
    host_format = kOpFormat_ND;
  }
  if (device_format != host_format) {
    auto iter = kSuppTransFormatPair.find(std::make_pair(device_format, host_format));
    if (iter == kSuppTransFormatPair.end()) {
      MS_LOG(INFO) << "Do not support convert from format " << device_format << " to " << host_format
                   << " for tensor " << dump_path;
    } else {
      const trans::FormatArgs format_args{data_ptr,   data_size,    host_format, device_format,
                                          host_shape, device_shape, src_type};
      auto group = tensor.sub_format() > 1 ? tensor.sub_format() : 1;
      trans_success = trans::TransFormatFromDeviceToHost(format_args, trans_buf.data(), group);
      if (!trans_success) {
        MS_LOG(ERROR) << "Trans format failed.";
      }
    }
  }
  // dump tensor data into npy file
  bool dump_success = false;
  if (trans_success) {
    dump_path += host_format;
    dump_success = DumpJsonParser::DumpToFile(dump_path, trans_buf.data(), data_size, shape_to, src_type);
  } else {
    dump_path += device_format;
    dump_success = DumpJsonParser::DumpToFile(dump_path, data_ptr, data_size, shape_to, src_type);
  }
  return dump_success;
}
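// Reads a uint64 from the raw overflow buffer and converts little-endian to host byte order;
// the conversion is skipped on Apple targets (assumed little-endian, no le64toh available).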
uint64_t UnpackUint64Value(char *ptr) {
#if defined(__APPLE__)
  return *reinterpret_cast<const uint64_t *>(ptr);
#else
  // the field is 64 bits wide, so the 64-bit conversion must be used here
  return le64toh(*reinterpret_cast<const uint64_t *>(ptr));
#endif
}

std::string IntToHexString(const uint64_t value) {
  std::stringstream ss;
  ss << "0x" << std::hex << value;
  return ss.str();
}
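// Decodes one overflow-info record: six consecutive uint64 fields (model id, stream id, task id,
// task type, pc_start, para_base), with the two addresses rendered as hex strings.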
nlohmann::json E2eDump::ParseOverflowInfo(char *data_ptr) {
  uint32_t index = 0;
  uint64_t model_id = UnpackUint64Value(data_ptr + index);
  index += kUint64Size;
  uint64_t stream_id = UnpackUint64Value(data_ptr + index);
  index += kUint64Size;
  uint64_t task_id = UnpackUint64Value(data_ptr + index);
  index += kUint64Size;
  uint64_t task_type = UnpackUint64Value(data_ptr + index);
  index += kUint64Size;
  uint64_t pc_start = UnpackUint64Value(data_ptr + index);
  index += kUint64Size;
  uint64_t para_base = UnpackUint64Value(data_ptr + index);

  nlohmann::json overflow_info;
  overflow_info["model_id"] = model_id;
  overflow_info["stream_id"] = stream_id;
  overflow_info["task_id"] = task_id;
  overflow_info["task_type"] = task_type;
  overflow_info["pc_start"] = IntToHexString(pc_start);
  overflow_info["para_base"] = IntToHexString(para_base);
  return overflow_info;
}
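// Parses the Opdebug overflow buffer for each output slot: three info blocks (DHA Atomic Add,
// L2 Atomic Add, AI Core) followed by their status words, and saves the combined result as
// <dump_path>.output.<slot>.json.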
void E2eDump::DumpOpDebugToFile(const std::string &dump_path, const debugger::dump::DumpData &dump_data,
                                char *data_ptr) {
  std::string out_path = dump_path + ".output.";
  std::vector<debugger::dump::OpOutput> op_debug(dump_data.output().begin(), dump_data.output().end());
  for (uint32_t slot = 0; slot < op_debug.size(); slot++) {
    uint32_t index = 0;
    // parse DHA Atomic Add info
    nlohmann::json dha_atomic_add_info = ParseOverflowInfo(data_ptr + index);
    index += kDhaAtomicAddInfoSize;
    // parse L2 Atomic Add info
    nlohmann::json l2_atomic_add_info = ParseOverflowInfo(data_ptr + index);
    index += kL2AtomicAddInfoSize;
    // parse AICore info
    nlohmann::json ai_core_info = ParseOverflowInfo(data_ptr + index);
    index += kAiCoreInfoSize;
    // parse DHA Atomic Add status
    dha_atomic_add_info["status"] = UnpackUint64Value(data_ptr + index);
    index += kDhaAtomicAddStatusSize;
    // parse L2 Atomic Add status
    l2_atomic_add_info["status"] = UnpackUint64Value(data_ptr + index);
    index += kL2AtomicAddStatusSize;
    // parse AICore status
    uint64_t kernel_code = UnpackUint64Value(data_ptr + index);
    index += kUint64Size;
    uint64_t block_idx = UnpackUint64Value(data_ptr + index);
    index += kUint64Size;
    uint64_t status = UnpackUint64Value(data_ptr + index);
    ai_core_info["kernel_code"] = IntToHexString(kernel_code);
    ai_core_info["block_idx"] = block_idx;
    ai_core_info["status"] = status;

    nlohmann::json opdebug_data;
    opdebug_data["DHA Atomic Add"] = dha_atomic_add_info;
    opdebug_data["L2 Atomic Add"] = l2_atomic_add_info;
    opdebug_data["AI Core"] = ai_core_info;
    // save json to file
    DumpToFile(out_path + std::to_string(slot) + ".json", opdebug_data.dump());
  }
}
#endif  // ENABLE_D
}  // namespace mindspore