
e2e_dump.cc

/**
 * Copyright 2020-2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "debug/data_dump/e2e_dump.h"
#include <unistd.h>
#include <sstream>
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <utility>
#include <vector>
#include "debug/data_dump/dump_json_parser.h"
#include "utils/ms_device_shape_transfer.h"
#include "debug/anf_ir_utils.h"
#include "debug/common.h"
#include "backend/common/session/anf_runtime_algorithm.h"
#include "utils/ms_context.h"
#include "runtime/device/kernel_runtime_manager.h"
#include "utils/config_manager.h"
#include "utils/file_utils.h"
#include "debug/data_dump/tensor_stat_dump.h"
#include "abstract/utils.h"
#include "runtime/hardware/device_context_manager.h"
#ifdef ENABLE_DEBUGGER
#include "debug/debug_services.h"
#include "debug/tensor_load.h"
#include "debug/debugger/debugger.h"
#endif
namespace mindspore {
#ifdef ENABLE_D
using ProtoFormat = debugger::dump::OutputFormat;
using ProtoDataType = debugger::dump::OutputDataType;
constexpr int kDhaAtomicAddInfoSize = 128;
constexpr int kL2AtomicAddInfoSize = 128;
constexpr int kAiCoreInfoSize = 256;
constexpr int kDhaAtomicAddStatusSize = 256;
constexpr int kL2AtomicAddStatusSize = 256;
constexpr int kUint64Size = sizeof(uint64_t);
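// Byte sizes of the sections of the per-slot overflow debug record parsed in
// ParseOverflowInfo and DumpOpDebugToFile below.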
const std::set<std::pair<std::string, std::string>> kSuppTransFormatPair = {
  // {device format, host format}
  {kOpFormat_FRAC_Z, kOpFormat_NCHW},
  {kOpFormat_FRAC_NZ, kOpFormat_NCHW},
  {kOpFormat_NC1HWC0, kOpFormat_NCHW},
  {kOpFormat_C1HWNCoC0, kOpFormat_NCHW},
  {kOpFormat_NC1HWC0_C04, kOpFormat_NCHW},
  {kOpFormat_NDC1HWC0, kOpFormat_NCHW},
  {kOpFormat_FRACTAL_Z_3D, kOpFormat_NCHW}};
const std::map<ProtoFormat, std::string> kFormatToStringMap = {
  {ProtoFormat::FORMAT_NCHW, kOpFormat_NCHW},
  {ProtoFormat::FORMAT_NHWC, kOpFormat_NHWC},
  {ProtoFormat::FORMAT_ND, kOpFormat_ND},
  {ProtoFormat::FORMAT_NC1HWC0, kOpFormat_NC1HWC0},
  {ProtoFormat::FORMAT_FRACTAL_Z, kOpFormat_FRAC_Z},
  {ProtoFormat::FORMAT_NC1HWC0_C04, kOpFormat_NC1HWC0_C04},
  {ProtoFormat::FORMAT_FRACTAL_Z_C04, kOpFormat_FRACTAL_Z_C04},
  {ProtoFormat::FORMAT_NC1KHKWHWC0, kOpFormat_NC1KHKWHWC0},
  {ProtoFormat::FORMAT_HWCN, kOpFormat_HWCN},
  {ProtoFormat::FORMAT_NDHWC, kOpFormat_NDHWC},
  {ProtoFormat::FORMAT_NCDHW, kOpFormat_NCDHW},
  {ProtoFormat::FORMAT_DHWCN, kOpFormat_DHWCN},
  {ProtoFormat::FORMAT_DHWNC, kOpFormat_DHWNC},
  {ProtoFormat::FORMAT_NDC1HWC0, kOpFormat_NDC1HWC0},
  {ProtoFormat::FORMAT_FRACTAL_Z_3D, kOpFormat_FRACTAL_Z_3D},
  {ProtoFormat::FORMAT_C1HWNCoC0, kOpFormat_C1HWNCoC0},
  {ProtoFormat::FORMAT_FRACTAL_NZ, kOpFormat_FRAC_NZ},
  {ProtoFormat::FORMAT_FRACTAL_ZN_LSTM, kOpFormat_FRACTAL_ZN_LSTM}};
const std::map<ProtoDataType, mindspore::TypeId> kDataTypetoMSTypeMap = {
  {ProtoDataType::DT_UNDEFINED, mindspore::TypeId::kTypeUnknown},
  {ProtoDataType::DT_FLOAT, mindspore::TypeId::kNumberTypeFloat32},
  {ProtoDataType::DT_FLOAT16, mindspore::TypeId::kNumberTypeFloat16},
  {ProtoDataType::DT_INT8, mindspore::TypeId::kNumberTypeInt8},
  {ProtoDataType::DT_UINT8, mindspore::TypeId::kNumberTypeUInt8},
  {ProtoDataType::DT_INT16, mindspore::TypeId::kNumberTypeInt16},
  {ProtoDataType::DT_UINT16, mindspore::TypeId::kNumberTypeUInt16},
  {ProtoDataType::DT_INT32, mindspore::TypeId::kNumberTypeInt32},
  {ProtoDataType::DT_INT64, mindspore::TypeId::kNumberTypeInt64},
  {ProtoDataType::DT_UINT32, mindspore::TypeId::kNumberTypeUInt32},
  {ProtoDataType::DT_UINT64, mindspore::TypeId::kNumberTypeUInt64},
  {ProtoDataType::DT_BOOL, mindspore::TypeId::kNumberTypeBool},
  {ProtoDataType::DT_DOUBLE, mindspore::TypeId::kNumberTypeFloat64},
  {ProtoDataType::DT_STRING, mindspore::TypeId::kObjectTypeString}};
#endif
bool E2eDump::IsDeviceTargetGPU() {
  auto context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context);
  return context->get_param<std::string>(MS_CTX_DEVICE_TARGET) == kGPUDevice;
}
/*
 * Feature group: Dump.
 * Target device group: GPU.
 * Runtime category: Old runtime, MindRT.
 * Description: This function dumps an in-memory tensor to disk on GPU machines.
 */
void E2eDump::DumpGPUMemToFile(const std::string &file_path, const std::string &original_kernel_name,
                               const device::DeviceAddress &addr, const ShapeVector &int_shapes,
                               const TypeId &host_type, const TypeId &device_type, bool trans_flag, size_t slot,
                               const Debugger *debugger) {
#ifdef ENABLE_DEBUGGER
  auto format = kOpFormat_DEFAULT;
  MS_EXCEPTION_IF_NULL(debugger);
  auto ret = debugger->DumpTensorToFile(original_kernel_name, trans_flag, file_path, format, int_shapes, host_type,
                                        device_type, addr.format(), slot);
  if (!ret) {
    MS_LOG(INFO) << "DumpTensorToFile Failed: flag:" << trans_flag << ", path:" << file_path
                 << ", host_format:" << format;
  }
#endif
}
void E2eDump::DumpOutput(const session::KernelGraph *graph, const std::string &dump_path, const Debugger *debugger) {
  MS_EXCEPTION_IF_NULL(graph);
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (!dump_json_parser.OutputNeedDump()) {
    return;
  }
  MS_LOG(INFO) << "Start e2e dump output";
  bool trans_flag = dump_json_parser.trans_flag();
  const auto &apply_kernels = graph->execution_order();
  for (const auto &node : apply_kernels) {
    MS_EXCEPTION_IF_NULL(node);
    std::string kernel_name = GetKernelNodeName(node);
    if (!dump_json_parser.NeedDump(kernel_name)) {
      continue;
    }
    DumpJsonParser::GetInstance().MatchKernel(kernel_name);
    DumpOutputImpl(node, trans_flag, dump_path, &kernel_name, debugger);
  }
}
void E2eDump::DumpOutputSingleNode(const CNodePtr &node, const std::string &dump_path, const Debugger *debugger) {
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (!dump_json_parser.OutputNeedDump()) {
    return;
  }
  bool trans_flag = dump_json_parser.trans_flag();
  MS_EXCEPTION_IF_NULL(node);
  std::string kernel_name = GetKernelNodeName(node);
  if (!dump_json_parser.NeedDump(kernel_name)) {
    return;
  }
  DumpJsonParser::GetInstance().MatchKernel(kernel_name);
  DumpOutputImpl(node, trans_flag, dump_path, &kernel_name, debugger);
}
void E2eDump::DumpOutputImpl(const CNodePtr &node, bool trans_flag, const std::string &dump_path,
                             std::string *kernel_name, const Debugger *debugger) {
  MS_EXCEPTION_IF_NULL(node);
  GetFileKernelName(NOT_NULL(kernel_name));
  auto output_size = AnfAlgo::GetOutputTensorNum(node);
  for (size_t j = 0; j < output_size; ++j) {
    if (!AnfAlgo::OutputAddrExist(node, j)) {
      continue;
    }
    auto addr = AnfAlgo::GetOutputAddr(node, j);
    MS_EXCEPTION_IF_NULL(addr);
    ShapeVector int_shapes;
    GetDumpIntShape(node, j, NOT_NULL(&int_shapes), trans_flag);
    auto type = AnfAlgo::GetOutputInferDataType(node, j);
    auto device_type = AnfAlgo::GetOutputDeviceDataType(node, j);
    std::string op_type = AnfAlgo::GetCNodeName(node);
    std::string op_name = GetOpNameWithoutScope(*kernel_name);
    uint32_t task_id = 0;
    uint32_t stream_id = 0;
    uint64_t timestamp = GetTimeStamp();
    std::string file_path = dump_path + '/' + op_type + '.' + op_name + '.' + std::to_string(task_id) + '.' +
                            std::to_string(stream_id) + '.' + std::to_string(timestamp) + ".output." +
                            std::to_string(j);
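    // Illustrative example of the resulting name: <dump_path>/Conv2D.Conv2D-op1.0.0.<timestamp>.output.0
    // (task_id and stream_id are fixed to 0 on this path).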
    if (DumpJsonParser::GetInstance().IsStatisticDump() &&
        (IsDeviceTargetGPU() || Debugger::GetInstance()->GetAscendKernelByKernelFlag())) {
      TensorStatDump stat_dump(op_type, op_name, task_id, stream_id, timestamp, false, j, j);
      (void)stat_dump.DumpTensorStatsToFile(GetKernelNodeName(node), dump_path, debugger);
    }
    if (DumpJsonParser::GetInstance().IsTensorDump()) {
      if (IsDeviceTargetGPU()) {
        DumpGPUMemToFile(file_path, GetKernelNodeName(node), *addr, int_shapes, type, device_type, trans_flag, j,
                         debugger);
      } else {
        DumpMemToFile(file_path, *addr, int_shapes, type, trans_flag);
      }
    }
  }
}
void E2eDump::DumpInput(const session::KernelGraph *graph, const std::string &dump_path, const Debugger *debugger) {
  MS_EXCEPTION_IF_NULL(graph);
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (!dump_json_parser.InputNeedDump()) {
    return;
  }
  MS_LOG(INFO) << "Start e2e dump input";
  bool trans_flag = dump_json_parser.trans_flag();
  const auto &apply_kernels = graph->execution_order();
  for (const auto &node : apply_kernels) {
    MS_EXCEPTION_IF_NULL(node);
    std::string kernel_name = GetKernelNodeName(node);
    if (!dump_json_parser.NeedDump(kernel_name)) {
      continue;
    }
    DumpJsonParser::GetInstance().MatchKernel(kernel_name);
    DumpInputImpl(node, trans_flag, dump_path, &kernel_name, debugger);
  }
}
void E2eDump::DumpInputSingleNode(const CNodePtr &node, const std::string &dump_path, const Debugger *debugger,
                                  const KernelLaunchInfo *launch_info) {
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (!dump_json_parser.InputNeedDump()) {
    return;
  }
  bool trans_flag = dump_json_parser.trans_flag();
  MS_EXCEPTION_IF_NULL(node);
  std::string kernel_name = GetKernelNodeName(node);
  if (!dump_json_parser.NeedDump(kernel_name)) {
    return;
  }
  DumpJsonParser::GetInstance().MatchKernel(kernel_name);
  DumpInputImpl(node, trans_flag, dump_path, &kernel_name, debugger, launch_info);
}
std::shared_ptr<device::DeviceAddress> CreateAscendDeviceAddress(const KernelLaunchInfo *launch_info, size_t index,
                                                                 TypeId type) {
  MS_EXCEPTION_IF_NULL(launch_info);
  auto addr_ptr = launch_info->inputs_[index];
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  auto device_id = ms_context->get_param<uint32_t>(MS_CTX_DEVICE_ID);
  auto device_context =
    device::DeviceContextManager::GetInstance().GetOrCreateDeviceContext({kAscendDevice, device_id});
  auto format = kOpFormat_DEFAULT;
  MS_EXCEPTION_IF_NULL(addr_ptr);
  return device_context->CreateDeviceAddress(addr_ptr->addr, addr_ptr->size, format, type);
}
void E2eDump::DumpInputImpl(const CNodePtr &node, bool trans_flag, const std::string &dump_path,
                            std::string *kernel_name, const Debugger *debugger, const KernelLaunchInfo *launch_info) {
  MS_EXCEPTION_IF_NULL(node);
  GetFileKernelName(NOT_NULL(kernel_name));
  auto input_size = AnfAlgo::GetInputTensorNum(node);
  for (size_t j = 0; j < input_size; ++j) {
    auto kernel_with_index = AnfAlgo::GetPrevNodeOutput(node, j);
    auto input = kernel_with_index.first;
    auto index = kernel_with_index.second;
    if (!AnfAlgo::OutputAddrExist(input, index)) {
      continue;
    }
    auto addr = AnfAlgo::GetOutputAddr(input, index);
    MS_EXCEPTION_IF_NULL(addr);
    std::string tensor_name = GetKernelNodeName(node);
    size_t slot = j;
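    // On GPU and in Ascend kernel-by-kernel mode, the debugger keeps the tensor under the
    // producing (input) kernel's name at slot 0, so look it up by that name instead.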
    if (IsDeviceTargetGPU() || Debugger::GetInstance()->GetAscendKernelByKernelFlag()) {
      auto input_kernel = node->input(j + 1);
      std::string input_kernel_name = GetKernelNodeName(input_kernel);
      tensor_name = input_kernel_name;
      slot = 0;
    }
    ShapeVector int_shapes;
    GetDumpIntShape(input, index, NOT_NULL(&int_shapes), trans_flag);
    auto type = AnfAlgo::GetOutputInferDataType(input, index);
    auto device_type = AnfAlgo::GetOutputDeviceDataType(input, index);
    std::string op_type = AnfAlgo::GetCNodeName(node);
    std::string op_name = GetOpNameWithoutScope(*kernel_name);
    uint64_t timestamp = GetTimeStamp();
    uint32_t task_id = 0;
    uint32_t stream_id = 0;
    std::string file_path = dump_path + '/' + op_type + '.' + op_name + '.' + std::to_string(task_id) + '.' +
                            std::to_string(stream_id) + '.' + std::to_string(timestamp) + ".input." +
                            std::to_string(j);
    MS_EXCEPTION_IF_NULL(addr);
    if (DumpJsonParser::GetInstance().IsStatisticDump() &&
        (IsDeviceTargetGPU() || Debugger::GetInstance()->GetAscendKernelByKernelFlag())) {
      TensorStatDump stat_dump(op_type, op_name, task_id, stream_id, timestamp, true, j, slot);
      (void)stat_dump.DumpTensorStatsToFile(tensor_name, dump_path, debugger);
    }
    if (DumpJsonParser::GetInstance().IsTensorDump()) {
      if (IsDeviceTargetGPU()) {
        DumpGPUMemToFile(file_path, tensor_name, *addr, int_shapes, type, device_type, trans_flag, slot, debugger);
      } else if (Debugger::GetInstance()->GetAscendKernelByKernelFlag()) {
        // Load the address from launch_info in Ascend kernel-by-kernel mode.
        auto ascend_device_addr = CreateAscendDeviceAddress(launch_info, j, type);
        DumpMemToFile(file_path, *ascend_device_addr, int_shapes, type, trans_flag);
      } else {
        DumpMemToFile(file_path, *addr, int_shapes, type, trans_flag);
      }
    }
  }
}
void E2eDump::DumpSingleAnfNode(const AnfNodePtr &anf_node, const size_t output_index, const std::string &dump_path,
                                bool trans_flag, const Debugger *debugger) {
  MS_EXCEPTION_IF_NULL(anf_node);
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if ((!anf_node->isa<Parameter>() && !anf_node->isa<ValueNode>()) || IsValueNode<StringImm>(anf_node)) {
    return;
  }
  std::string node_name = GetKernelNodeName(anf_node);
  if (!dump_json_parser.NeedDump(node_name)) {
    return;
  }
  DumpJsonParser::GetInstance().MatchKernel(node_name);
  GetFileKernelName(NOT_NULL(&node_name));
  std::string dump_name = node_name;
  const std::string cst_prefix = "Default--";
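  // Constant (value node) names are expected to carry the "Default--" scope prefix; it is
  // stripped below so the dump file is named after the constant itself.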
  if (anf_node->isa<ValueNode>()) {
    if (dump_name.find(cst_prefix) == std::string::npos) {
      MS_LOG(INFO) << "Incorrect constant format: " << dump_name;
      return;
    }
    dump_name = node_name.substr(cst_prefix.length());
    trans_flag = false;
  }
  // Check if the output address exists; if not, return.
  if (!AnfAlgo::OutputAddrExist(anf_node, output_index)) {
    return;
  }
  auto addr = AnfAlgo::GetOutputAddr(anf_node, output_index);
  MS_EXCEPTION_IF_NULL(addr);
  ShapeVector int_shapes;
  GetDumpIntShape(anf_node, output_index, NOT_NULL(&int_shapes), trans_flag);
  auto type = AnfAlgo::GetOutputInferDataType(anf_node, output_index);
  auto device_type = AnfAlgo::GetOutputDeviceDataType(anf_node, output_index);
  uint64_t timestamp = GetTimeStamp();
  uint32_t task_id = 0;
  uint32_t stream_id = 0;
  std::string file_path = dump_path + "/Parameter." + dump_name + '.' + std::to_string(task_id) + '.' +
                          std::to_string(stream_id) + '.' + std::to_string(timestamp) + ".output.0";
  if (IsDeviceTargetGPU()) {
    if (dump_json_parser.IsStatisticDump()) {
      TensorStatDump stat_dump("Parameter", dump_name, task_id, stream_id, timestamp, false, 0, 0);
      (void)stat_dump.DumpTensorStatsToFile(node_name, dump_path, debugger);
    }
    if (dump_json_parser.IsTensorDump()) {
      DumpGPUMemToFile(file_path, node_name, *addr, int_shapes, type, device_type, trans_flag, 0, debugger);
    }
  } else {
    DumpMemToFile(file_path, *addr, int_shapes, type, trans_flag);
  }
}
/*
 * Feature group: Dump.
 * Target device group: Ascend, GPU.
 * Runtime category: MindRT.
 * Description: This function is similar to DumpSingleAnfNode but only dumps parameter nodes in MindRT.
 * It uses GetParameterInfo to get the dump info for the parameter node.
 */
void E2eDump::DumpSingleParameterNode(const AnfNodePtr &anf_node, const std::string &dump_path, bool trans_flag,
                                      const Debugger *debugger) {
  MS_EXCEPTION_IF_NULL(anf_node);
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  std::string node_name = GetKernelNodeName(anf_node);
  if (!anf_node->isa<Parameter>() || !dump_json_parser.NeedDump(node_name) || !dump_json_parser.OutputNeedDump()) {
    return;
  }
  DumpJsonParser::GetInstance().MatchKernel(node_name);
  GetFileKernelName(NOT_NULL(&node_name));
  ShapeVector int_shapes;
  TypeId type;
  TypeId device_type;
  auto addr = GetParameterInfo(anf_node, NOT_NULL(&int_shapes), NOT_NULL(&type), NOT_NULL(&device_type));
  if (addr == nullptr) {
    MS_LOG(DEBUG) << "Skip node: " << node_name << ". Parameter data is not available for mindRT.";
    return;
  }
  uint64_t timestamp = GetTimeStamp();
  uint32_t task_id = 0;
  uint32_t stream_id = 0;
  std::string file_path = dump_path + "/Parameter." + node_name + '.' + std::to_string(task_id) + '.' +
                          std::to_string(stream_id) + '.' + std::to_string(timestamp) + ".output.0";
  if (IsDeviceTargetGPU()) {
    if (dump_json_parser.IsStatisticDump()) {
      TensorStatDump stat_dump("Parameter", node_name, task_id, stream_id, timestamp, false, 0, 0);
      (void)stat_dump.DumpTensorStatsToFile(node_name, dump_path, debugger);
    }
    if (dump_json_parser.IsTensorDump()) {
      DumpGPUMemToFile(file_path, node_name, *addr, int_shapes, type, device_type, trans_flag, 0, debugger);
    }
  } else {
    DumpMemToFile(file_path, *addr, int_shapes, type, trans_flag);
  }
}
void E2eDump::DumpParameters(const session::KernelGraph *graph, const std::string &dump_path,
                             const Debugger *debugger) {
  MS_EXCEPTION_IF_NULL(graph);
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (!dump_json_parser.OutputNeedDump()) {
    return;
  }
  MS_LOG(INFO) << "Start e2e dump parameters";
  bool trans_flag = dump_json_parser.trans_flag();
  // dump parameters
  const auto &parameters = graph->inputs();
  for (auto &item : parameters) {
    DumpSingleAnfNode(item, PARAMETER_OUTPUT_INDEX, dump_path, trans_flag, debugger);
  }
}
void E2eDump::DumpConstantData(const session::KernelGraph *graph, uint32_t rank_id, const Debugger *debugger) {
  MS_EXCEPTION_IF_NULL(graph);
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (!IsDeviceTargetGPU() || !dump_json_parser.e2e_dump_enabled()) {
    return;
  }
  uint32_t graph_id = graph->graph_id();
  std::string cst_path = GenerateDumpPath(graph_id, rank_id, true);
  if (!Common::FileExists(cst_path)) {
    DumpConstantData(graph, cst_path, debugger);
  }
}
void E2eDump::DumpConstantData(const session::KernelGraph *graph, const std::string &cst_dump_path,
                               const Debugger *debugger) {
  // Dump constants to npy files
  MS_EXCEPTION_IF_NULL(graph);
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  MS_LOG(INFO) << "DumpConstants. Current iteration is " << dump_json_parser.cur_dump_iter();
  MS_LOG(INFO) << "Current graph id is " << graph->graph_id();
  if (!dump_json_parser.OutputNeedDump()) {
    return;
  }
  const auto value_nodes = graph->graph_value_nodes();
  for (auto &item : value_nodes) {
    DumpSingleAnfNode(item, VALUE_NODE_OUTPUT_INDEX, cst_dump_path, false, debugger);
  }
}
/*
 * Feature group: Dump.
 * Target device group: Ascend, GPU.
 * Runtime category: Old runtime.
 * Description: This function updates the dump iteration for the GPU and Ascend old runtime.
 */
void E2eDump::UpdateIterOldRTDump(const session::KernelGraph *graph) {
  MS_EXCEPTION_IF_NULL(graph);
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  uint32_t graph_id = graph->graph_id();
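  // starting_graph_id remembers the first graph seen; each time that same graph runs again, a
  // new step has started, which is when the dump iteration is advanced.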
  if (IsDeviceTargetGPU()) {
    if (starting_graph_id == INT32_MAX) {
      starting_graph_id = graph_id;
    } else if (starting_graph_id == graph_id && !MsContext::GetInstance()->get_param<bool>(MS_CTX_ENABLE_MINDRT)) {
      // Updating the dump iter for the mindrt runtime is done using UpdateIterGPUDump().
      // Update the dump iter for the GPU old runtime.
      dump_json_parser.UpdateDumpIter();
    }
    return;
  }
  // If the device target is Ascend
  if (graph->IsDatasetGraph()) {
    MS_LOG(INFO) << "No need to update iteration for dataset graph.";
    return;
  }
  // In multi-network scripts, the dump iter is equal to the number of networks that have been executed so far.
  dump_json_parser.UpdateDumpIter();
}
/*
 * Feature group: Dump.
 * Target device group: Ascend, GPU.
 * Runtime category: MindRT.
 * Description: This function updates the dump iteration for GPU and Ascend MindRT dump. Please note that dump with
 * dataset_sink_mode = True is not supported for GPU.
 */
void E2eDump::UpdateIterMindRTDump() {
  auto debugger = Debugger::GetInstance();
  // The dataset graph is always the first graph in the list when dataset_sink_mode is true.
  auto graph = (debugger->GetStepGraphPtrList())[0];
  auto context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context);
  if (context->get_param<std::string>(MS_CTX_DEVICE_TARGET) == kAscendDevice && graph->IsDatasetGraph()) {
    MS_LOG(INFO) << "No need to update iteration for dataset graph.";
    return;
  }
  // Update the dump iter for GPU and kernel-by-kernel Ascend dump.
  DumpJsonParser::GetInstance().UpdateDumpIter();
}
/*
 * Feature group: Dump.
 * Target device group: Ascend, GPU.
 * Runtime category: Old runtime, MindRT.
 * Description: Generates graph history files (recording all the iteration numbers in which the graph was executed)
 * for the given graph and rank_id. If dataset_sink_mode is true for async dump on Ascend, this function is called
 * once per epoch and dumps all the iterations of the epoch to the graph history file.
 */
void E2eDump::DumpRunIter(const KernelGraphPtr &graph, uint32_t rank_id) {
  auto &json_parser = DumpJsonParser::GetInstance();
  if (!(json_parser.async_dump_enabled() || json_parser.e2e_dump_enabled())) {
    return;
  }
  bool sink_mode = (ConfigManager::GetInstance().dataset_mode() || graph->IsDatasetGraph());
  auto iter_num = SizeToInt(LongToSize(ConfigManager::GetInstance().iter_num()));
  if (graph->IsDatasetGraph()) {
    MS_LOG(INFO) << "graph: " << graph->graph_id() << " is dataset graph, not creating graph history file.";
    return;
  }
  std::string execution_order_path = json_parser.path() + "/rank_" + std::to_string(rank_id) + "/execution_order/";
  std::string file_name_to_check =
    execution_order_path + "/ms_global_execution_order_graph_" + std::to_string(graph->graph_id()) + ".csv";
  auto real_path = Common::CreatePrefixPath(file_name_to_check);
  if (!real_path.has_value()) {
    MS_LOG(WARNING) << "Check file path: " << file_name_to_check << " failed.";
    return;
  }
  std::string file_name = real_path.value();
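  // The history file is kept read-only between dumps; grant the owner write permission for the
  // append below and restore read-only mode afterwards.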
  ChangeFileMode(file_name, S_IWUSR);
  std::ofstream fout(file_name, std::ofstream::app);
  if (!fout.is_open()) {
    MS_LOG(WARNING) << "Open file for saving graph global execution order failed.";
    return;
  }
  if (sink_mode && json_parser.async_dump_enabled() && !Debugger::GetInstance()->GetAscendKernelByKernelFlag()) {
    // For async dump when sink_mode = true, cur_dump_iter() is the current epoch;
    // dump the history for all iterations in the epoch.
    Debugger::GetInstance()->UpdateGraphIterMap(graph->graph_id(), iter_num);
    auto graph_iter_map = Debugger::GetInstance()->GetGraphIterMap();
    auto step_per_epoch = IntToSize(graph_iter_map[graph->graph_id()]);
    for (size_t i = 0; i < step_per_epoch; i++) {
      auto step = (json_parser.cur_dump_iter() * step_per_epoch) + i;
      fout << (std::to_string(step) + "\n");
    }
  } else {
    fout << std::to_string(json_parser.cur_dump_iter()) + "\n";
  }
  fout.close();
  ChangeFileMode(file_name, S_IRUSR);
}
/*
 * Feature group: Dump.
 * Target device group: Ascend, GPU.
 * Runtime category: Old runtime, MindRT.
 * Description: This function dumps the whole graph. It is used for the old runtime on GPU and Ascend and for
 * super-kernel MindRT on Ascend.
 */
void E2eDump::DumpData(const session::KernelGraph *graph, uint32_t rank_id, const Debugger *debugger) {
  MS_EXCEPTION_IF_NULL(graph);
  bool success = false;
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  uint32_t graph_id = graph->graph_id();
  if (!dump_json_parser.e2e_dump_enabled()) {
    return;
  }
  if (dump_json_parser.GetIterDumpFlag()) {
    MS_LOG(INFO) << "Start e2e dump. Current iteration is " << dump_json_parser.cur_dump_iter();
    MS_LOG(INFO) << "Current graph id is " << graph_id;
    std::string dump_path = GenerateDumpPath(graph_id, rank_id);
    if (dump_json_parser.IsStatisticDump()) {
      (void)TensorStatDump::OpenStatisticsFile(dump_path);
    }
    DumpInput(graph, dump_path, debugger);
    DumpOutput(graph, dump_path, debugger);
    if (!MsContext::GetInstance()->get_param<bool>(MS_CTX_ENABLE_MINDRT)) {
      // Dump parameters for the old runtime. For mindRT it is done in PostExecuteGraphDebugger.
      DumpParameters(graph, dump_path, debugger);
      // DumpConstantData for the GPU old runtime.
      DumpConstantData(graph, rank_id, debugger);
    }
    if (dump_json_parser.IsStatisticDump()) {
      CsvWriter::GetInstance().CloseFile();
    }
    success = true;
  }
  if (success) {
    MS_LOG(DEBUG) << "E2eDump Dump Data completed!";
  } else {
    MS_LOG(DEBUG) << "E2eDump Dump has not occurred!";
  }
}
/*
 * Feature group: Dump.
 * Target device group: Ascend, GPU.
 * Runtime category: MindRT.
 * Description: This function dumps a single node. It is used for mindRT on GPU and Ascend kernel-by-kernel.
 */
bool E2eDump::DumpSingleNodeData(const CNodePtr &node, uint32_t graph_id, uint32_t rank_id, const Debugger *debugger,
                                 const KernelLaunchInfo *launch_info) {
  bool success = false;
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (dump_json_parser.DumpEnabledForIter()) {
    std::string dump_path = GenerateDumpPath(graph_id, rank_id);
    DumpInputSingleNode(node, dump_path, debugger, launch_info);
    DumpOutputSingleNode(node, dump_path, debugger);
    success = true;
  }
  return success;
}
/*
 * Feature group: Dump.
 * Target device group: Ascend, GPU.
 * Runtime category: MindRT.
 * Description: This function dumps all the parameters in the current root graph for GPU, Ascend super-kernel
 * (e2e dump) and Ascend kernel-by-kernel (e2e and async dump).
 */
void E2eDump::DumpParametersData(uint32_t rank_id, const Debugger *debugger) {
  uint32_t root_graph_id = debugger->GetCurrentRootGraphId();
  auto &dump_json_parser = DumpJsonParser::GetInstance();
  if (dump_json_parser.async_dump_enabled() && !debugger->GetAscendKernelByKernelFlag()) {
    // In async dump, parameters are dumped for mindRT only in kernel-by-kernel mode.
    return;
  }
  if (dump_json_parser.DumpEnabledForIter()) {
    MS_LOG(INFO) << "DumpParameters. Current iteration is " << dump_json_parser.cur_dump_iter();
    MS_LOG(INFO) << "Current root graph id is " << root_graph_id;
    std::string dump_path = GenerateDumpPath(root_graph_id, rank_id);
    bool trans_flag = dump_json_parser.trans_flag();
    for (auto &item : debugger->GetParametersMindRT()) {
      DumpSingleParameterNode(item, dump_path, trans_flag, debugger);
    }
  }
}
#ifdef ENABLE_D
/*
 * Feature group: Dump.
 * Target device group: Ascend.
 * Runtime category: Old runtime, MindRT.
 * Description: This function is for Ascend A+M dump only. It parses and converts each tensor slot in the DumpData
 * object and dumps the tensor data to an npy file or the statistic data to a csv file.
 */
void E2eDump::DumpTensorToFile(const std::string &dump_path, const debugger::dump::DumpData &dump_data,
                               char *data_ptr) {
  // dump input tensors
  std::vector<debugger::dump::OpInput> input_tensors(dump_data.input().begin(), dump_data.input().end());
  uint64_t offset = 0;
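  // data_ptr holds all tensors packed back to back (inputs first, then outputs);
  // offset walks through the buffer slot by slot.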
  for (uint32_t slot = 0; slot < input_tensors.size(); slot++) {
    auto in_tensor = input_tensors[slot];
    auto succ = ConvertFormatForTensorAndDump(dump_path, in_tensor, data_ptr + offset, "input", slot);
    if (!succ) {
      MS_LOG(INFO) << "Failed to convert format for tensor " << dump_path << ".input." << slot;
    }
    offset += in_tensor.size();
  }
  // dump output tensors
  std::vector<debugger::dump::OpOutput> output_tensors(dump_data.output().begin(), dump_data.output().end());
  for (uint32_t slot = 0; slot < output_tensors.size(); slot++) {
    auto out_tensor = output_tensors[slot];
    auto succ = ConvertFormatForTensorAndDump(dump_path, out_tensor, data_ptr + offset, "output", slot);
    if (!succ) {
      MS_LOG(INFO) << "Failed to convert format for tensor " << dump_path << ".output." << slot;
    }
    offset += out_tensor.size();
  }
}
/*
 * Feature group: Dump.
 * Target device group: Ascend.
 * Runtime category: Old runtime, MindRT.
 * Description: It serves A+M dump. Saves statistics of the tensor data into the dump path as configured.
 */
template <typename T>
bool DumpTensorStatsIfNeeded(const std::string &dump_path, const T &tensor, char *data_ptr, const std::string &io,
                             uint32_t slot, const ShapeVector &shape, TypeId type) {
  // dump_path: dump_dir/op_type.op_name.task_id.stream_id.timestamp
  if (!DumpJsonParser::GetInstance().IsStatisticDump()) {
    return true;
  }
  size_t pos = dump_path.rfind("/");
  std::string file_name = dump_path.substr(pos + 1);
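  // file_name has the form op_type.op_name.task_id.stream_id.timestamp. The first dot is searched
  // from the front and the last three from the back, so an op_name that itself contains dots is
  // still split correctly.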
  size_t first_dot = file_name.find(".");
  size_t fourth_dot = file_name.rfind(".");
  size_t third_dot = file_name.rfind(".", fourth_dot - 1);
  size_t second_dot = file_name.rfind(".", third_dot - 1);
  if (first_dot == std::string::npos || second_dot == std::string::npos || third_dot == std::string::npos ||
      first_dot == second_dot) {
    MS_LOG(ERROR) << "Dump path " << dump_path << " received is not well formed";
    return false;
  }
  std::string op_type = file_name.substr(0, first_dot);
  std::string op_name = file_name.substr(first_dot + 1, second_dot - first_dot - 1);
  std::string task_id = file_name.substr(second_dot + 1, third_dot - second_dot - 1);
  std::string stream_id = file_name.substr(third_dot + 1, fourth_dot - third_dot - 1);
  std::string timestamp = file_name.substr(fourth_dot + 1);
  TensorStatDump stat_dump(op_type, op_name, task_id, stream_id, timestamp, io, slot, slot);
  std::shared_ptr<TensorData> data = std::make_shared<TensorData>();
  if (type <= TypeId::kNumberTypeBegin || type >= TypeId::kNumberTypeComplex64) {
    MS_LOG(ERROR) << "Data type of operator " << file_name << " is not supported by statistic dump";
    return false;
  }
  data->SetType(type);
  data->SetByteSize((size_t)tensor.size());
  data->SetShape(shape);
  data->SetDataPtr(data_ptr);
  return stat_dump.DumpTensorStatsToFile(dump_path.substr(0, pos), data);
}
/*
 * Feature group: Dump.
 * Target device group: Ascend.
 * Runtime category: Old runtime, MindRT.
 * Description: It serves A+M dump. Parses each attribute in the DumpData proto object, converts the tensor from its
 * device format to a MindSpore-supported host format, and saves the tensor data or statistics as configured.
 */
template <typename T>
bool E2eDump::ConvertFormatForTensorAndDump(std::string dump_path, const T &tensor, char *data_ptr,
                                            const std::string &io, uint32_t slot) {
  // dump_path: dump_dir/op_type.op_name.task_id.stream_id.timestamp
  std::ostringstream dump_path_ss;
  dump_path_ss << dump_path << "." << io << "." << slot << ".";
  std::string dump_path_slot = dump_path_ss.str();
  // get format
  auto iter_fmt = kFormatToStringMap.find(tensor.format());
  if (iter_fmt == kFormatToStringMap.end()) {
    MS_LOG(INFO) << "Unsupported tensor format for tensor " << dump_path << ": unknown(" << tensor.format() << ")";
    return false;
  }
  std::string device_format = iter_fmt->second;
  // get data type
  auto iter_dtype = kDataTypetoMSTypeMap.find(tensor.data_type());
  if (iter_dtype == kDataTypetoMSTypeMap.end()) {
    MS_LOG(INFO) << "Unsupported data type for tensor " << dump_path << ": unknown(" << tensor.data_type() << ")";
    return false;
  }
  auto src_type = iter_dtype->second;
  // get host shape
  std::vector<size_t> device_shape;
  (void)std::copy(tensor.shape().dim().begin(), tensor.shape().dim().end(), std::back_inserter(device_shape));
  ShapeVector shape_d;
  (void)std::transform(device_shape.begin(), device_shape.end(), std::back_inserter(shape_d), SizeToLong);
  std::vector<size_t> host_shape;
  (void)std::copy(tensor.original_shape().dim().begin(), tensor.original_shape().dim().end(),
                  std::back_inserter(host_shape));
  ShapeVector shape_to;
  (void)std::transform(host_shape.begin(), host_shape.end(), std::back_inserter(shape_to), SizeToLong);
  size_t data_size = (size_t)tensor.size();
  bool trans_success = false;
  auto trans_buf = std::vector<uint8_t>(data_size);
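  // The supported device formats are padded layouts, so the host-format tensor is assumed to
  // fit within the device-side data_size bytes reserved here.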
  // Convert to the host format, which can be either NCHW or ND (non-4-dimensional).
  const uint8_t kNumFourDim = 4;
  std::string host_format;
  if (host_shape.size() == kNumFourDim) {
    host_format = kOpFormat_NCHW;
  } else {
    host_format = kOpFormat_ND;
  }
  if (device_format != host_format) {
    auto iter = kSuppTransFormatPair.find(std::make_pair(device_format, host_format));
    if (iter == kSuppTransFormatPair.end()) {
      MS_LOG(INFO) << "Do not support convert from format " << device_format << " to " << host_format
                   << " for tensor " << dump_path_slot;
    } else {
      const trans::FormatArgs format_args{data_ptr, data_size, host_format, device_format, shape_to, shape_d,
                                          src_type};
      auto group = tensor.sub_format() > 1 ? tensor.sub_format() : 1;
      trans_success = trans::TransFormatFromDeviceToHost(format_args, trans_buf.data(), group);
      if (!trans_success) {
        MS_LOG(ERROR) << "Trans format failed.";
      }
    }
  }
  // dump tensor data into npy file
  bool dump_success = true;
  if (trans_success) {
    dump_success = DumpTensorStatsIfNeeded(dump_path, tensor, reinterpret_cast<char *>(trans_buf.data()), io, slot,
                                           shape_to, src_type);
    if (DumpJsonParser::GetInstance().IsTensorDump()) {
      dump_path_slot += host_format;
      dump_success =
        DumpJsonParser::DumpToFile(dump_path_slot, trans_buf.data(), data_size, shape_to, src_type) && dump_success;
    }
  } else {
    dump_success = DumpTensorStatsIfNeeded(dump_path, tensor, data_ptr, io, slot, shape_to, src_type);
    if (DumpJsonParser::GetInstance().IsTensorDump()) {
      dump_path_slot += device_format;
      dump_success =
        DumpJsonParser::DumpToFile(dump_path_slot, data_ptr, data_size, shape_to, src_type) && dump_success;
    }
  }
  return dump_success;
}
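// Reads a little-endian uint64 from the raw overflow record; le64toh converts it to host byte
// order (macOS has no le64toh, so a little-endian host is assumed there).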
uint64_t UnpackUint64Value(char *ptr) {
#if defined(__APPLE__)
  return *reinterpret_cast<const uint64_t *>(ptr);
#else
  return le64toh(*reinterpret_cast<const uint64_t *>(ptr));
#endif
}
std::string IntToHexString(const uint64_t value) {
  std::stringstream ss;
  ss << "0x" << std::hex << value;
  return ss.str();
}
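// The overflow info block starts with six consecutive little-endian uint64 fields:
// model_id, stream_id, task_id, task_type, pc_start, para_base.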
nlohmann::json E2eDump::ParseOverflowInfo(char *data_ptr) {
  uint32_t index = 0;
  uint64_t model_id = UnpackUint64Value(data_ptr);
  index += kUint64Size;
  uint64_t stream_id = UnpackUint64Value(data_ptr + index);
  index += kUint64Size;
  uint64_t task_id = UnpackUint64Value(data_ptr + index);
  index += kUint64Size;
  uint64_t task_type = UnpackUint64Value(data_ptr + index);
  index += kUint64Size;
  uint64_t pc_start = UnpackUint64Value(data_ptr + index);
  index += kUint64Size;
  uint64_t para_base = UnpackUint64Value(data_ptr + index);
  nlohmann::json overflow_info;
  overflow_info["model_id"] = model_id;
  overflow_info["stream_id"] = stream_id;
  overflow_info["task_id"] = task_id;
  overflow_info["task_type"] = task_type;
  overflow_info["pc_start"] = IntToHexString(pc_start);
  overflow_info["para_base"] = IntToHexString(para_base);
  return overflow_info;
}
/*
 * Feature group: Dump.
 * Target device group: Ascend.
 * Runtime category: Old runtime, MindRT.
 * Description: This function is for Ascend A+M dump. It parses op overflow info and dumps it to a json file.
 */
void E2eDump::DumpOpDebugToFile(const std::string &dump_path, const debugger::dump::DumpData &dump_data,
                                char *data_ptr) {
  std::string out_path = dump_path + ".output.";
  std::vector<debugger::dump::OpOutput> op_debug(dump_data.output().begin(), dump_data.output().end());
  for (uint32_t slot = 0; slot < op_debug.size(); slot++) {
    uint32_t index = 0;
    // parse DHA Atomic Add info
    nlohmann::json dha_atomic_add_info = ParseOverflowInfo(data_ptr + index);
    index += kDhaAtomicAddInfoSize;
    // parse L2 Atomic Add info
    nlohmann::json l2_atomic_add_info = ParseOverflowInfo(data_ptr + index);
    index += kL2AtomicAddInfoSize;
    // parse AICore info
    nlohmann::json ai_core_info = ParseOverflowInfo(data_ptr + index);
    index += kAiCoreInfoSize;
    // parse DHA Atomic Add status
    dha_atomic_add_info["status"] = UnpackUint64Value(data_ptr + index);
    index += kDhaAtomicAddStatusSize;
    // parse L2 Atomic Add status
    l2_atomic_add_info["status"] = UnpackUint64Value(data_ptr + index);
    index += kL2AtomicAddStatusSize;
    // parse AICore status
    uint64_t kernel_code = UnpackUint64Value(data_ptr + index);
    index += kUint64Size;
    uint64_t block_idx = UnpackUint64Value(data_ptr + index);
    index += kUint64Size;
    uint64_t status = UnpackUint64Value(data_ptr + index);
    ai_core_info["kernel_code"] = IntToHexString(kernel_code);
    ai_core_info["block_idx"] = block_idx;
    ai_core_info["status"] = status;
    nlohmann::json opdebug_data;
    opdebug_data["DHA Atomic Add"] = dha_atomic_add_info;
    opdebug_data["L2 Atomic Add"] = l2_atomic_add_info;
    opdebug_data["AI Core"] = ai_core_info;
    // save json to file
    DumpToFile(out_path + std::to_string(slot) + ".json", opdebug_data.dump());
  }
}
#endif  // ENABLE_D
}  // namespace mindspore