You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

debug_services.cc 22 kB

5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <map>
#include <type_traits>

#include "backend/session/anf_runtime_algorithm.h"
#include "debug/debug_services.h"
  20. namespace mindspore {
  21. DebugServices::DebugServices() {
  22. tensor_loader_ = new TensorLoader();
  23. uint32_t iter_num = -1;
  24. tensor_loader_->set_iter_num(iter_num);
  25. }
// Copy constructor: copies the watchpoint table and the tensor loader pointer.
// NOTE(review): this copies the raw tensor_loader_ pointer while the destructor
// deletes it, so destroying both the copy and the original double-deletes the
// loader. Confirm whether copies are ever destroyed independently.
DebugServices::DebugServices(const DebugServices &other) {
  tensor_loader_ = other.tensor_loader_;
  watchpoint_table = other.watchpoint_table;
}
// Copy assignment: copies the watchpoint table and the tensor loader pointer.
// NOTE(review): the current tensor_loader_ is overwritten without being deleted
// (leak), and afterwards both objects share one loader that the destructor
// deletes (double delete). Confirm intended ownership semantics.
DebugServices &DebugServices::operator=(const DebugServices &other) {
  if (this != &other) {
    tensor_loader_ = other.tensor_loader_;
    watchpoint_table = other.watchpoint_table;
  }
  return *this;
}
// Releases the tensor loader allocated in the default constructor.
// NOTE(review): copies made via the copy constructor/assignment share this same
// pointer, so destroying more than one instance double-deletes — see above.
DebugServices::~DebugServices() { delete tensor_loader_; }
  38. void DebugServices::AddWatchpoint(unsigned int id, unsigned int watch_condition, float parameter,
  39. const std::vector<std::tuple<std::string, bool>> &check_node_list,
  40. const std::vector<parameter_t> &parameter_list) {
  41. std::lock_guard<std::mutex> lg(lock_);
  42. watchpoint_t watchpoint_item;
  43. watchpoint_item.id = id;
  44. watchpoint_item.condition.type = static_cast<CONDITION_TYPE>(watch_condition);
  45. watchpoint_item.condition.parameter = parameter;
  46. if (watch_condition > 2 && watch_condition < 13)
  47. // odd indices are greater than conditions and even indices are less than
  48. watchpoint_item.condition.comparison = (watch_condition & 1) == 0 ? "LT" : "GT";
  49. watchpoint_item.check_node_list = check_node_list;
  50. watchpoint_item.parameter_list = parameter_list;
  51. watchpoint_table[id] = watchpoint_item;
  52. }
  53. void DebugServices::RemoveWatchpoint(unsigned int id) {
  54. std::lock_guard<std::mutex> lg(lock_);
  55. watchpoint_table.erase(id);
  56. }
  57. template <typename T>
  58. DebugServices::tensor_stats DebugServices::SummarizeTensor(const T *start, const T *start_prev, unsigned int n,
  59. bool need_min_max, bool need_mean_sd,
  60. bool need_zero_percentage,
  61. bool need_tensor_update_ratio_mean, bool need_allclose,
  62. bool need_abs_mean) {
  63. tensor_stats stats;
  64. double zero_count = 0.0;
  65. double rtol = 1.0e-5;
  66. double atol = 1.0e-8;
  67. double update_ratio_sum = 0.0;
  68. double epsilon = 1.0e-9;
  69. for (unsigned int i = 0; i < n; ++i) {
  70. auto val = static_cast<double>(start[i]);
  71. double val_prev = 0.0;
  72. if (start_prev) {
  73. val_prev = static_cast<double>(start_prev[i]);
  74. }
  75. stats.has_nan = stats.has_nan || std::isnan(val);
  76. stats.has_inf = stats.has_inf || std::isinf(val);
  77. if (stats.has_inf && stats.has_nan) {
  78. // other statistics don't make sense in this case
  79. break;
  80. }
  81. if (need_min_max) {
  82. stats.min = std::min(stats.min, val);
  83. stats.max = std::max(stats.max, val);
  84. }
  85. if (need_mean_sd) {
  86. double delta = val - stats.mean;
  87. stats.mean += delta / (i + 1);
  88. stats.m2 += delta * (val - stats.mean);
  89. }
  90. if (need_abs_mean) {
  91. double delta = std::abs(val) - stats.abs_mean;
  92. stats.abs_mean += delta / (i + 1);
  93. }
  94. if (need_zero_percentage) {
  95. if (val == 0) zero_count++;
  96. }
  97. if (need_tensor_update_ratio_mean && start_prev) {
  98. update_ratio_sum += (std::abs(val - val_prev) / (epsilon + std::abs(val_prev)));
  99. }
  100. if (need_allclose && start_prev) {
  101. stats.allclose &= (std::abs(val - val_prev) <= (atol + rtol * std::abs(val_prev)));
  102. }
  103. }
  104. if (need_tensor_update_ratio_mean && start_prev) {
  105. stats.tensor_update_ratio_mean = (update_ratio_sum / n);
  106. }
  107. stats.zero_percentage = (zero_count / n) * 100;
  108. stats.n = n;
  109. return stats;
  110. }
  111. void DebugServices::CheckWatchpoints(std::vector<std::string> *name, std::vector<std::string> *slot,
  112. std::vector<int> *condition, std::vector<unsigned int> *watchpoint_id,
  113. std::vector<std::vector<parameter_t>> *parameters,
  114. const std::vector<std::string> &op_overflows,
  115. const std::vector<std::shared_ptr<TensorData>> &tensor_list,
  116. const bool init_dbg_suspend) {
  117. std::lock_guard<std::mutex> lg(lock_);
  118. if (watchpoint_table.empty()) {
  119. return;
  120. }
  121. for (const auto &tensor : tensor_list) {
  122. const auto tensor_name = tensor->GetName();
  123. const auto tensor_name_no_slot = tensor_name.substr(0, tensor_name.find_first_of(':'));
  124. const auto tensor_slot = std::to_string(tensor->GetSlot());
  125. mindspore::tensor::TensorPtr tensor_ptr = tensor->GetTensor();
  126. int tensor_dtype = tensor_ptr->data_type_c();
  127. std::vector<unsigned int> hit_encountered;
  128. std::vector<std::vector<bool>> hit_parms;
  129. std::unordered_map<unsigned int, watchpoint_t> watchpoints_to_check_table;
  130. bool min_max_enabled = false;
  131. bool mean_sd_enabled = false;
  132. bool inf_nan_enabled = false;
  133. bool zero_percentage_enabled = false;
  134. bool tensor_update_ratio_mean_enabled = false;
  135. bool allclose_enabled = false;
  136. bool abs_mean_enabled = false;
  137. for (auto w_table_item : watchpoint_table) {
  138. auto wp = std::get<1>(w_table_item);
  139. if (wp.condition.type == INIT && !init_dbg_suspend) continue;
  140. if (wp.condition.type != IS_OVERFLOW && tensor_dtype == kNumberTypeBool) continue;
  141. if (wp.IsNodeIncluded(tensor_name_no_slot)) {
  142. min_max_enabled |= wp.min_max_enabled();
  143. mean_sd_enabled |= wp.mean_sd_enabled();
  144. inf_nan_enabled |= wp.inf_nan_enabled();
  145. zero_percentage_enabled |= wp.zero_percentage_enabled();
  146. tensor_update_ratio_mean_enabled |= wp.tensor_update_ratio_mean_enabled();
  147. allclose_enabled |= wp.allclose_enabled();
  148. abs_mean_enabled |= wp.abs_mean_enabled();
  149. watchpoints_to_check_table[w_table_item.second.id] = w_table_item.second;
  150. }
  151. }
  152. tensor_stats stats;
  153. uint num_elements = tensor_ptr->DataSize();
  154. if (min_max_enabled || mean_sd_enabled || inf_nan_enabled || zero_percentage_enabled ||
  155. tensor_update_ratio_mean_enabled || allclose_enabled || abs_mean_enabled) {
  156. bool need_prev = (tensor_update_ratio_mean_enabled || allclose_enabled);
  157. bool have_prev = tensor_loader_->GetPrevTensor(tensor_name) != NULL;
  158. switch (tensor_dtype) {
  159. case kNumberTypeUInt8: {
  160. auto start_addr = reinterpret_cast<uint8_t *>(tensor_ptr->data_c());
  161. auto start_addr_prev =
  162. (need_prev && have_prev
  163. ? reinterpret_cast<uint8_t *>(tensor_loader_->GetPrevTensor(tensor_name)->GetTensor()->data_c())
  164. : NULL);
  165. stats = SummarizeTensor(start_addr, start_addr_prev, num_elements, min_max_enabled, mean_sd_enabled,
  166. zero_percentage_enabled, tensor_update_ratio_mean_enabled, allclose_enabled,
  167. abs_mean_enabled);
  168. break;
  169. }
  170. case kNumberTypeInt8: {
  171. auto start_addr = reinterpret_cast<int8_t *>(tensor_ptr->data_c());
  172. auto start_addr_prev =
  173. (need_prev && have_prev
  174. ? reinterpret_cast<int8_t *>(tensor_loader_->GetPrevTensor(tensor_name)->GetTensor()->data_c())
  175. : NULL);
  176. stats = SummarizeTensor(start_addr, start_addr_prev, num_elements, min_max_enabled, mean_sd_enabled,
  177. zero_percentage_enabled, tensor_update_ratio_mean_enabled, allclose_enabled,
  178. abs_mean_enabled);
  179. break;
  180. }
  181. case kNumberTypeUInt16: {
  182. auto start_addr = reinterpret_cast<uint16_t *>(tensor_ptr->data_c());
  183. auto start_addr_prev =
  184. (need_prev && have_prev
  185. ? reinterpret_cast<uint16_t *>(tensor_loader_->GetPrevTensor(tensor_name)->GetTensor()->data_c())
  186. : NULL);
  187. stats = SummarizeTensor(start_addr, start_addr_prev, num_elements, min_max_enabled, mean_sd_enabled,
  188. zero_percentage_enabled, tensor_update_ratio_mean_enabled, allclose_enabled,
  189. abs_mean_enabled);
  190. break;
  191. }
  192. case kNumberTypeInt16: {
  193. auto start_addr = reinterpret_cast<int16_t *>(tensor_ptr->data_c());
  194. auto start_addr_prev =
  195. (need_prev && have_prev
  196. ? reinterpret_cast<int16_t *>(tensor_loader_->GetPrevTensor(tensor_name)->GetTensor()->data_c())
  197. : NULL);
  198. stats = SummarizeTensor(start_addr, start_addr_prev, num_elements, min_max_enabled, mean_sd_enabled,
  199. zero_percentage_enabled, tensor_update_ratio_mean_enabled, allclose_enabled,
  200. abs_mean_enabled);
  201. break;
  202. }
  203. case kNumberTypeUInt32: {
  204. auto start_addr = reinterpret_cast<uint32_t *>(tensor_ptr->data_c());
  205. auto start_addr_prev =
  206. (need_prev && have_prev
  207. ? reinterpret_cast<uint32_t *>(tensor_loader_->GetPrevTensor(tensor_name)->GetTensor()->data_c())
  208. : NULL);
  209. stats = SummarizeTensor(start_addr, start_addr_prev, num_elements, min_max_enabled, mean_sd_enabled,
  210. zero_percentage_enabled, tensor_update_ratio_mean_enabled, allclose_enabled,
  211. abs_mean_enabled);
  212. break;
  213. }
  214. case kNumberTypeInt32:
  215. case kNumberTypeInt: {
  216. auto start_addr = reinterpret_cast<int32_t *>(tensor_ptr->data_c());
  217. auto start_addr_prev =
  218. (need_prev && have_prev
  219. ? reinterpret_cast<int32_t *>(tensor_loader_->GetPrevTensor(tensor_name)->GetTensor()->data_c())
  220. : NULL);
  221. stats = SummarizeTensor(start_addr, start_addr_prev, num_elements, min_max_enabled, mean_sd_enabled,
  222. zero_percentage_enabled, tensor_update_ratio_mean_enabled, allclose_enabled,
  223. abs_mean_enabled);
  224. break;
  225. }
  226. case kNumberTypeUInt64: {
  227. auto start_addr = reinterpret_cast<uint64_t *>(tensor_ptr->data_c());
  228. auto start_addr_prev =
  229. (need_prev && have_prev
  230. ? reinterpret_cast<uint64_t *>(tensor_loader_->GetPrevTensor(tensor_name)->GetTensor()->data_c())
  231. : NULL);
  232. stats = SummarizeTensor(start_addr, start_addr_prev, num_elements, min_max_enabled, mean_sd_enabled,
  233. zero_percentage_enabled, tensor_update_ratio_mean_enabled, allclose_enabled,
  234. abs_mean_enabled);
  235. break;
  236. }
  237. case kNumberTypeInt64: {
  238. auto start_addr = reinterpret_cast<int64_t *>(tensor_ptr->data_c());
  239. auto start_addr_prev =
  240. (need_prev && have_prev
  241. ? reinterpret_cast<int64_t *>(tensor_loader_->GetPrevTensor(tensor_name)->GetTensor()->data_c())
  242. : NULL);
  243. stats = SummarizeTensor(start_addr, start_addr_prev, num_elements, min_max_enabled, mean_sd_enabled,
  244. zero_percentage_enabled, tensor_update_ratio_mean_enabled, allclose_enabled,
  245. abs_mean_enabled);
  246. break;
  247. }
  248. case kNumberTypeFloat16: {
  249. auto start_addr = reinterpret_cast<float16 *>(tensor_ptr->data_c());
  250. auto start_addr_prev =
  251. (need_prev && have_prev
  252. ? reinterpret_cast<float16 *>(tensor_loader_->GetPrevTensor(tensor_name)->GetTensor()->data_c())
  253. : NULL);
  254. stats = SummarizeTensor(start_addr, start_addr_prev, num_elements, min_max_enabled, mean_sd_enabled,
  255. zero_percentage_enabled, tensor_update_ratio_mean_enabled, allclose_enabled,
  256. abs_mean_enabled);
  257. break;
  258. }
  259. case kNumberTypeFloat32:
  260. case kNumberTypeFloat: {
  261. auto start_addr = reinterpret_cast<float *>(tensor_ptr->data_c());
  262. auto start_addr_prev =
  263. (need_prev && have_prev
  264. ? reinterpret_cast<float *>(tensor_loader_->GetPrevTensor(tensor_name)->GetTensor()->data_c())
  265. : NULL);
  266. stats = SummarizeTensor(start_addr, start_addr_prev, num_elements, min_max_enabled, mean_sd_enabled,
  267. zero_percentage_enabled, tensor_update_ratio_mean_enabled, allclose_enabled,
  268. abs_mean_enabled);
  269. break;
  270. }
  271. case kNumberTypeFloat64: {
  272. auto start_addr = reinterpret_cast<double *>(tensor_ptr->data_c());
  273. auto start_addr_prev =
  274. (need_prev && have_prev
  275. ? reinterpret_cast<double *>(tensor_loader_->GetPrevTensor(tensor_name)->GetTensor()->data_c())
  276. : NULL);
  277. stats = SummarizeTensor(start_addr, start_addr_prev, num_elements, min_max_enabled, mean_sd_enabled,
  278. zero_percentage_enabled, tensor_update_ratio_mean_enabled, allclose_enabled,
  279. abs_mean_enabled);
  280. break;
  281. }
  282. default:
  283. MS_LOG(INFO) << "Unsupported tensor type";
  284. break;
  285. }
  286. }
  287. for (auto &it : watchpoints_to_check_table) {
  288. auto wp_id = it.second.id;
  289. std::vector<bool> hit_p;
  290. CONDITION_TYPE enabled_condition = it.second.condition.type;
  291. bool hit = (enabled_condition == HAS_NAN && stats.has_nan) || (enabled_condition == HAS_INF && stats.has_inf) ||
  292. (enabled_condition == GENERAL_OVERFLOW && (stats.has_nan || stats.has_inf)) ||
  293. (enabled_condition == IS_OVERFLOW &&
  294. std::find(op_overflows.begin(), op_overflows.end(), tensor_name_no_slot) != op_overflows.end());
  295. if (enabled_condition > 2 && enabled_condition != GENERAL_OVERFLOW) {
  296. if (stats.has_inf || stats.has_nan) {
  297. MS_LOG(WARNING) << "NaN or/and INF present in tensor: " << tensor_name << ". Cannot check "
  298. << condition_label[enabled_condition] << " watchpoint.";
  299. } else if (enabled_condition < 13) {
  300. bool gt = stats.statLookup(enabled_condition) > it.second.condition.parameter;
  301. bool lt = stats.statLookup(enabled_condition) < it.second.condition.parameter;
  302. hit |= it.second.condition.comparison == "GT" ? gt : lt;
  303. } else {
  304. std::vector<parameter_t> parameter_list_item = it.second.parameter_list;
  305. for (auto &p : parameter_list_item) {
  306. if (p.disabled == false) {
  307. bool p_hit = false;
  308. if (p.name == "zero_percentage_ge") {
  309. p_hit = stats.parmLookup(STAT_ZERO_PERCENTAGE) >= p.value;
  310. } else if (p.name == "max_gt") {
  311. p_hit = stats.parmLookup(STAT_MAX) > p.value;
  312. } else if (p.name == "max_lt") {
  313. p_hit = stats.parmLookup(STAT_MAX) < p.value;
  314. } else if (p.name == "min_gt") {
  315. p_hit = stats.parmLookup(STAT_MIN) > p.value;
  316. } else if (p.name == "min_lt") {
  317. p_hit = stats.parmLookup(STAT_MIN) < p.value;
  318. } else if (p.name == "mean_gt") {
  319. p_hit = stats.parmLookup(STAT_MEAN) > p.value;
  320. } else if (p.name == "mean_lt") {
  321. p_hit = stats.parmLookup(STAT_MEAN) < p.value;
  322. } else if (p.name == "abs_mean_gt") {
  323. p_hit = stats.parmLookup(STAT_ABS_MEAN) > p.value;
  324. } else if (p.name == "abs_mean_lt") {
  325. p_hit = stats.parmLookup(STAT_ABS_MEAN) < p.value;
  326. } else if (p.name == "abs_update_ratio_mean_gt") {
  327. p_hit = stats.parmLookup(STAT_TENSOR_UPDATE_RATIO_MEAN) > p.value;
  328. } else if (p.name == "abs_update_ratio_mean_lt") {
  329. p_hit = stats.parmLookup(STAT_TENSOR_UPDATE_RATIO_MEAN) < p.value;
  330. }
  331. hit |= p_hit;
  332. hit_p.push_back(p_hit);
  333. } else {
  334. hit_p.push_back(false);
  335. }
  336. }
  337. hit |= (enabled_condition == NOT_CHANGED && stats.parmLookup(STAT_ALLCLOSE));
  338. if (hit) hit_parms.push_back(hit_p);
  339. }
  340. }
  341. if (hit) hit_encountered.push_back(wp_id);
  342. }
  343. unsigned int index_parm_list = 0;
  344. for (auto it_hit_id = hit_encountered.begin(); it_hit_id != hit_encountered.end(); ++it_hit_id) {
  345. if (watchpoint_table.find(*it_hit_id) != watchpoint_table.end()) {
  346. // return fully qualified name for weights and bias to MI
  347. auto found_dot = tensor_name_no_slot.find_last_of('.');
  348. if (found_dot != std::string::npos && (tensor_name_no_slot.substr(found_dot + 1) == "weight" ||
  349. tensor_name_no_slot.substr(found_dot + 1) == "bias")) {
  350. auto check_node_list = watchpoint_table.find(*it_hit_id)->second.check_node_list;
  351. bool found_match = false;
  352. for (auto check_node : check_node_list) {
  353. std::string w_name = std::get<0>(check_node);
  354. auto found_slash = w_name.find_last_of('/');
  355. if (found_slash != std::string::npos && w_name.substr(found_slash + 1) == tensor_name_no_slot) {
  356. name->push_back(w_name);
  357. found_match = true;
  358. break;
  359. }
  360. }
  361. if (!found_match) {
  362. name->push_back(tensor_name_no_slot);
  363. }
  364. } else {
  365. name->push_back(tensor_name_no_slot);
  366. }
  367. slot->push_back(tensor_slot);
  368. int condition_item = watchpoint_table.find(*it_hit_id)->second.condition.type;
  369. condition->push_back(condition_item);
  370. watchpoint_id->push_back(*it_hit_id);
  371. std::vector<parameter_t> parameter_list_item = watchpoint_table.find(*it_hit_id)->second.parameter_list;
  372. if (condition_item >= 13) {
  373. unsigned int index_hit_parm = 0;
  374. for (auto &p : parameter_list_item) {
  375. p.hit = hit_parms[index_parm_list][index_hit_parm];
  376. index_hit_parm++;
  377. }
  378. index_parm_list++;
  379. }
  380. parameters->push_back(parameter_list_item);
  381. }
  382. watchpoints_to_check_table.erase(*it_hit_id);
  383. }
  384. }
  385. }
  386. void DebugServices::ReadNodesTensors(std::vector<std::string> name, std::vector<std::string> *ret_name,
  387. std::vector<char *> *data_ptr, std::vector<unsigned int> *data_size,
  388. std::vector<TypePtr> *dtype, std::vector<std::vector<int64_t>> *shape) {
  389. std::vector<std::tuple<std::string, std::shared_ptr<TensorData>>> result_list;
  390. tensor_loader_->SearchTensors(name, &result_list);
  391. for (auto result : result_list) {
  392. if (!std::get<1>(result)) {
  393. continue;
  394. }
  395. ret_name->push_back(std::get<0>(result));
  396. data_ptr->push_back(reinterpret_cast<char *>(std::get<1>(result)->GetTensor()->data_c()));
  397. data_size->push_back(std::get<1>(result)->GetTensor()->data().nbytes());
  398. dtype->push_back(std::get<1>(result)->GetTensor()->Dtype());
  399. shape->push_back(std::get<1>(result)->GetTensor()->shape());
  400. }
  401. }
  402. bool DebugServices::IsWatchPoint(std::string kernel_name, const CNodePtr &kernel) {
  403. bool ret = false;
  404. for (auto w_table_item : watchpoint_table) {
  405. auto check_node_list = std::get<1>(w_table_item).check_node_list;
  406. for (auto check_node : check_node_list) {
  407. std::string w_name = std::get<0>(check_node);
  408. bool w_type = std::get<1>(check_node);
  409. if ((w_type == true &&
  410. ((kernel_name.find(w_name) != string::npos && kernel_name.rfind(w_name, 0) == 0) || w_name == "*")) ||
  411. (w_type == false && (kernel_name == w_name || IsWatchPointNodeInput(w_name, kernel)))) {
  412. ret = true;
  413. return ret;
  414. }
  415. }
  416. }
  417. return ret;
  418. }
  419. bool DebugServices::IsWatchPointNodeInput(std::string w_name, const CNodePtr &kernel) {
  420. if (kernel) {
  421. auto input_size = AnfAlgo::GetInputTensorNum(kernel);
  422. for (size_t j = 0; j < input_size; ++j) {
  423. auto input_kernel = kernel->input(j + 1);
  424. std::string input_kernel_name = input_kernel->fullname_with_scope();
  425. auto found = w_name.find_last_of('/');
  426. if (found != std::string::npos && w_name.substr(found + 1) == input_kernel_name) return true;
  427. }
  428. return false;
  429. } else {
  430. return false;
  431. }
  432. }
  433. void DebugServices::AddWeightsBiasInputs(std::vector<std::shared_ptr<TensorData>> *tensor_list,
  434. const CNodePtr &kernel) {
  435. if (kernel) {
  436. auto input_size = AnfAlgo::GetInputTensorNum(kernel);
  437. for (size_t j = 0; j < input_size; ++j) {
  438. auto input_kernel = kernel->input(j + 1);
  439. std::string input_kernel_name = input_kernel->fullname_with_scope();
  440. auto found_dot = input_kernel_name.find_last_of('.');
  441. if (found_dot != std::string::npos &&
  442. (input_kernel_name.substr(found_dot + 1) == "weight" || input_kernel_name.substr(found_dot + 1) == "bias")) {
  443. std::string locate_tensor = input_kernel_name + ":0";
  444. std::map<std::string, std::shared_ptr<TensorData>> tensor_map = tensor_loader_->GetTensorMap();
  445. std::map<std::string, std::shared_ptr<TensorData>>::iterator iter;
  446. iter = tensor_map.find(locate_tensor);
  447. if (iter != tensor_map.end()) {
  448. tensor_list->push_back(iter->second);
  449. }
  450. }
  451. }
  452. }
  453. }
// Accessor for the tensor loader; the returned pointer stays owned by this object.
TensorLoader *DebugServices::tensor_loader() const { return tensor_loader_; }
// Returns a copy of the watchpoint table (id -> watchpoint).
// NOTE(review): unlike Add/RemoveWatchpoint, this read is not guarded by
// lock_ — confirm callers never race with concurrent watchpoint mutation.
std::unordered_map<unsigned int, DebugServices::watchpoint_t> DebugServices::GetWatchpointTable() {
  return watchpoint_table;
}
  458. } // namespace mindspore