You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

tensor_summary.cc 16 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442
  1. /**
  2. * Copyright 2020-2022 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <cmath>
  17. #include <algorithm>
  18. #include <future>
  19. #include <limits>
  20. #include <memory>
  21. #include <bitset>
  22. #include <tuple>
  23. #include <type_traits>
  24. #include "debug/debugger/tensor_summary.h"
  25. #ifdef OFFLINE_DBG_MODE
  26. #include "base/float16.h"
  27. #endif
  28. #ifdef ONLINE_DBG_MODE
  29. namespace mindspore {
  30. #endif
  31. using CONDITION_TYPE = DebugServices::CONDITION_TYPE;
  32. RangeCountCalculator::RangeCountCalculator()
  33. : range_start_inclusive(-std::numeric_limits<double>::infinity()),
  34. range_end_inclusive(std::numeric_limits<double>::infinity()),
  35. count(0),
  36. total(0) {}
  37. void RangeCountCalculator::ProcessElement(double element) {
  38. count += (element >= range_start_inclusive && element <= range_end_inclusive);
  39. total += 1;
  40. }
  41. double RangeCountCalculator::GetPercentInRange() const {
  42. if (total == 0) {
  43. return 0.0;
  44. }
  45. const double factor = 100.0;
  46. return factor * count / total;
  47. }
  48. AllCloseCalculator::AllCloseCalculator() : atol(1.0e-8), rtol(1.0e-5), result(true) {}
  49. void AllCloseCalculator::ProcessElement(double current, double previous) {
  50. result = result && (std::abs(current - previous) <= (atol + rtol * std::abs(previous)));
  51. }
  52. bool AllCloseCalculator::IsAllClose() const { return result; }
  53. MeanCalculator::MeanCalculator() : mean(0.0), count(0) {}
  54. void MeanCalculator::ProcessElement(double value) {
  55. count += 1;
  56. double delta = value - mean;
  57. mean += delta / count;
  58. }
  59. double MeanCalculator::GetMean() const { return mean; }
  60. VarianceAndMeanCalculator::VarianceAndMeanCalculator() : mean(0.0), count(0), m2(0.0) {}
  61. void VarianceAndMeanCalculator::ProcessElement(double value) {
  62. count += 1;
  63. double delta = value - mean;
  64. mean += delta / count;
  65. m2 += delta * (value - mean);
  66. }
  67. double VarianceAndMeanCalculator::GetMean() const { return mean; }
  68. double VarianceAndMeanCalculator::GetVariance() const {
  69. if (count > 1) {
  70. return m2 / (count - 1);
  71. }
  72. return 0.0;
  73. }
  74. double VarianceAndMeanCalculator::GetStandardDeviation() { return sqrt(GetVariance()); }
// Constructs a summary over a raw tensor buffer and, optionally, the previous
// snapshot of the same tensor (previous_tensor_ptr may be null).
// NOTE(review): assumes current_tensor_ptr really points at num_elements
// values of type T — the caller owns that invariant; nothing here checks it.
template <typename T>
TensorSummary<T>::TensorSummary(const void *current_tensor_ptr, const void *const previous_tensor_ptr,
                                uint32_t num_elements, uint32_t prev_num_elements)
    : current_tensor_ptr_(reinterpret_cast<const T *>(current_tensor_ptr)),
      prev_tensor_ptr_(reinterpret_cast<const T *>(previous_tensor_ptr)),
      num_elements_(num_elements),
      prev_num_elements_(prev_num_elements),
      min_(std::numeric_limits<double>::max()),     // identity for std::min: any element lowers it
      max_(std::numeric_limits<double>::lowest()),  // identity for std::max: any element raises it
      avg_(0.0),
      is_bool_(false),
      neg_zero_count_(0),
      pos_zero_count_(0),
      pos_inf_count_(0),
      neg_inf_count_(0),
      inf_count_(0),
      nan_count_(0),
      zero_count_(0),
      epsilon_(1.0e-9),  // guards the divide in the "abs_mean_update_ratio" statistic
      mean_sd_cal_enabled_(false) {}
  95. /*
  96. * Feature group: Online debugger, Offline debugger.
  97. * Target device group: Ascend, GPU.
  98. * Runtime category: Old runtime, MindRT.
  99. * Description: Initialize watchpoints calculators based on the watchpoint category. Process all the elements within the
  100. * current tensor.
  101. */
  102. template <typename T>
  103. void TensorSummary<T>::SummarizeTensor(const std::vector<DebugServices::watchpoint_t> &wps) {
  104. InitCalculators(wps);
  105. for (size_t i = 0; i < num_elements_; ++i) {
  106. auto current_value = static_cast<double>(current_tensor_ptr_[i]);
  107. double previous_value = std::numeric_limits<double>::quiet_NaN();
  108. if (prev_tensor_ptr_) {
  109. if (num_elements_ == prev_num_elements_) {
  110. previous_value = static_cast<double>(prev_tensor_ptr_[i]);
  111. } else {
  112. MS_LOG(DEBUG) << "Current and previous tensor are not the same size.";
  113. }
  114. }
  115. if (std::isinf(current_value)) {
  116. inf_count_ += 1;
  117. }
  118. if (std::isnan(current_value)) {
  119. nan_count_ += 1;
  120. }
  121. if (current_value == 0) {
  122. zero_count_ += 1;
  123. }
  124. max_ = std::max(max_, current_value);
  125. min_ = std::min(min_, current_value);
  126. if (mean_sd_cal_enabled_) {
  127. current_mean_variance_.ProcessElement(current_value);
  128. }
  129. for (auto &it : all_close_) {
  130. it.second->ProcessElement(current_value, previous_value);
  131. }
  132. for (auto &range_count : range_counts_) {
  133. range_count.second->ProcessElement(current_value);
  134. }
  135. for (auto &mean : means_) {
  136. if (mean.first.compare("curr_prev_diff_mean") == 0) {
  137. mean.second->ProcessElement(std::abs(current_value - previous_value));
  138. } else if (mean.first.compare("abs_prev_mean") == 0) {
  139. mean.second->ProcessElement(std::abs(previous_value));
  140. } else if (mean.first.compare("abs_current_mean") == 0) {
  141. mean.second->ProcessElement(std::abs(current_value));
  142. }
  143. }
  144. }
  145. }
  146. /*
  147. * Feature group: Online debugger, Offline debugger.
  148. * Target device group: Ascend, GPU.
  149. * Runtime category: Old runtime, MindRT.
  150. * Description: Calculates statistics on chunks of data.
  151. */
  152. template <typename T>
  153. void TensorSummary<T>::TensorStatistics(DbgDataType dtype_value) {
  154. if (dtype_value == DT_BOOL) {
  155. is_bool_ = true;
  156. }
  157. const int default_threads = 32;
  158. const int default_elements_per_thread = 10000;
  159. if (num_elements_ <= default_elements_per_thread) {
  160. return TensorStatisticsSingleThread();
  161. }
  162. int desired_threads = num_elements_ / default_elements_per_thread;
  163. int actual_threads = std::min(desired_threads, default_threads);
  164. int actual_elements_per_thread = num_elements_ / actual_threads;
  165. // Use multithread to calculate statistic on chunks of data
  166. void *previous_tensor_ptr = nullptr;
  167. size_t offset = 0;
  168. std::vector<std::unique_ptr<TensorSummary<T>>> summary_vec;
  169. std::vector<std::future<void>> summary_future_vec;
  170. for (int i = 0; i < actual_threads; i++) {
  171. int num_elements_for_thread;
  172. if (i == actual_threads - 1) {
  173. num_elements_for_thread = num_elements_ - offset;
  174. } else {
  175. num_elements_for_thread = actual_elements_per_thread;
  176. }
  177. summary_vec.emplace_back(std::make_unique<TensorSummary<T>>(current_tensor_ptr_ + offset, previous_tensor_ptr,
  178. num_elements_for_thread, 0));
  179. summary_future_vec.emplace_back(
  180. std::async(std::launch::async, &TensorSummary<T>::TensorStatisticsSingleThread, summary_vec[i].get()));
  181. offset += num_elements_for_thread;
  182. }
  183. // Aggregate results of all chunks
  184. num_elements_ = 0; // Let current tensor weight 0 in the aggregation
  185. for (unsigned int i = 0; i < summary_future_vec.size(); i++) {
  186. summary_future_vec[i].wait();
  187. summary_future_vec[i].get();
  188. auto &cur_summary = *(summary_vec[i]);
  189. num_elements_ += cur_summary.num_elements_;
  190. min_ = std::min(min_, cur_summary.min_);
  191. max_ = std::max(max_, cur_summary.max_);
  192. double avg_delta = cur_summary.avg_ - avg_;
  193. avg_ += avg_delta * (cur_summary.num_elements_ / num_elements_);
  194. neg_zero_count_ += cur_summary.neg_zero_count_;
  195. pos_zero_count_ += cur_summary.pos_zero_count_;
  196. neg_inf_count_ += cur_summary.neg_inf_count_;
  197. pos_inf_count_ += cur_summary.pos_inf_count_;
  198. inf_count_ += cur_summary.inf_count_;
  199. nan_count_ += cur_summary.nan_count_;
  200. zero_count_ += cur_summary.zero_count_;
  201. }
  202. }
  203. /*
  204. * Feature group: Online debugger, Offline debugger.
  205. * Target device group: Ascend, GPU.
  206. * Runtime category: Old runtime, MindRT.
  207. * Description: Process all the elements of the chunked data and calculates the statistics.
  208. */
  209. template <typename T>
  210. void TensorSummary<T>::TensorStatisticsSingleThread() {
  211. MeanCalculator mean_calc = MeanCalculator();
  212. for (size_t i = 0; i < num_elements_; ++i) {
  213. auto current_value = static_cast<double>(current_tensor_ptr_[i]);
  214. if (std::isinf(current_value)) {
  215. if (current_value > 0) {
  216. pos_inf_count_ += 1;
  217. } else {
  218. neg_inf_count_ += 1;
  219. }
  220. }
  221. if (current_value == 0) {
  222. zero_count_ += 1;
  223. }
  224. if (std::isnan(current_value)) {
  225. nan_count_ += 1;
  226. }
  227. if (!(std::isnan(current_value) || std::isinf(current_value))) {
  228. // only considering tensor elements with value
  229. if (std::signbit(current_value) && !(current_value == 0)) {
  230. neg_zero_count_ += 1;
  231. } else if (!(current_value == 0)) {
  232. pos_zero_count_ += 1;
  233. }
  234. max_ = std::max(max_, current_value);
  235. min_ = std::min(min_, current_value);
  236. mean_calc.ProcessElement(current_value);
  237. }
  238. }
  239. avg_ = mean_calc.GetMean();
  240. }
  241. /*
  242. * Feature group: Online debugger, Offline debugger.
  243. * Target device group: Ascend, GPU.
  244. * Runtime category: Old runtime, MindRT.
  245. * Description: Returns a tuple with three elements, the first element is a bool and it is true if the watchpoint is
  246. * hit. The second element is the error_code which is set in this function and the third element is the parameter_list
  247. * for the watchpoint.
  248. */
// Evaluates one watchpoint against the accumulated statistics.
// Returns {hit, error_code, parameter_list}: `hit` is true when the condition
// fired, `error_code` is a bitmask (bit 0: NaN present, bit 1: inf present,
// bit 2: previous tensor required but absent), and `parameter_list` carries
// the per-parameter evaluation results back to the caller.
template <typename T>
std::tuple<bool, int, std::vector<DebugServices::parameter_t>> TensorSummary<T>::IsWatchpointHit(
  DebugServices::watchpoint_t wp) {
  auto parameter_list = wp.parameter_list;
  bool hit = false;
  const uint8_t bit_size = 32;
  std::bitset<bit_size> error_code;
  CONDITION_TYPE type = wp.condition.type;
  // bit 0 denotes presence of nan
  (void)error_code.set(0, nan_count_ > 0);
  // bit 1 denotes presence of inf
  (void)error_code.set(1, inf_count_ > 0);
  if (type == CONDITION_TYPE::HAS_NAN) {
    // NaN/inf ARE the condition here, not errors — clear the code before testing.
    error_code.reset();
    hit = nan_count_ > 0;
  } else if (type == CONDITION_TYPE::HAS_INF) {
    error_code.reset();
    hit = inf_count_ > 0;
  } else if (type == CONDITION_TYPE::GENERAL_OVERFLOW) {
    error_code.reset();
    hit = (nan_count_ + inf_count_) > 0;
  } else if (type == CONDITION_TYPE::NOT_CHANGED && prev_tensor_ptr_ && error_code.none()) {
    // Only meaningful when a previous tensor exists and no NaN/inf was seen.
    hit = all_close_[wp.id]->IsAllClose();
  } else if ((type == CONDITION_TYPE::NOT_CHANGED || type == CONDITION_TYPE::CHANGE_TOO_LARGE ||
              type == CONDITION_TYPE::CHANGE_TOO_SMALL) &&
             !prev_tensor_ptr_) {
    // bit 2 denotes absence of previous tensor
    error_code.set(2, true);
  }
  // Parameterized conditions are only evaluated when no error bit is set.
  if (error_code.none()) {
    for (auto &parameter : parameter_list) {
      // NOTE(review): error_code cannot change inside this loop, so the
      // error_code.any() check is always false here — kept for safety.
      if (parameter.disabled || error_code.any()) {
        continue;
      }
      // extract inequality type from watchpoint for backward compatibility
      std::string inequality_type;
      if (wp.is_gt_wp()) {
        inequality_type = "gt";
      } else if (wp.is_lt_wp()) {
        inequality_type = "lt";
      }
      parameter.Evaluate(StatLookup(parameter.name, wp), inequality_type);
      hit = hit || parameter.hit;
    }
  }
  return std::make_tuple(hit, static_cast<int32_t>(error_code.to_ulong()), parameter_list);
}
  296. template <typename T>
  297. double_t TensorSummary<T>::StatLookup(const std::string &parameter_name, const DebugServices::watchpoint_t &wp) {
  298. if (parameter_name == "param") return StatLookup(wp);
  299. std::string param_type;
  300. auto pos = parameter_name.find_last_of('_');
  301. if (pos != std::string::npos) {
  302. param_type = parameter_name.substr(0, pos);
  303. }
  304. if (param_type == "max") {
  305. return max_;
  306. }
  307. if (param_type == "min") {
  308. return min_;
  309. }
  310. if (param_type == "max_min") {
  311. return max_ - min_;
  312. }
  313. if (param_type == "mean") {
  314. return current_mean_variance_.GetMean();
  315. }
  316. if (param_type == "sd") {
  317. return current_mean_variance_.GetStandardDeviation();
  318. }
  319. if (param_type == "abs_mean") {
  320. if (means_.find("abs_current_mean") != means_.end()) {
  321. return means_["abs_current_mean"]->GetMean();
  322. }
  323. }
  324. if (param_type == "abs_mean_update_ratio" && prev_tensor_ptr_) {
  325. if (means_.find("curr_prev_diff_mean") != means_.end() && means_.find("abs_prev_mean") != means_.end()) {
  326. return means_["curr_prev_diff_mean"]->GetMean() / (means_["abs_prev_mean"]->GetMean() + epsilon_);
  327. }
  328. }
  329. if (param_type == "range_percentage") {
  330. if (range_counts_.find(wp.id) != range_counts_.end()) {
  331. return range_counts_[wp.id]->GetPercentInRange();
  332. }
  333. }
  334. if (param_type == "zero_percentage") {
  335. return GetZeroValPercent();
  336. }
  337. return std::numeric_limits<double_t>::quiet_NaN();
  338. }
  339. template <typename T>
  340. double_t TensorSummary<T>::StatLookup(const DebugServices::watchpoint_t &wp) {
  341. CONDITION_TYPE type = wp.condition.type;
  342. if (type == CONDITION_TYPE::MAX_LT || type == CONDITION_TYPE::MAX_GT) {
  343. return max_;
  344. }
  345. if (type == CONDITION_TYPE::MIN_LT || type == CONDITION_TYPE::MIN_GT) {
  346. return min_;
  347. }
  348. if (type == CONDITION_TYPE::MEAN_LT || type == CONDITION_TYPE::MEAN_GT) {
  349. return current_mean_variance_.GetMean();
  350. }
  351. if (type == CONDITION_TYPE::SD_LT || type == CONDITION_TYPE::SD_GT) {
  352. return current_mean_variance_.GetStandardDeviation();
  353. }
  354. if (type == CONDITION_TYPE::MAX_MIN_GT || type == CONDITION_TYPE::MAX_MIN_LT) {
  355. return max_ - min_;
  356. }
  357. return std::numeric_limits<double_t>::quiet_NaN();
  358. }
  359. template <typename T>
  360. double_t TensorSummary<T>::GetZeroValPercent() {
  361. if (num_elements_ == 0) {
  362. return 0;
  363. }
  364. return (zero_count_ * 100.0) / num_elements_;
  365. }
  366. template <typename T>
  367. void TensorSummary<T>::InitCalculators(const std::vector<DebugServices::watchpoint_t> &wps) {
  368. for (auto &wp : wps) {
  369. auto wp_id = wp.id;
  370. mean_sd_cal_enabled_ = mean_sd_cal_enabled_ || wp.mean_sd_enabled();
  371. if (wp.allclose_enabled() && prev_tensor_ptr_) {
  372. all_close_[wp_id] = std::make_unique<AllCloseCalculator>();
  373. if (!wp.parameter_list[0].disabled) {
  374. all_close_[wp_id]->set_rtol(wp.parameter_list[0].value);
  375. }
  376. if (!wp.parameter_list[1].disabled) {
  377. all_close_[wp_id]->set_atol(wp.parameter_list[1].value);
  378. }
  379. } else if (wp.range_enabled()) {
  380. range_counts_[wp_id] = std::make_unique<RangeCountCalculator>();
  381. if (!wp.parameter_list[0].disabled) {
  382. range_counts_[wp_id]->set_range_start_inclusive(wp.parameter_list[0].value);
  383. }
  384. if (!wp.parameter_list[1].disabled) {
  385. range_counts_[wp_id]->set_range_end_inclusive(wp.parameter_list[1].value);
  386. }
  387. } else if (wp.tensor_update_ratio_mean_enabled() && prev_tensor_ptr_) {
  388. (void)means_.emplace("curr_prev_diff_mean", std::make_unique<MeanCalculator>());
  389. (void)means_.emplace("abs_prev_mean", std::make_unique<MeanCalculator>());
  390. } else if (wp.abs_mean_enabled()) {
  391. (void)means_.emplace("abs_current_mean", std::make_unique<MeanCalculator>());
  392. }
  393. }
  394. }
// Explicit instantiations: the debugger only summarizes tensors of these
// element types, so the template definitions can stay in this .cc file.
template class TensorSummary<uint8_t>;
template class TensorSummary<int8_t>;
template class TensorSummary<uint16_t>;
template class TensorSummary<int16_t>;
template class TensorSummary<uint32_t>;
template class TensorSummary<int32_t>;
template class TensorSummary<uint64_t>;
template class TensorSummary<int64_t>;
template class TensorSummary<float16>;
template class TensorSummary<float>;
template class TensorSummary<double>;
template class TensorSummary<bool>;
  407. #ifdef ONLINE_DBG_MODE
  408. } // namespace mindspore
  409. #endif