You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

tensor_summary.cc 14 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415
  1. /**
  2. * Copyright 2020-2021 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <cmath>
  17. #include <algorithm>
  18. #include <future>
  19. #include <limits>
  20. #include <memory>
  21. #include <bitset>
  22. #include <tuple>
  23. #include <type_traits>
  24. #include "debug/debugger/tensor_summary.h"
  25. #ifdef OFFLINE_DBG_MODE
  26. #include "base/float16.h"
  27. #endif
  28. #ifdef ONLINE_DBG_MODE
  29. namespace mindspore {
  30. #endif
  31. using CONDITION_TYPE = DebugServices::CONDITION_TYPE;
  32. RangeCountCalculator::RangeCountCalculator()
  33. : range_start_inclusive(-std::numeric_limits<double>::infinity()),
  34. range_end_inclusive(std::numeric_limits<double>::infinity()),
  35. count(0),
  36. total(0) {}
  37. void RangeCountCalculator::ProcessElement(double element) {
  38. count += (element >= range_start_inclusive && element <= range_end_inclusive);
  39. total += 1;
  40. }
  41. double RangeCountCalculator::GetPercentInRange() const {
  42. if (total == 0) {
  43. return 0.0;
  44. }
  45. const double factor = 100.0;
  46. return factor * count / total;
  47. }
// Default tolerances (atol=1e-8, rtol=1e-5) match numpy.isclose; result
// starts true and can only be cleared by a failing element pair.
AllCloseCalculator::AllCloseCalculator() : atol(1.0e-8), rtol(1.0e-5), result(true) {}
  49. void AllCloseCalculator::ProcessElement(double current, double previous) {
  50. result = result && (std::abs(current - previous) <= (atol + rtol * std::abs(previous)));
  51. }
// True iff every element pair processed so far satisfied the tolerance test.
bool AllCloseCalculator::IsAllClose() const { return result; }
// Running mean starts at 0 with no samples; updated incrementally per element.
MeanCalculator::MeanCalculator() : mean(0.0), count(0) {}
  54. void MeanCalculator::ProcessElement(double value) {
  55. count += 1;
  56. double delta = value - mean;
  57. mean += delta / count;
  58. }
// Returns the mean of all processed elements (0.0 if none were processed).
double MeanCalculator::GetMean() const { return mean; }
// Welford accumulator state: running mean, sample count, and m2 (sum of
// squared deviations from the current mean).
VarianceAndMeanCalculator::VarianceAndMeanCalculator() : mean(0.0), count(0), m2(0.0) {}
// One step of Welford's online algorithm (Knuth TAOCP Vol. 2): m2 must be
// updated with the deviation from the OLD mean times the deviation from the
// NEW mean, so the statement order below is load-bearing.
void VarianceAndMeanCalculator::ProcessElement(double value) {
  count += 1;
  double delta = value - mean;  // deviation from the mean BEFORE this sample
  mean += delta / count;
  m2 += delta * (value - mean);  // second factor uses the UPDATED mean
}
// Returns the running mean of all processed elements.
double VarianceAndMeanCalculator::GetMean() const { return mean; }
  68. double VarianceAndMeanCalculator::GetVariance() const {
  69. if (count > 1) {
  70. return m2 / (count - 1);
  71. }
  72. return 0.0;
  73. }
  74. double VarianceAndMeanCalculator::GetStandardDeviation() { return sqrt(GetVariance()); }
// Builds a summary over a typed view of the current tensor buffer and an
// optional previous snapshot. min_/max_ start at the extreme opposite bounds
// so the first processed element always replaces them. Counters default to 0;
// epsilon_ guards a division in the abs_mean_update_ratio statistic.
// NOTE(review): the initializer list must stay in member-declaration order
// (header not visible here) — do not reorder.
template <typename T>
TensorSummary<T>::TensorSummary(const void *current_tensor_ptr, const void *const previous_tensor_ptr,
                                uint32_t num_elements, uint32_t prev_num_elements)
    : current_tensor_ptr_(reinterpret_cast<const T *>(current_tensor_ptr)),
      prev_tensor_ptr_(reinterpret_cast<const T *>(previous_tensor_ptr)),
      num_elements_(num_elements),
      prev_num_elements_(prev_num_elements),
      min_(std::numeric_limits<double>::max()),
      max_(std::numeric_limits<double>::lowest()),
      avg_(0.0),
      is_bool_(false),
      neg_zero_count_(0),
      pos_zero_count_(0),
      pos_inf_count_(0),
      neg_inf_count_(0),
      inf_count_(0),
      nan_count_(0),
      zero_count_(0),
      epsilon_(1.0e-9),
      mean_sd_cal_enabled_(false) {}
  95. template <typename T>
  96. void TensorSummary<T>::SummarizeTensor(const std::vector<DebugServices::watchpoint_t> &wps) {
  97. InitCalculators(wps);
  98. for (size_t i = 0; i < num_elements_; ++i) {
  99. auto current_value = static_cast<double>(current_tensor_ptr_[i]);
  100. double previous_value = std::numeric_limits<double>::quiet_NaN();
  101. if (prev_tensor_ptr_) {
  102. if (num_elements_ == prev_num_elements_) {
  103. previous_value = static_cast<double>(prev_tensor_ptr_[i]);
  104. } else {
  105. MS_LOG(DEBUG) << "Current and previous tensor are not the same size.";
  106. }
  107. }
  108. if (std::isinf(current_value)) {
  109. inf_count_ += 1;
  110. }
  111. if (std::isnan(current_value)) {
  112. nan_count_ += 1;
  113. }
  114. if (current_value == 0) {
  115. zero_count_ += 1;
  116. }
  117. max_ = std::max(max_, current_value);
  118. min_ = std::min(min_, current_value);
  119. if (mean_sd_cal_enabled_) {
  120. current_mean_variance_.ProcessElement(current_value);
  121. }
  122. for (auto &it : all_close_) {
  123. it.second->ProcessElement(current_value, previous_value);
  124. }
  125. for (auto &range_count : range_counts_) {
  126. range_count.second->ProcessElement(current_value);
  127. }
  128. for (auto &mean : means_) {
  129. if (mean.first.compare("curr_prev_diff_mean") == 0) {
  130. mean.second->ProcessElement(std::abs(current_value - previous_value));
  131. } else if (mean.first.compare("abs_prev_mean") == 0) {
  132. mean.second->ProcessElement(std::abs(previous_value));
  133. } else if (mean.first.compare("abs_current_mean") == 0) {
  134. mean.second->ProcessElement(std::abs(current_value));
  135. }
  136. }
  137. }
  138. }
  139. template <typename T>
  140. void TensorSummary<T>::TensorStatistics(DbgDataType dtype_value) {
  141. if (dtype_value == DT_BOOL) {
  142. is_bool_ = true;
  143. }
  144. const int default_threads = 32;
  145. const int default_elements_per_thread = 10000;
  146. if (num_elements_ <= default_elements_per_thread) {
  147. return TensorStatisticsSingleThread();
  148. }
  149. int desired_threads = num_elements_ / default_elements_per_thread;
  150. int actual_threads = std::min(desired_threads, default_threads);
  151. int actual_elements_per_thread = num_elements_ / actual_threads;
  152. // Use multithread to calculate statistic on chunks of data
  153. void *previous_tensor_ptr = nullptr;
  154. size_t offset = 0;
  155. std::vector<std::unique_ptr<TensorSummary<T>>> summary_vec;
  156. std::vector<std::future<void>> summary_future_vec;
  157. for (int i = 0; i < actual_threads; i++) {
  158. int num_elements_for_thread;
  159. if (i == actual_threads - 1) {
  160. num_elements_for_thread = num_elements_ - offset;
  161. } else {
  162. num_elements_for_thread = actual_elements_per_thread;
  163. }
  164. summary_vec.emplace_back(std::make_unique<TensorSummary<T>>(current_tensor_ptr_ + offset, previous_tensor_ptr,
  165. num_elements_for_thread, 0));
  166. summary_future_vec.emplace_back(
  167. std::async(std::launch::async, &TensorSummary<T>::TensorStatisticsSingleThread, summary_vec[i].get()));
  168. offset += num_elements_for_thread;
  169. }
  170. // Aggregate results of all chunks
  171. num_elements_ = 0; // Let current tensor weight 0 in the aggregation
  172. for (unsigned int i = 0; i < summary_future_vec.size(); i++) {
  173. summary_future_vec[i].wait();
  174. summary_future_vec[i].get();
  175. auto &cur_summary = *(summary_vec[i]);
  176. num_elements_ += cur_summary.num_elements_;
  177. min_ = std::min(min_, cur_summary.min_);
  178. max_ = std::max(max_, cur_summary.max_);
  179. double avg_delta = cur_summary.avg_ - avg_;
  180. avg_ += avg_delta * (cur_summary.num_elements_ / num_elements_);
  181. neg_zero_count_ += cur_summary.neg_zero_count_;
  182. pos_zero_count_ += cur_summary.pos_zero_count_;
  183. neg_inf_count_ += cur_summary.neg_inf_count_;
  184. pos_inf_count_ += cur_summary.pos_inf_count_;
  185. inf_count_ += cur_summary.inf_count_;
  186. nan_count_ += cur_summary.nan_count_;
  187. zero_count_ += cur_summary.zero_count_;
  188. }
  189. }
  190. template <typename T>
  191. void TensorSummary<T>::TensorStatisticsSingleThread() {
  192. MeanCalculator mean_calc = MeanCalculator();
  193. for (size_t i = 0; i < num_elements_; ++i) {
  194. auto current_value = static_cast<double>(current_tensor_ptr_[i]);
  195. if (std::isinf(current_value)) {
  196. if (current_value > 0) {
  197. pos_inf_count_ += 1;
  198. } else {
  199. neg_inf_count_ += 1;
  200. }
  201. }
  202. if (current_value == 0) {
  203. zero_count_ += 1;
  204. }
  205. if (std::isnan(current_value)) {
  206. nan_count_ += 1;
  207. }
  208. if (!(std::isnan(current_value) || std::isinf(current_value))) {
  209. // only considering tensor elements with value
  210. if (std::signbit(current_value) && !(current_value == 0)) {
  211. neg_zero_count_ += 1;
  212. } else if (!(current_value == 0)) {
  213. pos_zero_count_ += 1;
  214. }
  215. max_ = std::max(max_, current_value);
  216. min_ = std::min(min_, current_value);
  217. mean_calc.ProcessElement(current_value);
  218. }
  219. }
  220. avg_ = mean_calc.GetMean();
  221. }
  222. template <typename T>
  223. std::tuple<bool, int, std::vector<DebugServices::parameter_t>> TensorSummary<T>::IsWatchpointHit(
  224. DebugServices::watchpoint_t wp) {
  225. auto parameter_list = wp.parameter_list;
  226. bool hit = false;
  227. const uint8_t bit_size = 32;
  228. std::bitset<bit_size> error_code;
  229. CONDITION_TYPE type = wp.condition.type;
  230. // bit 0 denotes presence of nan
  231. (void)error_code.set(0, nan_count_ > 0);
  232. // bit 1 denotes presence of inf
  233. (void)error_code.set(1, inf_count_ > 0);
  234. if (type == CONDITION_TYPE::HAS_NAN) {
  235. error_code.reset();
  236. hit = nan_count_ > 0;
  237. } else if (type == CONDITION_TYPE::HAS_INF) {
  238. error_code.reset();
  239. hit = inf_count_ > 0;
  240. } else if (type == CONDITION_TYPE::GENERAL_OVERFLOW) {
  241. error_code.reset();
  242. hit = (nan_count_ + inf_count_) > 0;
  243. } else if (type == CONDITION_TYPE::NOT_CHANGED && prev_tensor_ptr_ && error_code.none()) {
  244. hit = all_close_[wp.id]->IsAllClose();
  245. } else if ((type == CONDITION_TYPE::NOT_CHANGED || type == CONDITION_TYPE::CHANGE_TOO_LARGE ||
  246. type == CONDITION_TYPE::CHANGE_TOO_SMALL) &&
  247. !prev_tensor_ptr_) {
  248. // bit 2 denotes absence of previous tensor
  249. error_code.set(2, true);
  250. }
  251. if (error_code.none()) {
  252. for (auto &parameter : parameter_list) {
  253. if (parameter.disabled || error_code.any()) {
  254. continue;
  255. }
  256. // extract inequality type from watchpoint for backward compatibility
  257. std::string inequality_type;
  258. if (wp.is_gt_wp()) {
  259. inequality_type = "gt";
  260. } else if (wp.is_lt_wp()) {
  261. inequality_type = "lt";
  262. }
  263. parameter.Evaluate(StatLookup(parameter.name, wp), inequality_type);
  264. hit = hit || parameter.hit;
  265. }
  266. }
  267. return std::make_tuple(hit, static_cast<int32_t>(error_code.to_ulong()), parameter_list);
  268. }
  269. template <typename T>
  270. double_t TensorSummary<T>::StatLookup(const std::string &parameter_name, const DebugServices::watchpoint_t &wp) {
  271. if (parameter_name == "param") return StatLookup(wp);
  272. std::string param_type;
  273. auto pos = parameter_name.find_last_of('_');
  274. if (pos != std::string::npos) {
  275. param_type = parameter_name.substr(0, pos);
  276. }
  277. if (param_type == "max") {
  278. return max_;
  279. }
  280. if (param_type == "min") {
  281. return min_;
  282. }
  283. if (param_type == "max_min") {
  284. return max_ - min_;
  285. }
  286. if (param_type == "mean") {
  287. return current_mean_variance_.GetMean();
  288. }
  289. if (param_type == "sd") {
  290. return current_mean_variance_.GetStandardDeviation();
  291. }
  292. if (param_type == "abs_mean") {
  293. if (means_.find("abs_current_mean") != means_.end()) {
  294. return means_["abs_current_mean"]->GetMean();
  295. }
  296. }
  297. if (param_type == "abs_mean_update_ratio" && prev_tensor_ptr_) {
  298. if (means_.find("curr_prev_diff_mean") != means_.end() && means_.find("abs_prev_mean") != means_.end()) {
  299. return means_["curr_prev_diff_mean"]->GetMean() / (means_["abs_prev_mean"]->GetMean() + epsilon_);
  300. }
  301. }
  302. if (param_type == "range_percentage") {
  303. if (range_counts_.find(wp.id) != range_counts_.end()) {
  304. return range_counts_[wp.id]->GetPercentInRange();
  305. }
  306. }
  307. if (param_type == "zero_percentage") {
  308. return GetZeroValPercent();
  309. }
  310. return std::numeric_limits<double_t>::quiet_NaN();
  311. }
  312. template <typename T>
  313. double_t TensorSummary<T>::StatLookup(const DebugServices::watchpoint_t &wp) {
  314. CONDITION_TYPE type = wp.condition.type;
  315. if (type == CONDITION_TYPE::MAX_LT || type == CONDITION_TYPE::MAX_GT) {
  316. return max_;
  317. }
  318. if (type == CONDITION_TYPE::MIN_LT || type == CONDITION_TYPE::MIN_GT) {
  319. return min_;
  320. }
  321. if (type == CONDITION_TYPE::MEAN_LT || type == CONDITION_TYPE::MEAN_GT) {
  322. return current_mean_variance_.GetMean();
  323. }
  324. if (type == CONDITION_TYPE::SD_LT || type == CONDITION_TYPE::SD_GT) {
  325. return current_mean_variance_.GetStandardDeviation();
  326. }
  327. if (type == CONDITION_TYPE::MAX_MIN_GT || type == CONDITION_TYPE::MAX_MIN_LT) {
  328. return max_ - min_;
  329. }
  330. return std::numeric_limits<double_t>::quiet_NaN();
  331. }
  332. template <typename T>
  333. double_t TensorSummary<T>::GetZeroValPercent() {
  334. if (num_elements_ == 0) {
  335. return 0;
  336. }
  337. return (zero_count_ * 100.0) / num_elements_;
  338. }
  339. template <typename T>
  340. void TensorSummary<T>::InitCalculators(const std::vector<DebugServices::watchpoint_t> &wps) {
  341. for (auto &wp : wps) {
  342. auto wp_id = wp.id;
  343. mean_sd_cal_enabled_ = mean_sd_cal_enabled_ || wp.mean_sd_enabled();
  344. if (wp.allclose_enabled() && prev_tensor_ptr_) {
  345. all_close_[wp_id] = std::make_unique<AllCloseCalculator>();
  346. if (!wp.parameter_list[0].disabled) {
  347. all_close_[wp_id]->set_rtol(wp.parameter_list[0].value);
  348. }
  349. if (!wp.parameter_list[1].disabled) {
  350. all_close_[wp_id]->set_atol(wp.parameter_list[1].value);
  351. }
  352. } else if (wp.range_enabled()) {
  353. range_counts_[wp_id] = std::make_unique<RangeCountCalculator>();
  354. if (!wp.parameter_list[0].disabled) {
  355. range_counts_[wp_id]->set_range_start_inclusive(wp.parameter_list[0].value);
  356. }
  357. if (!wp.parameter_list[1].disabled) {
  358. range_counts_[wp_id]->set_range_end_inclusive(wp.parameter_list[1].value);
  359. }
  360. } else if (wp.tensor_update_ratio_mean_enabled() && prev_tensor_ptr_) {
  361. (void)means_.emplace("curr_prev_diff_mean", std::make_unique<MeanCalculator>());
  362. (void)means_.emplace("abs_prev_mean", std::make_unique<MeanCalculator>());
  363. } else if (wp.abs_mean_enabled()) {
  364. (void)means_.emplace("abs_current_mean", std::make_unique<MeanCalculator>());
  365. }
  366. }
  367. }
// Explicit instantiations: one per element dtype the debugger can summarize.
template class TensorSummary<uint8_t>;
template class TensorSummary<int8_t>;
template class TensorSummary<uint16_t>;
template class TensorSummary<int16_t>;
template class TensorSummary<uint32_t>;
template class TensorSummary<int32_t>;
template class TensorSummary<uint64_t>;
template class TensorSummary<int64_t>;
template class TensorSummary<float16>;
template class TensorSummary<float>;
template class TensorSummary<double>;
template class TensorSummary<bool>;
  380. #ifdef ONLINE_DBG_MODE
  381. } // namespace mindspore
  382. #endif