You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

tensorprint_utils.cc 5.2 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "utils/tensorprint_utils.h"
  17. #include <atomic>
  18. #include <thread>
  19. #include <fstream>
  20. #include <memory>
  21. #include <string>
  22. #include <vector>
  23. #include "ir/meta_tensor.h"
  24. #include "device/convert_tensor_utils.h"
  25. #include "./securec.h"
  26. #ifndef NO_DLIB
  27. #include "tdt/tsd_client.h"
  28. #include "tdt/tdt_host_interface.h"
  29. #include "tdt/data_common.h"
  30. #endif
  31. namespace mindspore {
  32. const char kShapeSeperator[] = ",";
  33. static std::map<std::string, TypeId> print_type_map = {
  34. {"int8_t", TypeId::kNumberTypeInt8}, {"uint8_t", TypeId::kNumberTypeUInt8},
  35. {"int16_t", TypeId::kNumberTypeInt16}, {"uint16_t", TypeId::kNumberTypeUInt16},
  36. {"int32_t", TypeId::kNumberTypeInt32}, {"uint32_t", TypeId::kNumberTypeUInt32},
  37. {"int64_t", TypeId::kNumberTypeInt64}, {"uint64_t", TypeId::kNumberTypeUInt64},
  38. {"float16", TypeId::kNumberTypeFloat16}, {"float", TypeId::kNumberTypeFloat32},
  39. {"double", TypeId::kNumberTypeFloat64}, {"bool", TypeId::kNumberTypeBool}};
  40. static std::map<std::string, size_t> type_size_map = {
  41. {"int8_t", sizeof(int8_t)}, {"uint8_t", sizeof(uint8_t)}, {"int16_t", sizeof(int16_t)},
  42. {"uint16_t", sizeof(uint16_t)}, {"int32_t", sizeof(int32_t)}, {"uint32_t", sizeof(uint32_t)},
  43. {"int64_t", sizeof(int64_t)}, {"uint64_t", sizeof(uint64_t)}, {"float16", sizeof(float) / 2},
  44. {"float", sizeof(float)}, {"double", sizeof(double)}, {"bool", sizeof(bool)}};
  45. bool ParseTensorShape(const std::string &input_shape_str, std::vector<int> *const tensor_shape, size_t *dims) {
  46. if (tensor_shape == nullptr) {
  47. return false;
  48. }
  49. std::string shape_str = input_shape_str;
  50. if (shape_str.size() <= 2) {
  51. return false;
  52. }
  53. (void)shape_str.erase(shape_str.begin());
  54. shape_str.pop_back();
  55. shape_str += kShapeSeperator;
  56. string::size_type pos_begin = 0;
  57. string::size_type pos_end = shape_str.find(kShapeSeperator);
  58. while (pos_end != std::string::npos) {
  59. string dim_str = shape_str.substr(pos_begin, pos_end - pos_begin);
  60. tensor_shape->emplace_back(std::stoi(dim_str));
  61. (*dims) = (*dims) * std::stoul(dim_str);
  62. pos_begin = pos_end + sizeof(kShapeSeperator) - 1;
  63. pos_end = shape_str.find(kShapeSeperator, pos_begin);
  64. }
  65. return true;
  66. }
  67. bool PrintTensorToString(const char *str_data_ptr, mindspore::tensor::Tensor *const print_tensor,
  68. const size_t &memory_size) {
  69. auto *tensor_data_ptr = static_cast<uint8_t *>(print_tensor->data_c(true));
  70. MS_EXCEPTION_IF_NULL(tensor_data_ptr);
  71. auto cp_ret =
  72. memcpy_s(tensor_data_ptr, static_cast<size_t>(print_tensor->data().nbytes()), str_data_ptr, memory_size);
  73. if (cp_ret != EOK) {
  74. MS_LOG(ERROR) << "Print op Failed to copy the memory to py::tensor " << cp_ret;
  75. return false;
  76. }
  77. return true;
  78. }
  79. #ifndef NO_DLIB
  80. bool ConvertDataItem2Tensor(const std::vector<tdt::DataItem> &items) {
  81. // Acquire Python GIL
  82. py::gil_scoped_acquire gil_acquire;
  83. std::ostringstream buf;
  84. bool ret_end_sequence = false;
  85. for (auto &item : items) {
  86. if (item.dataType_ == tdt::TDT_END_OF_SEQUENCE) {
  87. ret_end_sequence = true;
  88. break;
  89. }
  90. std::vector<int> tensor_shape;
  91. size_t totaldims = 1;
  92. if (!ParseTensorShape(item.tensorShape_, &tensor_shape, &totaldims)) {
  93. MS_LOG(ERROR) << "Tensor print can not parse tensor shape, receive info" << item.tensorShape_;
  94. continue;
  95. }
  96. std::shared_ptr<std::string> str_data_ptr = std::static_pointer_cast<std::string>(item.dataPtr_);
  97. MS_EXCEPTION_IF_NULL(str_data_ptr);
  98. if (item.tensorType_ == "string") {
  99. std::string data(reinterpret_cast<const char *>(str_data_ptr->c_str()), item.dataLen_);
  100. buf << data << std::endl;
  101. } else {
  102. auto type_iter = print_type_map.find(item.tensorType_);
  103. if (type_iter == print_type_map.end()) {
  104. MS_LOG(ERROR) << "type of tensor need to print is not support " << item.tensorType_;
  105. continue;
  106. }
  107. auto type_id = type_iter->second;
  108. mindspore::tensor::Tensor print_tensor(type_id, tensor_shape);
  109. auto memory_size = totaldims * type_size_map[item.tensorType_];
  110. if (PrintTensorToString(str_data_ptr->data(), &print_tensor, memory_size)) {
  111. buf << print_tensor.ToStringRepr() << std::endl;
  112. }
  113. }
  114. }
  115. std::cout << buf.str() << std::endl;
  116. return ret_end_sequence;
  117. }
  118. void TensorPrint::operator()() {
  119. while (true) {
  120. std::vector<tdt::DataItem> bundle;
  121. if (tdt::TdtHostPopData("_npu_log", bundle) != 0) {
  122. break;
  123. }
  124. if (ConvertDataItem2Tensor(bundle)) {
  125. break;
  126. }
  127. }
  128. }
  129. #endif
  130. } // namespace mindspore