You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

serving_tensor.cc 6.1 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "core/serving_tensor.h"
  17. #include <vector>
  18. #include <unordered_map>
  19. #include <string>
  20. #include <algorithm>
  21. #include "include/infer_log.h"
  22. using std::string;
  23. using std::unordered_map;
  24. using std::vector;
  25. namespace mindspore {
  26. namespace serving {
  27. using inference::DataType;
  28. using inference::InferTensorBase;
// Upper bound on the total number of elements a tensor shape may describe
// (enforced by ServingTensor::set_shape).
const size_t kMaxShapeElementCount = INT32_MAX;
// Upper bound, in bytes, for a tensor's data buffer.
const size_t kMaxDataBufferSize = UINT32_MAX;
// ServingTensor adapts a ms_serving::Tensor protobuf message to the
// inference::InferTensorBase interface. Only a reference is stored, so
// `other` must outlive this wrapper.
ServingTensor::ServingTensor(ms_serving::Tensor &other) : tensor_(other) {}
ServingTensor::~ServingTensor() {}
  33. DataType ServingTensor::data_type() const {
  34. const std::unordered_map<ms_serving::DataType, inference::DataType> type2id_map{
  35. {ms_serving::MS_UNKNOWN, inference::kMSI_Unknown}, {ms_serving::MS_BOOL, inference::kMSI_Bool},
  36. {ms_serving::MS_INT8, inference::kMSI_Int8}, {ms_serving::MS_UINT8, inference::kMSI_Uint8},
  37. {ms_serving::MS_INT16, inference::kMSI_Int16}, {ms_serving::MS_UINT16, inference::kMSI_Uint16},
  38. {ms_serving::MS_INT32, inference::kMSI_Int32}, {ms_serving::MS_UINT32, inference::kMSI_Uint32},
  39. {ms_serving::MS_INT64, inference::kMSI_Int64}, {ms_serving::MS_UINT64, inference::kMSI_Uint64},
  40. {ms_serving::MS_FLOAT16, inference::kMSI_Float16}, {ms_serving::MS_FLOAT32, inference::kMSI_Float32},
  41. {ms_serving::MS_FLOAT64, inference::kMSI_Float64},
  42. };
  43. auto it = type2id_map.find(tensor_.tensor_type());
  44. if (it == type2id_map.end()) {
  45. MSI_LOG_WARNING << "failed to get data type, undefined data type " << tensor_.tensor_type();
  46. return inference::kMSI_Unknown;
  47. } else {
  48. return it->second;
  49. }
  50. }
  51. void ServingTensor::set_data_type(DataType data_type) {
  52. const std::unordered_map<inference::DataType, ms_serving::DataType> id2type_map{
  53. {inference::kMSI_Unknown, ms_serving::MS_UNKNOWN}, {inference::kMSI_Bool, ms_serving::MS_BOOL},
  54. {inference::kMSI_Float64, ms_serving::MS_FLOAT64}, {inference::kMSI_Int8, ms_serving::MS_INT8},
  55. {inference::kMSI_Uint8, ms_serving::MS_UINT8}, {inference::kMSI_Int16, ms_serving::MS_INT16},
  56. {inference::kMSI_Uint16, ms_serving::MS_UINT16}, {inference::kMSI_Int32, ms_serving::MS_INT32},
  57. {inference::kMSI_Uint32, ms_serving::MS_UINT32}, {inference::kMSI_Int64, ms_serving::MS_INT64},
  58. {inference::kMSI_Uint64, ms_serving::MS_UINT64}, {inference::kMSI_Float16, ms_serving::MS_FLOAT16},
  59. {inference::kMSI_Float32, ms_serving::MS_FLOAT32},
  60. };
  61. auto it = id2type_map.find(data_type);
  62. if (it == id2type_map.end()) {
  63. MSI_LOG_WARNING << "failed to set data type, undefined data type " << data_type;
  64. tensor_.set_tensor_type(ms_serving::MS_UNKNOWN);
  65. } else {
  66. tensor_.set_tensor_type(it->second);
  67. }
  68. }
  69. std::vector<int64_t> ServingTensor::shape() const {
  70. std::vector<int64_t> result;
  71. auto dims = tensor_.tensor_shape().dims();
  72. std::transform(dims.begin(), dims.end(), std::back_inserter(result), [](const int64_t dim) { return dim; });
  73. return result;
  74. }
  75. void ServingTensor::set_shape(const std::vector<int64_t> &shape) {
  76. auto tensor_shape = tensor_.mutable_tensor_shape();
  77. tensor_shape->Clear();
  78. size_t element_count = 1;
  79. for (auto dim : shape) {
  80. if (dim <= 0 || element_count > kMaxShapeElementCount / dim) {
  81. MSI_LOG_ERROR << "failed to set shape, invalid dim num " << dim;
  82. tensor_shape->Clear();
  83. return;
  84. }
  85. element_count *= dim;
  86. tensor_shape->add_dims(dim);
  87. }
  88. }
  89. bool ServingTensor::resize_data(size_t data_len) {
  90. string *buffer = tensor_.mutable_data();
  91. if (buffer == nullptr) {
  92. MSI_LOG_ERROR << "invalid buffer data";
  93. return false;
  94. }
  95. buffer->resize(data_len);
  96. return true;
  97. }
// Number of bytes currently held in the tensor's data buffer.
size_t ServingTensor::data_size() const { return tensor_.data().size(); }
// Writable pointer to the tensor data; the const_cast is needed because
// std::string::data() returns const char * before C++17.
void *ServingTensor::mutable_data() { return const_cast<char *>(tensor_.mutable_data()->data()); }
// Read-only pointer to the tensor data.
const void *ServingTensor::data() const { return tensor_.data().data(); }
  101. ServingRequest::ServingRequest(const ms_serving::PredictRequest &request) : request_(request) {
  102. auto &data = request_.data();
  103. std::transform(data.begin(), data.end(), std::back_inserter(cache_),
  104. [](const ms_serving::Tensor &item) { return ServingTensor(const_cast<ms_serving::Tensor &>(item)); });
  105. }
// Number of input tensors carried by the request.
size_t ServingRequest::size() const { return request_.data_size(); }
  107. const InferTensorBase *ServingRequest::operator[](size_t index) const {
  108. if (index >= cache_.size()) {
  109. MSI_LOG_ERROR << "visit invalid index " << index << " total size " << cache_.size();
  110. return nullptr;
  111. }
  112. return &(cache_[index]);
  113. }
// Number of result tensors added to the reply so far.
size_t ServingReply::size() const { return cache_.size(); }
  115. InferTensorBase *ServingReply::operator[](size_t index) {
  116. if (index >= cache_.size()) {
  117. MSI_LOG_ERROR << "visit invalid index " << index << " total size " << cache_.size();
  118. return nullptr;
  119. }
  120. return &(cache_[index]);
  121. }
  122. const InferTensorBase *ServingReply::operator[](size_t index) const {
  123. if (index >= cache_.size()) {
  124. MSI_LOG_ERROR << "visit invalid index " << index << " total size " << cache_.size();
  125. return nullptr;
  126. }
  127. return &(cache_[index]);
  128. }
  129. InferTensorBase *ServingReply::add() {
  130. auto new_item = reply_.add_result();
  131. if (new_item == nullptr) {
  132. MSI_LOG_ERROR << "add new item failed, current total size " << cache_.size();
  133. return nullptr;
  134. }
  135. cache_.push_back(ServingTensor(*new_item));
  136. return &(cache_.back());
  137. }
  138. void ServingReply::clear() { reply_.mutable_result()->Clear(); }
  139. } // namespace serving
  140. } // namespace mindspore