
session.cc

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "core/session.h"
#include <grpcpp/grpcpp.h>
#include <string>
#include <map>
#include <vector>
#include <utility>
#include <memory>
#include <chrono>
#include <mutex>     // std::mutex / std::lock_guard used below
#include <iostream>  // std::cout used in the exception handlers
#include "include/infer_log.h"
#include "serving/ms_service.grpc.pb.h"
#include "core/util/option_parser.h"
#include "core/version_control/version_controller.h"
#include "core/util/file_system_operation.h"
#include "core/serving_tensor.h"

using ms_serving::MSService;
using ms_serving::PredictReply;
using ms_serving::PredictRequest;

namespace mindspore {
namespace serving {
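
// Creates the backend inference session for the given device type and device id.
// Must succeed before Warmup() and Predict() can be used.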
Status Session::CreatDeviceSession(const std::string &device, uint32_t device_id) {
  session_ = inference::InferSession::CreateSession(device, device_id);
  if (session_ == nullptr) {
    MSI_LOG(ERROR) << "Create session failed";
    return FAILED;
  }
  device_type_ = device;
  return SUCCESS;
}
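
// Returns the process-wide Session singleton.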
Session &Session::Instance() {
  static Session instance;
  return instance;
}
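
// Public inference entry point: delegates to PredictInner and converts any
// exception thrown during inference into a FAILED status so a single bad
// request cannot terminate the serving process.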
Status Session::Predict(const PredictRequest &request, PredictReply &reply) {
  try {
    auto status = PredictInner(request, reply);
    return status;
  } catch (const std::bad_alloc &ex) {
    MSI_LOG(ERROR) << "Serving Error: memory allocation failed";
    std::cout << "Serving Error: memory allocation failed" << std::endl;
  } catch (const std::runtime_error &ex) {
    MSI_LOG(ERROR) << "Serving Error: runtime error occurred: " << ex.what();
    std::cout << "Serving Error: runtime error occurred: " << ex.what() << std::endl;
  } catch (const std::exception &ex) {
    MSI_LOG(ERROR) << "Serving Error: exception occurred: " << ex.what();
    std::cout << "Serving Error: exception occurred: " << ex.what() << std::endl;
  } catch (...) {
    MSI_LOG(ERROR) << "Serving Error: exception occurred";
    std::cout << "Serving Error: exception occurred" << std::endl;
  }
  return FAILED;
}
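
// Runs the loaded graph on the request while holding the session lock.
// Requests carrying images use the image-aware ExecuteModel overload;
// plain tensor requests use the data-only overload.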
Status Session::PredictInner(const PredictRequest &request, PredictReply &reply) {
  if (!model_loaded_) {
    MSI_LOG(ERROR) << "The model has not been loaded";
    return FAILED;
  }
  if (session_ == nullptr) {
    MSI_LOG(ERROR) << "The inference session has not been initialized";
    return FAILED;
  }
  std::lock_guard<std::mutex> lock(mutex_);
  MSI_LOG(INFO) << "run Predict";
  if (request.images_size() > 0) {
    ServingImagesRequest serving_images(request);
    ServingRequest serving_request(request);
    ServingReply serving_reply(reply);
    Status ret = session_->ExecuteModel(graph_id_, serving_images, serving_request, serving_reply);
    if (ret != SUCCESS) {
      MSI_LOG(ERROR) << "Execute model with images failed";
      return ret;
    }
  } else if (request.data_size() > 0) {
    ServingRequest serving_request(request);
    ServingReply serving_reply(reply);
    Status ret = session_->ExecuteModel(graph_id_, serving_request, serving_reply);
    if (ret != SUCCESS) {
      MSI_LOG(ERROR) << "Execute model with data failed";
      return ret;
    }
  }
  MSI_LOG(INFO) << "run Predict finished";
  return SUCCESS;
}
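
// Loads the model file "<model path>/<model name>" into the inference session
// and marks the model as loaded; the load time is recorded via the
// MSI_TIME_STAMP_* macros.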
Status Session::Warmup(const MindSporeModelPtr model) {
  if (session_ == nullptr) {
    MSI_LOG(ERROR) << "CreatDeviceSession must be called before Warmup";
    return FAILED;
  }
  std::lock_guard<std::mutex> lock(mutex_);
  std::string file_name = model->GetModelPath() + '/' + model->GetModelName();
  model_loaded_ = false;
  MSI_TIME_STAMP_START(LoadModelFromFile)
  auto ret = session_->LoadModelFromFile(file_name, graph_id_);
  MSI_TIME_STAMP_END(LoadModelFromFile)
  if (ret != SUCCESS) {
    MSI_LOG(ERROR) << "Load graph model failed, file name is " << file_name.c_str();
    return ret;
  }
  model_loaded_ = true;
  MSI_LOG(INFO) << "Session Warmup finished";
  return SUCCESS;
}
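
// Unloads the current graph and finalizes the inference environment.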
Status Session::Clear() {
  if (session_ != nullptr) {
    session_->UnloadModel(graph_id_);
    session_->FinalizeEnv();
    session_ = nullptr;
  }
  return SUCCESS;
}
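
// Queries the loaded graph for the descriptions of its input tensors.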
Status Session::GetModelInputsInfo(std::vector<inference::InferTensor> &tensor_list) {
  if (!model_loaded_) {
    MSI_LOG(ERROR) << "The model has not been loaded";
    return FAILED;
  }
  if (session_ == nullptr) {
    MSI_LOG(ERROR) << "The inference session has not been initialized";
    return FAILED;
  }
  std::lock_guard<std::mutex> lock(mutex_);
  Status ret = session_->GetModelInputsInfo(graph_id_, &tensor_list);
  if (ret != SUCCESS) {
    MSI_LOG(ERROR) << "Get model inputs info failed";
  }
  return ret;
}

}  // namespace serving
}  // namespace mindspore