
session.h 1.9 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_SERVING_SESSION_H
#define MINDSPORE_SERVING_SESSION_H

#include <string>
#include <mutex>
#include <vector>
#include <memory>

#include "util/status.h"
#include "version_control/model.h"
#include "include/inference.h"
#include "serving/ms_service.pb.h"
#include "serving/ms_service.grpc.pb.h"

namespace mindspore {
namespace serving {

using inference::FAILED;
using inference::INVALID_INPUTS;
using inference::Status;
using inference::SUCCESS;
using ms_serving::PredictReply;
using ms_serving::PredictRequest;

// Singleton that owns the process-wide inference session driven by the
// serving gRPC front end.
class Session {
 public:
  static Session &Instance();
  // Creates the underlying inference session on the given device.
  Status CreatDeviceSession(const std::string &device, uint32_t device_id);
  // Status Predict(const inference::MultiTensor &inputs, inference::MultiTensor &output);
  // Runs inference for a gRPC predict request and fills in the reply.
  Status Predict(const PredictRequest &request, PredictReply &reply);
  // Loads the given model into the session ahead of the first request.
  Status Warmup(const MindSporeModelPtr model);
  // Unloads the model and releases the inference session.
  Status Clear();
  // Queries the input tensor metadata of the loaded model.
  Status GetModelInputsInfo(std::vector<inference::InferTensor> &tensor_list);

 private:
  Session() = default;
  ~Session() = default;

  int sesseion_id_{0};
  std::shared_ptr<inference::InferSession> session_{nullptr};
  bool model_loaded_ = false;
  uint32_t graph_id_{0};
  std::mutex mutex_;
  std::string device_type_;
};
}  // namespace serving
}  // namespace mindspore
#endif  // MINDSPORE_SERVING_SESSION_H
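
For orientation, below is a minimal usage sketch of the singleton API declared above. It is not part of the repository: the include path, the "Ascend" device string, and the enum-style comparison of Status against SUCCESS are assumptions based on the declarations in this header, and the gRPC plumbing that normally constructs PredictRequest/PredictReply is omitted.

// Hypothetical usage sketch -- not from the repository. Assumes this header
// is reachable as "core/session.h" and that inference::Status values can be
// compared directly against inference::SUCCESS.
#include <iostream>
#include <vector>
#include "core/session.h"  // assumed include path for session.h

int main() {
  using mindspore::serving::Session;

  // Grab the process-wide singleton; there is exactly one inference
  // session per serving process.
  Session &session = Session::Instance();

  // Bind it to a device before any model work. "Ascend" and device id 0
  // are illustrative values.
  if (session.CreatDeviceSession("Ascend", 0) != mindspore::serving::SUCCESS) {
    std::cerr << "failed to create device session" << std::endl;
    return 1;
  }

  // After a model has been warmed up (via Session::Warmup with a
  // MindSporeModelPtr from the version-control module), its input
  // signature can be inspected:
  std::vector<mindspore::inference::InferTensor> inputs;
  if (session.GetModelInputsInfo(inputs) == mindspore::serving::SUCCESS) {
    std::cout << "model expects " << inputs.size() << " input tensors" << std::endl;
  }

  // Release the model and the underlying inference session on shutdown.
  session.Clear();
  return 0;
}

The private constructor plus the static Instance() accessor is the classic singleton pattern, and the mutex_ member suggests that Predict calls are serialized against the single underlying InferSession.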