
inference.h

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_INCLUDE_MS_SESSION_H
#define MINDSPORE_INCLUDE_MS_SESSION_H

#include <memory>
#include <vector>
#include <string>

#include "include/infer_tensor.h"

namespace mindspore {
namespace inference {

// Return codes shared by all InferSession operations.
enum Status { SUCCESS = 0, FAILED, INVALID_INPUTS };

// Abstract inference session: manages the device environment, model
// loading/unloading, and model execution. Concrete backends implement the
// pure virtual methods; callers obtain an instance via CreateSession.
class MS_API InferSession {
 public:
  InferSession() = default;
  virtual ~InferSession() = default;

  // Initialize the inference environment on the given device.
  virtual Status InitEnv(const std::string &device_type, uint32_t device_id) = 0;
  // Release the resources acquired by InitEnv.
  virtual Status FinalizeEnv() = 0;
  // Load a serialized model from file_name; on success model_id identifies it in later calls.
  virtual Status LoadModelFromFile(const std::string &file_name, uint32_t &model_id) = 0;
  // Unload a previously loaded model.
  virtual Status UnloadModel(uint32_t model_id) = 0;

  // Override this method to avoid request/reply data copy.
  virtual Status ExecuteModel(uint32_t model_id, const RequestBase &request, ReplyBase &reply) = 0;

  // Convenience overload: wraps the tensor vectors and forwards to the
  // request/reply variant above.
  virtual Status ExecuteModel(uint32_t model_id, const std::vector<InferTensor> &inputs,
                              std::vector<InferTensor> &outputs) {
    VectorInferTensorWrapRequest request(inputs);
    VectorInferTensorWrapReply reply(outputs);
    return ExecuteModel(model_id, request, reply);
  }

  // Factory for a backend-specific session bound to the given device.
  static std::shared_ptr<InferSession> CreateSession(const std::string &device, uint32_t device_id);
};

}  // namespace inference
}  // namespace mindspore
#endif  // MINDSPORE_INCLUDE_MS_SESSION_H
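A minimal usage sketch of this API follows. It is illustrative and not part of the header: the model path "model.om", the device name "Ascend", and the way the input InferTensor vector is populated are assumptions; the tensor construction helpers live in include/infer_tensor.h and are not shown.

// Hypothetical caller of the InferSession API declared above.
// Assumes the header is reachable as "include/inference.h" and that the
// device name and model path are valid for the target backend.
#include <iostream>
#include <memory>
#include <vector>
#include "include/inference.h"

int main() {
  using namespace mindspore::inference;

  // Create a backend-specific session and initialize the device environment.
  std::shared_ptr<InferSession> session = InferSession::CreateSession("Ascend", 0);
  if (session == nullptr || session->InitEnv("Ascend", 0) != SUCCESS) {
    std::cerr << "failed to create or initialize inference session" << std::endl;
    return 1;
  }

  // Load a serialized model; model_id identifies it in later calls.
  uint32_t model_id = 0;
  if (session->LoadModelFromFile("model.om", model_id) != SUCCESS) {  // placeholder path
    std::cerr << "failed to load model" << std::endl;
    session->FinalizeEnv();
    return 1;
  }

  std::vector<InferTensor> inputs;   // populate via infer_tensor.h APIs (not shown)
  std::vector<InferTensor> outputs;  // filled by ExecuteModel on success
  if (session->ExecuteModel(model_id, inputs, outputs) != SUCCESS) {
    std::cerr << "inference failed" << std::endl;
  }

  // Release the model and the device environment.
  session->UnloadModel(model_id);
  session->FinalizeEnv();
  return 0;
}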