
model.h 2.2 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_SERVING_WORKER_MODEL_H
#define MINDSPORE_SERVING_WORKER_MODEL_H

#include <memory>
#include <unordered_map>
#include <vector>
#include <string>

#include "common/serving_common.h"
#include "common/instance.h"
#include "common/servable.h"
#include "worker/inference/inference.h"

namespace mindspore::serving {

// Abstract interface for a servable model: one Predict call plus metadata queries.
class ServableBase {
 public:
  ServableBase() = default;
  virtual ~ServableBase() = default;

  virtual Status Predict(const std::vector<TensorBasePtr> &input, std::vector<TensorBasePtr> *output) = 0;
  virtual std::vector<TensorInfo> GetInputInfos() const = 0;
  virtual std::vector<TensorInfo> GetOutputInfos() const = 0;
  virtual uint64_t GetBatchSize() const = 0;
  virtual TensorBasePtr MakeInferenceTensor(DataType data_type, const std::vector<int64_t> &shape) const {
    return nullptr;
  }
};

// Servable backed by an Ascend inference session; model_id selects the loaded model.
class AscendModelServable : public ServableBase {
 public:
  AscendModelServable(const std::shared_ptr<serving::InferSession> &session, uint32_t model_id)
      : session_(session), model_id_(model_id) {}
  ~AscendModelServable() = default;

  Status Predict(const std::vector<TensorBasePtr> &input, std::vector<TensorBasePtr> *output) override;
  std::vector<TensorInfo> GetInputInfos() const override;
  std::vector<TensorInfo> GetOutputInfos() const override;
  uint64_t GetBatchSize() const override;
  TensorBasePtr MakeInferenceTensor(DataType data_type, const std::vector<int64_t> &shape) const override;

 private:
  std::shared_ptr<serving::InferSession> session_{nullptr};
  uint32_t model_id_ = 0;
};

}  // namespace mindspore::serving
#endif  // MINDSPORE_SERVING_WORKER_MODEL_H
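
For reference, the sketch below shows how a concrete subclass might fill in the ServableBase interface: a pass-through servable whose Predict simply echoes its inputs. This is a minimal illustration, not part of the repository; the include path "worker/model.h", the SUCCESS status value, and the empty tensor-info lists are assumptions inferred from the declarations above rather than verbatim MindSpore Serving API.

// echo_servable.h -- illustrative sketch only; names outside model.h are assumptions.
#include <memory>
#include <vector>
#include "worker/model.h"  // assumed path of the header shown above

namespace mindspore::serving {

// A pass-through servable: Predict copies each input tensor pointer to the output.
class EchoServable : public ServableBase {
 public:
  Status Predict(const std::vector<TensorBasePtr> &input, std::vector<TensorBasePtr> *output) override {
    output->assign(input.begin(), input.end());  // echo inputs unchanged
    return SUCCESS;  // assumed success value from common/serving_common.h
  }
  std::vector<TensorInfo> GetInputInfos() const override { return {}; }   // no fixed input signature
  std::vector<TensorInfo> GetOutputInfos() const override { return {}; }  // outputs mirror inputs
  uint64_t GetBatchSize() const override { return 1; }  // one instance per Predict call
  // MakeInferenceTensor keeps the base-class default (returns nullptr), so callers
  // fall back to their own tensor allocation; backends such as AscendModelServable
  // override it, presumably to hand out device-backed tensors.
};

}  // namespace mindspore::serving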

A lightweight, high-performance service module that helps MindSpore developers efficiently deploy online inference services in production environments.