You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

serialization.h 6.0 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef MINDSPORE_INCLUDE_API_SERIALIZATION_H
  17. #define MINDSPORE_INCLUDE_API_SERIALIZATION_H
  18. #include <string>
  19. #include <vector>
  20. #include <map>
  21. #include <memory>
  22. #include "include/api/status.h"
  23. #include "include/api/types.h"
  24. #include "include/api/model.h"
  25. #include "include/api/graph.h"
  26. #include "include/api/dual_abi_helper.h"
  27. namespace mindspore {
/// \brief The Serialization class is used to summarize methods for reading and writing model files.
class MS_API Serialization {
 public:
  /// \brief Loads a model file from memory buffer.
  ///
  /// \param[in] model_data A buffer filled by model file.
  /// \param[in] data_size The size of the buffer.
  /// \param[in] model_type The Type of model file, options are ModelType::kMindIR, ModelType::kOM.
  /// \param[out] graph The output parameter, an object saves graph data.
  /// \param[in] dec_key The decryption key, key length is 16, 24, or 32.
  /// \param[in] dec_mode The decryption mode, optional options are AES-GCM, AES-CBC.
  ///
  /// \return Status.
  inline static Status Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph,
                            const Key &dec_key = {}, const std::string &dec_mode = kDecModeAesGcm);

  /// \brief Loads a model file from path, is not supported on MindSpore Lite.
  ///
  /// \param[in] file The path of model file.
  /// \param[in] model_type The Type of model file, options are ModelType::kMindIR, ModelType::kOM.
  /// \param[out] graph The output parameter, an object saves graph data.
  /// \param[in] dec_key The decryption key, key length is 16, 24, or 32.
  /// \param[in] dec_mode The decryption mode, optional options are AES-GCM, AES-CBC.
  ///
  /// \return Status.
  inline static Status Load(const std::string &file, ModelType model_type, Graph *graph, const Key &dec_key = {},
                            const std::string &dec_mode = kDecModeAesGcm);

  /// \brief Load multiple models from multiple files, MindSpore Lite does not provide this feature.
  ///
  /// \param[in] files The path of model files.
  /// \param[in] model_type The Type of model file, options are ModelType::kMindIR, ModelType::kOM.
  /// \param[out] graphs The output parameter, a vector that collects one Graph per loaded file.
  /// \param[in] dec_key The decryption key, key length is 16, 24, or 32.
  /// \param[in] dec_mode The decryption mode, optional options are AES-GCM, AES-CBC.
  ///
  /// \return Status.
  inline static Status Load(const std::vector<std::string> &files, ModelType model_type, std::vector<Graph> *graphs,
                            const Key &dec_key = {}, const std::string &dec_mode = kDecModeAesGcm);

  /// \brief Set parameters of a model from a name-to-buffer map.
  ///
  /// \param[in] parameters Mapping from parameter name to the buffer holding its data.
  ///            NOTE(review): the expected buffer layout per parameter is defined by the
  ///            implementation — confirm before relying on it.
  /// \param[in,out] model The model whose parameters are updated.
  ///
  /// \return Status.
  static Status SetParameters(const std::map<std::string, Buffer> &parameters, Model *model);

  /// \brief Export a model into a memory buffer.
  ///
  /// \param[in] model The model to export.
  /// \param[in] model_type The Type of the exported model file.
  /// \param[out] model_data The output parameter, the buffer receiving the serialized model.
  ///
  /// \return Status.
  static Status ExportModel(const Model &model, ModelType model_type, Buffer *model_data);

  /// \brief Export a model to a file on disk.
  ///
  /// \param[in] model The model to export.
  /// \param[in] model_type The Type of the exported model file.
  /// \param[in] model_file The destination file path.
  /// \param[in] quantization_type The quantization applied on export, default kNoQuant.
  /// \param[in] export_inference_only Whether to export only the inference graph, default true.
  /// \param[in] output_tensor_name Output tensor names for the exported graph; defaults to empty.
  ///            NOTE(review): an empty vector presumably means "keep all outputs" — confirm.
  ///
  /// \return Status.
  inline static Status ExportModel(const Model &model, ModelType model_type, const std::string &model_file,
                                   QuantizationType quantization_type = kNoQuant, bool export_inference_only = true,
                                   std::vector<std::string> output_tensor_name = {});

 private:
  // Dual-ABI implementation overloads: the public std::string-based methods above convert their
  // string arguments with StringToChar/VectorStringToChar (see include/api/dual_abi_helper.h)
  // and forward to these std::vector<char>-based entry points, as shown by the inline wrappers
  // defined below in this header.
  static Status Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph, const Key &dec_key,
                     const std::vector<char> &dec_mode);
  static Status Load(const std::vector<char> &file, ModelType model_type, Graph *graph);
  static Status Load(const std::vector<char> &file, ModelType model_type, Graph *graph, const Key &dec_key,
                     const std::vector<char> &dec_mode);
  static Status Load(const std::vector<std::vector<char>> &files, ModelType model_type, std::vector<Graph> *graphs,
                     const Key &dec_key, const std::vector<char> &dec_mode);
  static Status ExportModel(const Model &model, ModelType model_type, const std::vector<char> &model_file,
                            QuantizationType quantization_type, bool export_inference_only,
                            const std::vector<std::vector<char>> &output_tensor_name);
};
  82. Status Serialization::Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph,
  83. const Key &dec_key, const std::string &dec_mode) {
  84. return Load(model_data, data_size, model_type, graph, dec_key, StringToChar(dec_mode));
  85. }
  86. Status Serialization::Load(const std::string &file, ModelType model_type, Graph *graph, const Key &dec_key,
  87. const std::string &dec_mode) {
  88. return Load(StringToChar(file), model_type, graph, dec_key, StringToChar(dec_mode));
  89. }
  90. Status Serialization::Load(const std::vector<std::string> &files, ModelType model_type, std::vector<Graph> *graphs,
  91. const Key &dec_key, const std::string &dec_mode) {
  92. return Load(VectorStringToChar(files), model_type, graphs, dec_key, StringToChar(dec_mode));
  93. }
  94. Status Serialization::ExportModel(const Model &model, ModelType model_type, const std::string &model_file,
  95. QuantizationType quantization_type, bool export_inference_only,
  96. std::vector<std::string> output_tensor_name) {
  97. return ExportModel(model, model_type, StringToChar(model_file), quantization_type, export_inference_only,
  98. VectorStringToChar(output_tensor_name));
  99. }
  100. } // namespace mindspore
  101. #endif // MINDSPORE_INCLUDE_API_SERIALIZATION_H