
model.h

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_INCLUDE_API_MODEL_H
#define MINDSPORE_INCLUDE_API_MODEL_H
#include <string>
#include <vector>
#include <map>
#include <memory>
#include <utility>
#include "include/api/status.h"
#include "include/api/types.h"
#include "include/api/graph.h"
#include "include/api/context.h"
#include "include/api/callback/callback.h"
#include "include/api/cell.h"
#include "include/api/cfg.h"
#include "include/api/dual_abi_helper.h"
namespace mindspore {
class ModelImpl;
class Metrics;
namespace dataset {
class Dataset;
}  // namespace dataset
/// \brief The Model class is used to define a MindSpore model, facilitating computational graph management.
class MS_API Model {
 public:
  Model();
  ~Model();
  Model(const Model &) = delete;
  void operator=(const Model &) = delete;
  /// \brief Builds a model so that it can run on a device.
  ///
  /// \param[in] graph GraphCell is a derivative of Cell. Cell is not available currently. GraphCell can be constructed
  /// from Graph, for example, model.Build(GraphCell(graph), context).
  /// \param[in] model_context A context used to store options during execution.
  /// \param[in] train_cfg A config used by training.
  ///
  /// \return Status.
  Status Build(GraphCell graph, const std::shared_ptr<Context> &model_context = nullptr,
               const std::shared_ptr<TrainCfg> &train_cfg = nullptr);
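  // A minimal usage sketch (not part of this header). It assumes a MindIR file at a
  // hypothetical `model_path`, loads it with Serialization::Load from
  // include/api/serialization.h, and targets the CPU:
  //
  //   mindspore::Graph graph;
  //   mindspore::Serialization::Load(model_path, mindspore::ModelType::kMindIR, &graph);
  //   auto context = std::make_shared<mindspore::Context>();
  //   context->MutableDeviceInfo().push_back(std::make_shared<mindspore::CPUDeviceInfo>());
  //   mindspore::Model model;
  //   mindspore::Status ret = model.Build(mindspore::GraphCell(graph), context);
  //   if (ret != mindspore::kSuccess) { /* handle build failure */ }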
  /// \brief Resizes the shapes of inputs.
  ///
  /// \param[in] inputs A vector that includes all input tensors in order.
  /// \param[in] dims Defines the new shapes of the inputs; must be consistent with the order of `inputs`.
  ///
  /// \return Status.
  Status Resize(const std::vector<MSTensor> &inputs, const std::vector<std::vector<int64_t>> &dims);
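  // Sketch: growing the batch dimension of a single-input NHWC model to 2
  // (illustrative only; assumes `model` was already built):
  //
  //   auto inputs = model.GetInputs();
  //   std::vector<std::vector<int64_t>> new_shapes = {{2, 224, 224, 3}};
  //   if (model.Resize(inputs, new_shapes) != mindspore::kSuccess) { /* handle error */ }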
  /// \brief Changes the size and/or content of weight tensors.
  ///
  /// \param[in] new_weights A vector of tensors with the new shapes and data to use in the model.
  /// If a tensor's data pointer is null, the data of the original tensor is copied to the new one.
  ///
  /// \return Status.
  Status UpdateWeights(const std::vector<MSTensor> &new_weights);
  /// \brief Runs model inference.
  ///
  /// \param[in] inputs A vector where the model inputs are arranged in sequence.
  /// \param[out] outputs A pointer to a vector; the model outputs are filled into the container in sequence.
  /// \param[in] before CallBack invoked before predict.
  /// \param[in] after CallBack invoked after predict.
  ///
  /// \return Status.
  Status Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs,
                 const MSKernelCallBack &before = nullptr, const MSKernelCallBack &after = nullptr);
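  // Sketch of a full inference call (assumes a built `model`; `FillInputData` is a
  // hypothetical helper that writes into the tensor's MutableData() buffer):
  //
  //   auto inputs = model.GetInputs();
  //   for (auto &tensor : inputs) {
  //     FillInputData(tensor.MutableData(), tensor.DataSize());
  //   }
  //   std::vector<mindspore::MSTensor> outputs;
  //   if (model.Predict(inputs, &outputs) == mindspore::kSuccess) {
  //     for (auto &out : outputs) { /* read out.Data(), out.Shape() */ }
  //   }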
  /// \brief Runs model inference with data preprocessing embedded in the model.
  ///
  /// \param[in] inputs A vector where the model inputs are arranged in sequence.
  /// \param[out] outputs A pointer to a vector; the model outputs are filled into the container in sequence.
  /// \param[in] before CallBack invoked before predict.
  /// \param[in] after CallBack invoked after predict.
  ///
  /// \note The data preprocess embedded in the model, if any, is applied before inference.
  ///
  /// \return Status.
  Status PredictWithPreprocess(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs,
                               const MSKernelCallBack &before = nullptr, const MSKernelCallBack &after = nullptr);
  /// \brief Applies data preprocessing if it exists in the model.
  ///
  /// \param[in] inputs A vector where the model inputs are arranged in sequence.
  /// \param[out] outputs A pointer to a vector; the model outputs are filled into the container in sequence.
  ///
  /// \return Status.
  Status Preprocess(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs);
  /// \brief Checks whether data preprocessing exists in the model.
  /// \return true if data preprocessing exists.
  bool HasPreprocess();
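  // Sketch: apply the embedded preprocess only when the model carries one
  // (illustrative; assumes `raw_inputs` was prepared elsewhere):
  //
  //   std::vector<mindspore::MSTensor> processed;
  //   if (model.HasPreprocess()) {
  //     model.Preprocess(raw_inputs, &processed);
  //   }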
  /// \brief Loads a config file.
  ///
  /// \param[in] config_path The config file path.
  ///
  /// \return Status.
  inline Status LoadConfig(const std::string &config_path);
  /// \brief Updates the config.
  ///
  /// \param[in] section The section of the config to update.
  /// \param[in] config The key-value pair to be updated.
  ///
  /// \return Status.
  inline Status UpdateConfig(const std::string &section, const std::pair<std::string, std::string> &config);
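  // Sketch: load a config file, then override a single key; the path, section,
  // and key names here are made up for illustration:
  //
  //   model.LoadConfig("/path/to/model.cfg");
  //   model.UpdateConfig("execution_plan", {"thread_num", "4"});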
  /// \brief Obtains all input tensors of the model.
  ///
  /// \return The vector that includes all input tensors.
  std::vector<MSTensor> GetInputs();
  /// \brief Obtains the input tensor of the model by name.
  ///
  /// \return The input tensor with the given name; if the name is not found, an invalid tensor is returned.
  inline MSTensor GetInputByTensorName(const std::string &tensor_name);
  /// \brief Obtains all gradient tensors of the model.
  ///
  /// \return The vector that includes all gradient tensors.
  std::vector<MSTensor> GetGradients() const;
  /// \brief Updates the gradient tensors of the model.
  ///
  /// \param[in] gradients A vector of new gradient tensors.
  /// \return Status of the operation.
  Status ApplyGradients(const std::vector<MSTensor> &gradients);
  /// \brief Obtains the optimizer parameter tensors of the model.
  ///
  /// \return The vector that includes all parameter tensors.
  std::vector<MSTensor> GetOptimizerParams() const;
  /// \brief Updates the optimizer parameters.
  ///
  /// \param[in] params A vector of new optimizer parameters.
  /// \return Status of the operation.
  Status SetOptimizerParams(const std::vector<MSTensor> &params);
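  // Sketch of a manual gradient step using the training accessors (training
  // API only; `Scale` and `accumulation_steps` are hypothetical and stand in
  // for application-specific rescaling logic):
  //
  //   auto grads = model.GetGradients();
  //   for (auto &g : grads) { Scale(&g, 1.0f / accumulation_steps); }
  //   model.ApplyGradients(grads);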
  Status InitMetrics(std::vector<Metrics *> metrics);
  std::vector<Metrics *> GetMetrics();
  /// \brief Obtains all output tensors of the model.
  ///
  /// \return The vector that includes all output tensors.
  std::vector<MSTensor> GetOutputs();
  /// \brief Obtains the names of all output tensors of the model.
  ///
  /// \return A vector that includes the names of all output tensors.
  inline std::vector<std::string> GetOutputTensorNames();
  /// \brief Obtains the output tensor of the model by name.
  ///
  /// \return The output tensor with the given name; if the name is not found, an invalid tensor is returned.
  inline MSTensor GetOutputByTensorName(const std::string &tensor_name);
  /// \brief Gets the output MSTensors of the model by node name.
  ///
  /// \param[in] node_name Define the node name.
  ///
  /// \note Deprecated; replace with GetOutputByTensorName.
  ///
  /// \return The vector of output MSTensor.
  inline std::vector<MSTensor> GetOutputsByNodeName(const std::string &node_name);
  /// \brief Checks whether the given device supports the given model type.
  ///
  /// \param[in] device_type Device type; options are kGPU, kAscend910, etc.
  /// \param[in] model_type The type of model file; options are ModelType::kMindIR, ModelType::kOM.
  ///
  /// \return Whether it is supported or not.
  static bool CheckModelSupport(enum DeviceType device_type, ModelType model_type);
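  // Sketch: guard a GPU deployment behind a support check (illustrative):
  //
  //   bool ok = mindspore::Model::CheckModelSupport(mindspore::kGPU, mindspore::ModelType::kMindIR);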
  Status SetTrainMode(bool train);
  bool GetTrainMode() const;
  Status Train(int epochs, std::shared_ptr<dataset::Dataset> ds, std::vector<TrainCallBack *> cbs);
  Status Evaluate(std::shared_ptr<dataset::Dataset> ds, std::vector<TrainCallBack *> cbs);
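  // Sketch of a training run (training API; assumes `train_ds` and `eval_ds`
  // are dataset::Dataset handles built elsewhere, and that the LossMonitor
  // callback from include/api/callback/loss_monitor.h is available):
  //
  //   mindspore::LossMonitor monitor(/*print_every_n=*/100);
  //   model.SetTrainMode(true);
  //   model.Train(/*epochs=*/5, train_ds, {&monitor});
  //   model.Evaluate(eval_ds, {});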
  /// \brief Builds a model from a model buffer so that it can run on a device. Only valid for Lite.
  ///
  /// \param[in] model_data Define the buffer read from a model file.
  /// \param[in] data_size Define the byte size of the model buffer.
  /// \param[in] model_type Define the type of model file. Options: ModelType::kMindIR, ModelType::kOM. Only
  /// ModelType::kMindIR is valid for Lite.
  /// \param[in] model_context Define the context used to store options during execution.
  /// \param[in] dec_key Define the key used to decrypt the ciphertext model. The key length is 16, 24, or 32.
  /// \param[in] dec_mode Define the decryption mode. Options: AES-GCM, AES-CBC.
  ///
  /// \return Status.
  inline Status Build(const void *model_data, size_t data_size, ModelType model_type,
                      const std::shared_ptr<Context> &model_context = nullptr, const Key &dec_key = {},
                      const std::string &dec_mode = kDecModeAesGcm);
  /// \brief Loads and builds a model from a model file so that it can run on a device. Only valid for Lite.
  ///
  /// \param[in] model_path Define the model path.
  /// \param[in] model_type Define the type of model file. Options: ModelType::kMindIR, ModelType::kOM. Only
  /// ModelType::kMindIR is valid for Lite.
  /// \param[in] model_context Define the context used to store options during execution.
  /// \param[in] dec_key Define the key used to decrypt the ciphertext model. The key length is 16, 24, or 32.
  /// \param[in] dec_mode Define the decryption mode. Options: AES-GCM, AES-CBC.
  ///
  /// \return Status.
  inline Status Build(const std::string &model_path, ModelType model_type,
                      const std::shared_ptr<Context> &model_context = nullptr, const Key &dec_key = {},
                      const std::string &dec_mode = kDecModeAesGcm);
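  // Sketch: one-step build from a MindIR file on disk, relying on the default
  // (empty) decryption key; the path is illustrative and `context` is assumed
  // to be configured as in the earlier sketch:
  //
  //   mindspore::Model model;
  //   auto ret = model.Build("/path/to/net.mindir", mindspore::ModelType::kMindIR, context);
  //   if (ret != mindspore::kSuccess) { /* handle error */ }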
 private:
  friend class Serialization;
  // api without std::string
  MSTensor GetInputByTensorName(const std::vector<char> &tensor_name);
  std::vector<std::vector<char>> GetOutputTensorNamesChar();
  MSTensor GetOutputByTensorName(const std::vector<char> &tensor_name);
  std::vector<MSTensor> GetOutputsByNodeName(const std::vector<char> &node_name);
  Status LoadConfig(const std::vector<char> &config_path);
  Status UpdateConfig(const std::vector<char> &section, const std::pair<std::vector<char>, std::vector<char>> &config);
  Status Build(const void *model_data, size_t data_size, ModelType model_type,
               const std::shared_ptr<Context> &model_context, const Key &dec_key, const std::vector<char> &dec_mode);
  Status Build(const std::vector<char> &model_path, ModelType model_type,
               const std::shared_ptr<Context> &model_context, const Key &dec_key, const std::vector<char> &dec_mode);
  std::shared_ptr<ModelImpl> impl_;
};
MSTensor Model::GetInputByTensorName(const std::string &tensor_name) {
  return GetInputByTensorName(StringToChar(tensor_name));
}
std::vector<std::string> Model::GetOutputTensorNames() { return VectorCharToString(GetOutputTensorNamesChar()); }
MSTensor Model::GetOutputByTensorName(const std::string &tensor_name) {
  return GetOutputByTensorName(StringToChar(tensor_name));
}
std::vector<MSTensor> Model::GetOutputsByNodeName(const std::string &node_name) {
  return GetOutputsByNodeName(StringToChar(node_name));
}
Status Model::LoadConfig(const std::string &config_path) {
  return LoadConfig(StringToChar(config_path));
}
Status Model::UpdateConfig(const std::string &section, const std::pair<std::string, std::string> &config) {
  std::pair<std::vector<char>, std::vector<char>> config_pair = {StringToChar(config.first),
                                                                 StringToChar(config.second)};
  return UpdateConfig(StringToChar(section), config_pair);
}
Status Model::Build(const void *model_data, size_t data_size, ModelType model_type,
                    const std::shared_ptr<Context> &model_context, const Key &dec_key, const std::string &dec_mode) {
  return Build(model_data, data_size, model_type, model_context, dec_key, StringToChar(dec_mode));
}
Status Model::Build(const std::string &model_path, ModelType model_type,
                    const std::shared_ptr<Context> &model_context, const Key &dec_key, const std::string &dec_mode) {
  return Build(StringToChar(model_path), model_type, model_context, dec_key, StringToChar(dec_mode));
}
}  // namespace mindspore
#endif  // MINDSPORE_INCLUDE_API_MODEL_H