context.h 16 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_INCLUDE_API_CONTEXT_H
#define MINDSPORE_INCLUDE_API_CONTEXT_H

#include <string>
#include <memory>
#include <vector>
#include <map>
#include <type_traits>  // std::is_base_of, used by DeviceInfoContext::Cast
#include "include/api/types.h"
#include "include/api/dual_abi_helper.h"

namespace mindspore {
enum DeviceType {
  kCPU = 0,
  kGPU,
  kKirinNPU,
  kAscend910,
  kAscend310,
  // add new type here
  kInvalidDeviceType = 100,
};

class Allocator;
class Delegate;
class DeviceInfoContext;

/// \brief Context is used to store environment variables during execution.
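///
/// \par Example
/// A minimal usage sketch (illustrative only; the thread and affinity values are arbitrary, and CPUDeviceInfo is
/// declared later in this header):
/// \code
/// auto context = std::make_shared<mindspore::Context>();
/// context->SetThreadNum(4);        // run with 4 threads (Lite only)
/// context->SetThreadAffinity(1);   // prefer big cores (Lite only)
/// auto cpu_info = std::make_shared<mindspore::CPUDeviceInfo>();
/// cpu_info->SetEnableFP16(false);  // keep float32 inference
/// context->MutableDeviceInfo().push_back(cpu_info);
/// \endcode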
class MS_API Context {
 public:
  Context();
  ~Context() = default;

  /// \brief Set the number of threads at runtime. Only valid for Lite.
  ///
  /// \param[in] thread_num The number of threads at runtime.
  void SetThreadNum(int32_t thread_num);
  /// \brief Get the current thread number setting. Only valid for Lite.
  ///
  /// \return The current thread number setting.
  int32_t GetThreadNum() const;

  /// \brief Set the thread affinity mode for CPU cores. Only valid for Lite.
  ///
  /// \param[in] mode 0: no affinity, 1: big cores first, 2: little cores first.
  void SetThreadAffinity(int mode);
  /// \brief Get the thread affinity mode for CPU cores. Only valid for Lite.
  ///
  /// \return The thread affinity mode. 0: no affinity, 1: big cores first, 2: little cores first.
  int GetThreadAffinityMode() const;

  /// \brief Set the list of CPU cores that threads are bound to. Only valid for Lite.
  ///
  /// \note If both core_list and mode are set by SetThreadAffinity, core_list takes effect and mode is ignored.
  ///
  /// \param[in] core_list A vector of CPU core IDs to bind threads to.
  void SetThreadAffinity(const std::vector<int> &core_list);
  /// \brief Get the list of CPU cores that threads are bound to. Only valid for Lite.
  ///
  /// \return A vector of CPU core IDs that threads are bound to.
  std::vector<int32_t> GetThreadAffinityCoreList() const;

  /// \brief Set whether to perform model inference or training in parallel. Only valid for Lite.
  ///
  /// \param[in] is_parallel true: run in parallel; false: do not run in parallel.
  void SetEnableParallel(bool is_parallel);
  /// \brief Get whether model inference or training is performed in parallel. Only valid for Lite.
  ///
  /// \return Bool value that indicates whether parallel execution is enabled.
  bool GetEnableParallel() const;

  /// \brief Set a Delegate to access a third-party AI framework. Only valid for Lite.
  ///
  /// \param[in] delegate Pointer to the custom delegate.
  void SetDelegate(const std::shared_ptr<Delegate> &delegate);
  /// \brief Get the delegate of the third-party AI framework. Only valid for Lite.
  ///
  /// \return Pointer to the custom delegate.
  std::shared_ptr<Delegate> GetDelegate() const;

  /// \brief Get a mutable reference of the DeviceInfoContext vector in this context. Only MindSpore Lite supports
  /// heterogeneous scenarios with multiple members in the vector.
  ///
  /// \return Mutable reference of the DeviceInfoContext vector in this context.
  std::vector<std::shared_ptr<DeviceInfoContext>> &MutableDeviceInfo();

 private:
  struct Data;
  std::shared_ptr<Data> data_;
};

/// \brief DeviceInfoContext defines different device contexts.
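///
/// \par Example
/// A minimal sketch of the Cast helper declared below (illustrative only):
/// \code
/// std::shared_ptr<mindspore::DeviceInfoContext> info = std::make_shared<mindspore::CPUDeviceInfo>();
/// auto cpu_info = info->Cast<mindspore::CPUDeviceInfo>();  // non-null: device types match
/// auto gpu_info = info->Cast<mindspore::GPUDeviceInfo>();  // nullptr: device types differ
/// \endcode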
class MS_API DeviceInfoContext : public std::enable_shared_from_this<DeviceInfoContext> {
 public:
  struct Data;

  DeviceInfoContext();
  virtual ~DeviceInfoContext() = default;

  /// \brief Get the type of this DeviceInfoContext.
  ///
  /// \return Type of this DeviceInfoContext.
  virtual enum DeviceType GetDeviceType() const = 0;

  /// \brief Convert this DeviceInfoContext to a shared pointer of type T, returning nullptr if the conversion fails.
  /// This provides RTTI-like behavior even when the -fno-rtti compilation option is turned on.
  ///
  /// \param T Type
  /// \return A pointer of type T after conversion. If the conversion fails, it will be nullptr.
  template <class T>
  std::shared_ptr<T> Cast() {
    static_assert(std::is_base_of<DeviceInfoContext, T>::value, "Wrong cast type.");
    if (GetDeviceType() != T().GetDeviceType()) {
      return nullptr;
    }
    return std::static_pointer_cast<T>(shared_from_this());
  }

  /// \brief Obtain the provider's name.
  ///
  /// \return The provider's name.
  std::string GetProvider() const;
  /// \brief Set the provider's name.
  ///
  /// \param[in] provider The provider's name.
  void SetProvider(const std::string &provider);

  /// \brief Obtain the provider's device type.
  ///
  /// \return The provider's device type.
  std::string GetProviderDevice() const;
  /// \brief Set the provider's device type.
  ///
  /// \param[in] device The provider's device type, e.g. CPU.
  void SetProviderDevice(const std::string &device);

  /// \brief Set the memory allocator.
  ///
  /// \param[in] allocator The memory allocator, which can be defined by the user.
  void SetAllocator(const std::shared_ptr<Allocator> &allocator);
  /// \brief Obtain the memory allocator.
  ///
  /// \return The memory allocator.
  std::shared_ptr<Allocator> GetAllocator() const;

 protected:
  std::shared_ptr<Data> data_;
};

/// \brief Derived from DeviceInfoContext, the configuration of the model running on the CPU. This option is only
/// valid for MindSpore Lite.
class MS_API CPUDeviceInfo : public DeviceInfoContext {
 public:
  /// \brief Get the type of this DeviceInfoContext.
  ///
  /// \return Type of this DeviceInfoContext.
  enum DeviceType GetDeviceType() const override { return DeviceType::kCPU; };

  /// \brief Set whether to enable float16 inference.
  ///
  /// \param[in] is_fp16 Enable float16 inference or not.
  void SetEnableFP16(bool is_fp16);
  /// \brief Get whether float16 inference is enabled.
  ///
  /// \return Whether float16 inference is enabled.
  bool GetEnableFP16() const;
};

/// \brief Derived from DeviceInfoContext, the configuration of the model running on the NPU. This option is only
/// valid for MindSpore Lite.
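///
/// \par Example
/// A minimal configuration sketch (illustrative only; frequency level 3 is the documented default):
/// \code
/// auto npu_info = std::make_shared<mindspore::KirinNPUDeviceInfo>();
/// npu_info->SetFrequency(3);  // 3: high performance
/// \endcode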
class MS_API KirinNPUDeviceInfo : public DeviceInfoContext {
 public:
  /// \brief Get the type of this DeviceInfoContext.
  ///
  /// \return Type of this DeviceInfoContext.
  enum DeviceType GetDeviceType() const override { return DeviceType::kKirinNPU; };

  /// \brief Set the NPU frequency.
  ///
  /// \param[in] frequency Can be set to 1 (low power consumption), 2 (balanced), 3 (high performance), or 4 (extreme
  /// performance); the default is 3.
  void SetFrequency(int frequency);
  /// \brief Get the NPU frequency.
  ///
  /// \return The NPU frequency.
  int GetFrequency() const;
};

/// \brief Derived from DeviceInfoContext, the configuration of the model running on the GPU.
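///
/// \par Example
/// A minimal configuration sketch (illustrative only; the device id is arbitrary and float16 support depends on
/// the target GPU):
/// \code
/// auto context = std::make_shared<mindspore::Context>();
/// auto gpu_info = std::make_shared<mindspore::GPUDeviceInfo>();
/// gpu_info->SetDeviceID(0);
/// gpu_info->SetEnableFP16(true);
/// context->MutableDeviceInfo().push_back(gpu_info);
/// \endcode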
class MS_API GPUDeviceInfo : public DeviceInfoContext {
 public:
  /// \brief Get the type of this DeviceInfoContext.
  ///
  /// \return Type of this DeviceInfoContext.
  enum DeviceType GetDeviceType() const override { return DeviceType::kGPU; };

  /// \brief Set the device id.
  ///
  /// \param[in] device_id The device id.
  void SetDeviceID(uint32_t device_id);
  /// \brief Get the device id.
  ///
  /// \return The device id.
  uint32_t GetDeviceID() const;

  /// \brief Set whether to use the TensorRT inference mode on GPU.
  ///
  /// \param[in] gpu_trt_infer_mode Enable the TensorRT inference mode or not.
  void SetGpuTrtInferMode(bool gpu_trt_infer_mode);
  /// \brief Get whether the TensorRT inference mode on GPU is enabled.
  ///
  /// \return Whether the TensorRT inference mode is enabled.
  bool GetGpuTrtInferMode() const;

  /// \brief Set the precision mode.
  ///
  /// \param[in] precision_mode The precision mode.
  inline void SetPrecisionMode(const std::string &precision_mode);
  /// \brief Get the precision mode.
  ///
  /// \return The precision mode.
  inline std::string GetPrecisionMode() const;

  /// \brief Set whether to enable float16 inference.
  ///
  /// \param[in] is_fp16 Enable float16 inference or not.
  void SetEnableFP16(bool is_fp16);
  /// \brief Get whether float16 inference is enabled.
  ///
  /// \return Whether float16 inference is enabled.
  bool GetEnableFP16() const;

 private:
  void SetPrecisionMode(const std::vector<char> &precision_mode);
  std::vector<char> GetPrecisionModeChar() const;
};

void GPUDeviceInfo::SetPrecisionMode(const std::string &precision_mode) {
  SetPrecisionMode(StringToChar(precision_mode));
}
std::string GPUDeviceInfo::GetPrecisionMode() const { return CharToString(GetPrecisionModeChar()); }

/// \brief Derived from DeviceInfoContext, the configuration of the model running on the Ascend910. This option is
/// invalid for MindSpore Lite.
class MS_API Ascend910DeviceInfo : public DeviceInfoContext {
 public:
  /// \brief Get the type of this DeviceInfoContext.
  ///
  /// \return Type of this DeviceInfoContext.
  enum DeviceType GetDeviceType() const override { return DeviceType::kAscend910; };

  /// \brief Set the device id.
  ///
  /// \param[in] device_id The device id.
  void SetDeviceID(uint32_t device_id);
  /// \brief Get the device id.
  ///
  /// \return The device id.
  uint32_t GetDeviceID() const;
};

/// \brief Derived from DeviceInfoContext, the configuration of the model running on the Ascend310. This option is
/// invalid for MindSpore Lite.
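///
/// \par Example
/// A minimal configuration sketch (illustrative only; the input name, shape, and precision mode are placeholder
/// values taken from the parameter documentation below):
/// \code
/// auto context = std::make_shared<mindspore::Context>();
/// auto ascend_info = std::make_shared<mindspore::Ascend310DeviceInfo>();
/// ascend_info->SetDeviceID(0);
/// ascend_info->SetInputShape("input_op_name1: 1,3,224,224");
/// ascend_info->SetPrecisionMode("allow_fp32_to_fp16");
/// context->MutableDeviceInfo().push_back(ascend_info);
/// \endcode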
class MS_API Ascend310DeviceInfo : public DeviceInfoContext {
 public:
  /// \brief Get the type of this DeviceInfoContext.
  ///
  /// \return Type of this DeviceInfoContext.
  enum DeviceType GetDeviceType() const override { return DeviceType::kAscend310; };

  /// \brief Set the device id.
  ///
  /// \param[in] device_id The device id.
  void SetDeviceID(uint32_t device_id);
  /// \brief Get the device id.
  ///
  /// \return The device id.
  uint32_t GetDeviceID() const;

  /// \brief Set the dump configuration file path.
  ///
  /// \param[in] cfg_path Dump configuration file path.
  inline void SetDumpConfigPath(const std::string &cfg_path);
  /// \brief Get the dump configuration file path.
  ///
  /// \return The dump configuration file path.
  inline std::string GetDumpConfigPath() const;

  /// \brief Set the AIPP configuration file path.
  ///
  /// \param[in] cfg_path AIPP configuration file path.
  inline void SetInsertOpConfigPath(const std::string &cfg_path);
  /// \brief Get the AIPP configuration file path.
  ///
  /// \return The AIPP configuration file path.
  inline std::string GetInsertOpConfigPath() const;

  /// \brief Set the format of model inputs.
  ///
  /// \param[in] format Optional "NCHW", "NHWC", etc.
  inline void SetInputFormat(const std::string &format);
  /// \brief Get the format of model inputs.
  ///
  /// \return The format of model inputs.
  inline std::string GetInputFormat() const;

  /// \brief Set the shape of model inputs.
  ///
  /// \param[in] shape e.g. "input_op_name1: 1,2,3,4;input_op_name2: 4,3,2,1".
  inline void SetInputShape(const std::string &shape);
  /// \brief Get the shape of model inputs.
  ///
  /// \return The shape of model inputs.
  inline std::string GetInputShape() const;

  /// \brief Set the shape of model inputs.
  ///
  /// \param[in] shape e.g. {{1, {1,2,3,4}}, {2, {4,3,2,1}}} means the shape of the first input is 1,2,3,4 and the
  /// shape of the second input is 4,3,2,1.
  void SetInputShapeMap(const std::map<int, std::vector<int>> &shape);
  /// \brief Get the shape of model inputs.
  ///
  /// \return The shape of model inputs.
  std::map<int, std::vector<int>> GetInputShapeMap() const;

  /// \brief Set the candidate dynamic batch sizes.
  ///
  /// \param[in] dynamic_batch_size A vector of candidate batch sizes.
  void SetDynamicBatchSize(const std::vector<size_t> &dynamic_batch_size);
  /// \brief Get the candidate dynamic batch sizes.
  ///
  /// \return The candidate dynamic batch sizes, as a string.
  inline std::string GetDynamicBatchSize() const;

  /// \brief Set the type of model outputs.
  ///
  /// \param[in] output_type FP32, UINT8 or FP16; the default is FP32.
  void SetOutputType(enum DataType output_type);
  /// \brief Get the type of model outputs.
  ///
  /// \return The set type of model outputs.
  enum DataType GetOutputType() const;

  /// \brief Set the precision mode of the model.
  ///
  /// \param[in] precision_mode Optional "force_fp16", "allow_fp32_to_fp16", "must_keep_origin_dtype" and
  /// "allow_mix_precision"; the default is "force_fp16".
  inline void SetPrecisionMode(const std::string &precision_mode);
  /// \brief Get the precision mode of the model.
  ///
  /// \return The precision mode of the model.
  inline std::string GetPrecisionMode() const;

  /// \brief Set the op select implementation mode.
  ///
  /// \param[in] op_select_impl_mode Optional "high_performance" and "high_precision"; the default is
  /// "high_performance".
  inline void SetOpSelectImplMode(const std::string &op_select_impl_mode);
  /// \brief Get the op select implementation mode.
  ///
  /// \return The set op select implementation mode.
  inline std::string GetOpSelectImplMode() const;

  /// \brief Set the fusion switch configuration file path.
  ///
  /// \param[in] cfg_path Fusion switch configuration file path.
  inline void SetFusionSwitchConfigPath(const std::string &cfg_path);
  /// \brief Get the fusion switch configuration file path.
  ///
  /// \return The fusion switch configuration file path.
  inline std::string GetFusionSwitchConfigPath() const;

  /// \brief Set the buffer optimize mode.
  ///
  /// \param[in] buffer_optimize_mode Optional "l1_optimize", "l2_optimize", "off_optimize" or "l1_and_l2_optimize";
  /// the default is "l2_optimize".
  inline void SetBufferOptimizeMode(const std::string &buffer_optimize_mode);
  /// \brief Get the buffer optimize mode.
  ///
  /// \return The buffer optimize mode.
  inline std::string GetBufferOptimizeMode() const;

 private:
  void SetDumpConfigPath(const std::vector<char> &cfg_path);
  std::vector<char> GetDumpConfigPathChar() const;

  void SetInsertOpConfigPath(const std::vector<char> &cfg_path);
  std::vector<char> GetInsertOpConfigPathChar() const;

  void SetInputFormat(const std::vector<char> &format);
  std::vector<char> GetInputFormatChar() const;

  void SetInputShape(const std::vector<char> &shape);
  std::vector<char> GetInputShapeChar() const;

  std::vector<char> GetDynamicBatchSizeChar() const;

  void SetPrecisionMode(const std::vector<char> &precision_mode);
  std::vector<char> GetPrecisionModeChar() const;

  void SetOpSelectImplMode(const std::vector<char> &op_select_impl_mode);
  std::vector<char> GetOpSelectImplModeChar() const;

  void SetFusionSwitchConfigPath(const std::vector<char> &cfg_path);
  std::vector<char> GetFusionSwitchConfigPathChar() const;

  void SetBufferOptimizeMode(const std::vector<char> &buffer_optimize_mode);
  std::vector<char> GetBufferOptimizeModeChar() const;
};

void Ascend310DeviceInfo::SetDumpConfigPath(const std::string &cfg_path) { SetDumpConfigPath(StringToChar(cfg_path)); }
std::string Ascend310DeviceInfo::GetDumpConfigPath() const { return CharToString(GetDumpConfigPathChar()); }

void Ascend310DeviceInfo::SetInsertOpConfigPath(const std::string &cfg_path) {
  SetInsertOpConfigPath(StringToChar(cfg_path));
}
std::string Ascend310DeviceInfo::GetInsertOpConfigPath() const { return CharToString(GetInsertOpConfigPathChar()); }

void Ascend310DeviceInfo::SetInputFormat(const std::string &format) { SetInputFormat(StringToChar(format)); }
std::string Ascend310DeviceInfo::GetInputFormat() const { return CharToString(GetInputFormatChar()); }

void Ascend310DeviceInfo::SetInputShape(const std::string &shape) { SetInputShape(StringToChar(shape)); }
std::string Ascend310DeviceInfo::GetInputShape() const { return CharToString(GetInputShapeChar()); }

std::string Ascend310DeviceInfo::GetDynamicBatchSize() const { return CharToString(GetDynamicBatchSizeChar()); }

void Ascend310DeviceInfo::SetPrecisionMode(const std::string &precision_mode) {
  SetPrecisionMode(StringToChar(precision_mode));
}
std::string Ascend310DeviceInfo::GetPrecisionMode() const { return CharToString(GetPrecisionModeChar()); }

void Ascend310DeviceInfo::SetOpSelectImplMode(const std::string &op_select_impl_mode) {
  SetOpSelectImplMode(StringToChar(op_select_impl_mode));
}
std::string Ascend310DeviceInfo::GetOpSelectImplMode() const { return CharToString(GetOpSelectImplModeChar()); }

void Ascend310DeviceInfo::SetFusionSwitchConfigPath(const std::string &cfg_path) {
  SetFusionSwitchConfigPath(StringToChar(cfg_path));
}
std::string Ascend310DeviceInfo::GetFusionSwitchConfigPath() const {
  return CharToString(GetFusionSwitchConfigPathChar());
}

void Ascend310DeviceInfo::SetBufferOptimizeMode(const std::string &buffer_optimize_mode) {
  SetBufferOptimizeMode(StringToChar(buffer_optimize_mode));
}
std::string Ascend310DeviceInfo::GetBufferOptimizeMode() const { return CharToString(GetBufferOptimizeModeChar()); }
}  // namespace mindspore
#endif  // MINDSPORE_INCLUDE_API_CONTEXT_H