/**
 * \file src/network.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
  11. #include "lite/network.h"
  12. #include "function_base.h"
  13. #include "network_impl_base.h"
  14. #include "parse_info/parse_info_base.h"
  15. #include "parse_model/model_parser.h"
  16. #include "type_info.h"
  17. #if LITE_BUILD_WITH_MGE
  18. #include "mge/function_dft.h"
  19. #include "mge/network_impl.h"
  20. #endif
  21. #include <fstream>
  22. #include <memory>
  23. using namespace lite;
/**
 * \brief Construct the new Network implementation
 * the order must be:
 * 1. create the implementation
 * 2. config and load
 * 3. set_io
 */
Network::Network(const Config& config, const NetworkIO& network_io) {
    LITE_ERROR_HANDLER_BEGIN
    m_config = config;
    m_network_io = network_io;
    if (config.backend == LiteBackend::LITE_DEFAULT) {
        m_impl = call_func<
                NetworkImplDft, std::unique_ptr<lite::Network::NetworkImplBase>>(
                "create_network");
    }
    m_impl->set_config(config);
    m_impl->set_io(network_io);
    LITE_ERROR_HANDLER_END
}

Network::Network(const NetworkIO& network_io, const Config& config) {
    LITE_ERROR_HANDLER_BEGIN
    m_config = config;
    m_network_io = network_io;
    if (config.backend == LiteBackend::LITE_DEFAULT) {
        m_impl = call_func<
                NetworkImplDft, std::unique_ptr<lite::Network::NetworkImplBase>>(
                "create_network");
    }
    m_impl->set_config(config);
    m_impl->set_io(network_io);
    LITE_ERROR_HANDLER_END
}

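/**
 * A minimal usage sketch of the construction order described above (a sketch
 * only; the model path below is hypothetical):
 * \code
 * Config config;
 * NetworkIO io;
 * //! 1. create the implementation
 * auto network = std::make_shared<Network>(config, io);
 * //! 2. config and load
 * network->load_model("./example_model.mge");
 * //! 3. access the io tensors, then run
 * auto input = network->get_input_tensor(0);
 * auto output = network->get_output_tensor(0);
 * network->forward();
 * network->wait();
 * \endcode
 */
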
void Network::load_model(void* model_mem, size_t size) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    //! this model_mem is managed by user
    std::shared_ptr<void> model{model_mem, [](void*) {}};
    prase_model(model, size);
    LITE_ERROR_HANDLER_END
}

void Network::load_model(std::string model_path) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    FILE* fin = fopen(model_path.c_str(), "rb");
    LITE_ASSERT(fin, "failed to open %s: %s", model_path.c_str(), strerror(errno));
    fseek(fin, 0, SEEK_END);
    size_t size = ftell(fin);
    fseek(fin, 0, SEEK_SET);
    void* ptr = malloc(size);
    std::shared_ptr<void> buf{ptr, ::free};
    auto nr = fread(buf.get(), 1, size, fin);
    LITE_ASSERT(nr == size);
    fclose(fin);
    prase_model(buf, size);
    LITE_ERROR_HANDLER_END
}

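/**
 * A minimal sketch of the in-memory overload above; the buffer is owned by
 * the caller (as noted in load_model(void*, size_t)) and must stay alive
 * until loading finishes. The buffer source is hypothetical:
 * \code
 * std::vector<uint8_t> buffer = read_model_bytes();  // hypothetical helper
 * network->load_model(buffer.data(), buffer.size());
 * \endcode
 */
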
void Network::prase_model(std::shared_ptr<void> model_data, size_t size) {
    std::unordered_map<std::string, LiteAny> separate_config_map;
    ModelParser model_parser(model_data, size);
    //! parse the model info
    if (model_parser.parse_model_info(
                m_config, m_network_io, separate_config_map, m_extra_info)) {
        if (m_config.backend == LiteBackend::LITE_DEFAULT &&
            m_impl->get_backend_type() != LiteBackend::LITE_DEFAULT) {
            m_impl.reset(try_call_func<NetworkImplDft, lite::Network::NetworkImplBase*>(
                    "parse_model"));
        }
        m_impl->set_config(m_config);
        m_impl->set_io(m_network_io);
    }
    //! decrypt the model
    size_t model_length;
    auto&& model_shared_ptr = model_parser.parse_model(model_length, m_config);
    m_impl->load_model(model_shared_ptr, model_length, separate_config_map);
    m_loaded = true;
    update_from_implement();
}

Network::~Network() = default;

void Network::update_from_implement() {
    m_config.device_type = m_impl->get_device_type();
}

void Network::compute_only_configured_output() {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(
            !m_loaded,
            "compute_only_configured_output should be used before model "
            "loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    return m_impl->compute_only_configured_output();
    LITE_ERROR_HANDLER_END
}

std::shared_ptr<Tensor> Network::get_io_tensor(
        std::string name, LiteTensorPhase phase) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "get_io_tensor should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    return m_impl->get_io_tensor(name, phase);
    LITE_ERROR_HANDLER_END
}

std::shared_ptr<Tensor> Network::get_input_tensor(size_t index) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "get_input_tensor should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    return m_impl->get_input_tensor(index);
    LITE_ERROR_HANDLER_END
}

std::shared_ptr<Tensor> Network::get_output_tensor(size_t index) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "get_output_tensor should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    return m_impl->get_output_tensor(index);
    LITE_ERROR_HANDLER_END
}

Network& Network::set_async_callback(const AsyncCallback& callback) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(
            !m_config.options.force_output_use_user_specified_memory,
            "Async mode can't run with force_output_use_user_specified_memory, in "
            "which output data is written to user-specified memory.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    m_impl->set_async_callback(std::move(callback));
    return *this;
    LITE_ERROR_HANDLER_END
}

Network& Network::set_start_callback(const StartCallback& callback) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    m_impl->set_start_callback(std::move(callback));
    return *this;
    LITE_ERROR_HANDLER_END
}

Network& Network::set_finish_callback(const FinishCallback& callback) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    m_impl->set_finish_callback(std::move(callback));
    return *this;
    LITE_ERROR_HANDLER_END
}

Network& Network::set_device_id(int device_id) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(!m_loaded, "set_device_id should be used before model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    m_impl->set_device_id(device_id);
    return *this;
    LITE_ERROR_HANDLER_END
}

Network& Network::set_stream_id(int stream_id) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(!m_loaded, "set_stream_id should be used before model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    m_impl->set_stream_id(stream_id);
    return *this;
    LITE_ERROR_HANDLER_END
}

void Network::forward() {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "forward should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl.get());
    m_impl->forward();
    LITE_ERROR_HANDLER_END
}

void Network::wait() {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "wait should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    m_impl->wait();
    LITE_ERROR_HANDLER_END
}

std::string Network::get_input_name(size_t index) const {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "get_input_name should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    return m_impl->get_input_name(index);
    LITE_ERROR_HANDLER_END
}

std::string Network::get_output_name(size_t index) const {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "get_output_name should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    return m_impl->get_output_name(index);
    LITE_ERROR_HANDLER_END
}

std::vector<std::string> Network::get_all_input_name() const {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "get_all_input_name should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    auto all_input_name = m_impl->get_all_input_name();
    std::vector<std::string> all_names;
    for (auto& name : all_input_name) {
        all_names.push_back(name);
    }
    return all_names;
    LITE_ERROR_HANDLER_END
}

std::vector<std::string> Network::get_all_output_name() const {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "get_all_output_name should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    auto all_output_name = m_impl->get_all_output_name();
    std::vector<std::string> all_names;
    for (auto& name : all_output_name) {
        all_names.push_back(name);
    }
    return all_names;
    LITE_ERROR_HANDLER_END
}

int Network::get_device_id() const {
    LITE_ERROR_HANDLER_BEGIN
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    return m_impl->get_device_id();
    LITE_ERROR_HANDLER_END
}

int Network::get_stream_id() const {
    LITE_ERROR_HANDLER_BEGIN
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    return m_impl->get_stream_id();
    LITE_ERROR_HANDLER_END
}

void Network::enable_profile_performance(std::string profile_file_path) {
    LITE_ERROR_HANDLER_BEGIN
    m_impl->enable_profile_performance(profile_file_path);
    LITE_ERROR_HANDLER_END
}

const std::string& Network::get_model_extra_info() {
    LITE_ERROR_HANDLER_BEGIN
    return m_extra_info;
    LITE_ERROR_HANDLER_END
}

LiteDeviceType Network::get_device_type() const {
    LITE_ERROR_HANDLER_BEGIN
    return m_impl->get_device_type();
    LITE_ERROR_HANDLER_END
}

void Network::get_static_memory_alloc_info(const std::string& log_dir) const {
    LITE_ERROR_HANDLER_BEGIN
#ifndef __IN_TEE_ENV__
#if MGB_ENABLE_JSON
    LITE_ASSERT(
            m_loaded, "get_static_memory_alloc_info should be used after model loaded.");
    m_impl->get_static_memory_alloc_info(log_dir);
    return;
#endif
#endif
    LITE_MARK_USED_VAR(log_dir);
    LITE_THROW("Doesn't support get_static_memory_alloc_info(). Please check the build macros.");
    LITE_ERROR_HANDLER_END
}

/*********************** MGE special network function ***************/

void Runtime::set_cpu_threads_number(
        std::shared_ptr<Network> network, size_t nr_threads) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                !NetworkHelper::loaded(network),
                "set_cpu_threads_number should be used before model loaded.");
        call_func<NetworkImplDft, void>(
                "set_cpu_threads_number", network_impl, nr_threads);
        return;
    }
    LITE_THROW("set_cpu_threads_number is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

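/**
 * A minimal sketch of configuring multi-threaded CPU inference (the thread
 * count and model path are hypothetical); the thread number must be set
 * before the model is loaded:
 * \code
 * auto network = std::make_shared<Network>(Config{}, NetworkIO{});
 * Runtime::set_cpu_threads_number(network, 4);
 * network->load_model("./example_model.mge");
 * \endcode
 */
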
void Runtime::use_tensorrt(std::shared_ptr<Network> network) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                !NetworkHelper::loaded(network),
                "use_tensorrt should be used before model loaded.");
        call_func<NetworkImplDft, void>("use_tensorrt", network_impl);
        return;
    }
    LITE_THROW("use_tensorrt is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

size_t Runtime::get_cpu_threads_number(const std::shared_ptr<Network> network) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        return call_func<NetworkImplDft, size_t>(
                "get_cpu_threads_number", network_impl);
    }
    LITE_THROW("get_cpu_threads_number is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

void Runtime::set_runtime_thread_affinity(
        std::shared_ptr<Network> network,
        const ThreadAffinityCallback& thread_affinity_callback) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                NetworkHelper::loaded(network),
                "set_runtime_thread_affinity should be used after model "
                "loaded.");
        call_func<NetworkImplDft, void>(
                "set_runtime_thread_affinity", network_impl, thread_affinity_callback);
        return;
    }
    LITE_THROW("set_runtime_thread_affinity is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

void Runtime::set_cpu_inplace_mode(std::shared_ptr<Network> network) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                !NetworkHelper::loaded(network),
                "set_cpu_inplace_mode should be used before model loaded.");
        call_func<NetworkImplDft, void>("set_cpu_inplace_mode", network_impl);
        return;
    }
    LITE_THROW("set_cpu_inplace_mode is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

bool Runtime::is_cpu_inplace_mode(const std::shared_ptr<Network> network) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        return call_func<NetworkImplDft, bool>("is_cpu_inplace_mode", network_impl);
    }
    LITE_THROW("is_cpu_inplace_mode is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

//! set opr algorithm selection strategy in the network
void Runtime::set_network_algo_policy(
        std::shared_ptr<Network> network, LiteAlgoSelectStrategy strategy,
        uint32_t shared_batch_size, bool binary_equal_between_batch) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        call_func<NetworkImplDft, void>(
                "set_network_algo_policy", network_impl, strategy, shared_batch_size,
                binary_equal_between_batch);
        return;
    }
    LITE_THROW("set_network_algo_policy is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

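/**
 * A minimal sketch of selecting an algorithm policy (the strategy value is an
 * assumption; any LiteAlgoSelectStrategy member applies), typically done
 * before forward():
 * \code
 * Runtime::set_network_algo_policy(
 *         network, LiteAlgoSelectStrategy::LITE_ALGO_PROFILE,
 *         1, false);  // shared_batch_size = 1, binary_equal_between_batch = false
 * \endcode
 */
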
//! set the opr workspace limit in the network
void Runtime::set_network_algo_workspace_limit(
        std::shared_ptr<Network> network, size_t workspace_limit) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                NetworkHelper::loaded(network),
                "set_network_algo_workspace_limit should be used after model "
                "loaded.");
        call_func<NetworkImplDft, void>(
                "set_network_algo_workspace_limit", network_impl, workspace_limit);
        return;
    }
    LITE_THROW(
            "set_network_algo_workspace_limit is not available in the "
            "backend.");
    LITE_ERROR_HANDLER_END
}

//! set the network memory allocator, the allocator is defined by the user
void Runtime::set_memory_allocator(
        std::shared_ptr<Network> network, std::shared_ptr<Allocator> user_allocator) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                !NetworkHelper::loaded(network),
                "set_memory_allocator should be used before model loaded.");
        call_func<NetworkImplDft, void>(
                "set_memory_allocator", network_impl, user_allocator);
        return;
    }
    LITE_THROW("set_memory_allocator is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

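/**
 * A minimal sketch of installing a user-defined allocator before loading
 * (MyAllocator is a hypothetical subclass of lite::Allocator; the model path
 * is hypothetical):
 * \code
 * std::shared_ptr<Allocator> my_allocator = std::make_shared<MyAllocator>();
 * Runtime::set_memory_allocator(network, my_allocator);
 * network->load_model("./example_model.mge");
 * \endcode
 */
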
void Runtime::share_runtime_memory_with(
        std::shared_ptr<Network> dst_network, std::shared_ptr<Network> src_network) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl_dst = NetworkHelper::implement(dst_network);
    if (network_impl_dst->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                !NetworkHelper::loaded(dst_network),
                "share_runtime_memory_with should be used before model "
                "loaded.");
        call_func<NetworkImplDft, void>(
                "share_runtime_memory_with", network_impl_dst,
                NetworkHelper::implement(src_network));
        return;
    }
    LITE_THROW("share_runtime_memory_with is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

void Runtime::enable_io_txt_dump(
        std::shared_ptr<Network> network, std::string io_txt_out_file) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        call_func<NetworkImplDft, void>(
                "enable_io_txt_dump", network_impl, io_txt_out_file);
        return;
    }
    LITE_THROW("enable_io_txt_dump is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

void Runtime::enable_io_bin_dump(
        std::shared_ptr<Network> network, std::string io_bin_out_dir) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        call_func<NetworkImplDft, void>(
                "enable_io_bin_dump", network_impl, io_bin_out_dir);
        return;
    }
    LITE_THROW("enable_io_bin_dump is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

void Runtime::shared_weight_with_network(
        std::shared_ptr<Network> dst_network,
        const std::shared_ptr<Network> src_network) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl_dst = NetworkHelper::implement(dst_network);
    if (network_impl_dst->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                NetworkHelper::loaded(src_network),
                "shared_weight_with_network should be used after the src "
                "network is loaded.");
        auto src_implement = NetworkHelper::implement(src_network);
        call_func<NetworkImplDft, void>(
                "shared_weight_with", network_impl_dst, src_implement);
        NetworkHelper::loaded(dst_network, true);
        return;
    }
    LITE_THROW("shared_weight_with_network is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

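/**
 * A minimal sketch of sharing weights between two networks (names and path
 * are hypothetical); the src network must already be loaded, and the dst
 * network is marked as loaded by the call:
 * \code
 * auto src_network = std::make_shared<Network>(Config{}, NetworkIO{});
 * src_network->load_model("./example_model.mge");
 * auto dst_network = std::make_shared<Network>(Config{}, NetworkIO{});
 * Runtime::shared_weight_with_network(dst_network, src_network);
 * \endcode
 */
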
void Runtime::enable_global_layout_transform(std::shared_ptr<Network> network) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                !NetworkHelper::loaded(network),
                "enable_global_layout_transform should be used before model loaded.");
        call_func<NetworkImplDft, void>("enable_global_layout_transform", network_impl);
        return;
    }
    LITE_THROW("enable_global_layout_transform is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

void Runtime::dump_layout_transform_model(
        std::shared_ptr<Network> network, std::string optimized_model_path) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                NetworkHelper::loaded(network),
                "dump_layout_transform_model should be used after model loaded.");
        call_func<NetworkImplDft, void>(
                "dump_layout_transform_model", network_impl, optimized_model_path);
        return;
    }
    LITE_THROW("dump_layout_transform_model is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

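/**
 * A minimal sketch of the global layout transform workflow (paths are
 * hypothetical): enable it before loading, then dump the optimized model
 * after loading:
 * \code
 * auto network = std::make_shared<Network>(Config{}, NetworkIO{});
 * Runtime::enable_global_layout_transform(network);
 * network->load_model("./example_model.mge");
 * Runtime::dump_layout_transform_model(network, "./optimized_model.mge");
 * \endcode
 */
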
NetworkIO Runtime::get_model_io_info(
        const std::string& model_path, const Config& config) {
    LITE_ERROR_HANDLER_BEGIN
    if (config.backend == LiteBackend::LITE_DEFAULT) {
        return call_func<NetworkImplDft, NetworkIO>(
                "get_model_io_info", model_path, config);
    }
    LITE_THROW("get_model_io_info is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

NetworkIO Runtime::get_model_io_info(
        const void* model_mem, size_t size, const Config& config) {
    LITE_ERROR_HANDLER_BEGIN
    if (config.backend == LiteBackend::LITE_DEFAULT) {
        return call_func<NetworkImplDft, NetworkIO>(
                "get_model_io_info", model_mem, size, config);
    }
    LITE_THROW("get_model_io_info is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}