You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

main.cc 24 kB

5 years ago
5 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741
  1. /**
  2. * Copyright 2021 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
#include <cstdlib>
#include <cstring>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <memory>
#include <random>
#include <string>
#include <thread>
#include <vector>
#include "include/api/allocator.h"
#include "include/api/context.h"
#include "include/api/model.h"
#include "include/api/serialization.h"
#include "include/api/types.h"
  27. std::string RealPath(const char *path) {
  28. const size_t max = 4096;
  29. if (path == nullptr) {
  30. std::cerr << "path is nullptr" << std::endl;
  31. return "";
  32. }
  33. if ((strlen(path)) >= max) {
  34. std::cerr << "path is too long" << std::endl;
  35. return "";
  36. }
  37. auto resolved_path = std::make_unique<char[]>(max);
  38. if (resolved_path == nullptr) {
  39. std::cerr << "new resolved_path failed" << std::endl;
  40. return "";
  41. }
  42. #ifdef _WIN32
  43. char *real_path = _fullpath(resolved_path.get(), path, 1024);
  44. #else
  45. char *real_path = realpath(path, resolved_path.get());
  46. #endif
  47. if (real_path == nullptr || strlen(real_path) == 0) {
  48. std::cerr << "file path is not valid : " << path << std::endl;
  49. return "";
  50. }
  51. std::string res = resolved_path.get();
  52. return res;
  53. }
  54. char *ReadFile(const char *file, size_t *size) {
  55. if (file == nullptr) {
  56. std::cerr << "file is nullptr." << std::endl;
  57. return nullptr;
  58. }
  59. std::ifstream ifs(file);
  60. if (!ifs.good()) {
  61. std::cerr << "file: " << file << " is not exist." << std::endl;
  62. return nullptr;
  63. }
  64. if (!ifs.is_open()) {
  65. std::cerr << "file: " << file << " open failed." << std::endl;
  66. return nullptr;
  67. }
  68. ifs.seekg(0, std::ios::end);
  69. *size = ifs.tellg();
  70. std::unique_ptr<char[]> buf(new (std::nothrow) char[*size]);
  71. if (buf == nullptr) {
  72. std::cerr << "malloc buf failed, file: " << file << std::endl;
  73. ifs.close();
  74. return nullptr;
  75. }
  76. ifs.seekg(0, std::ios::beg);
  77. ifs.read(buf.get(), *size);
  78. ifs.close();
  79. return buf.release();
  80. }
  81. template <typename T, typename Distribution>
  82. void GenerateRandomData(int size, void *data, Distribution distribution) {
  83. if (data == nullptr) {
  84. std::cerr << "data is nullptr." << std::endl;
  85. return;
  86. }
  87. std::mt19937 random_engine;
  88. int elements_num = size / sizeof(T);
  89. (void)std::generate_n(static_cast<T *>(data), elements_num,
  90. [&]() { return static_cast<T>(distribution(random_engine)); });
  91. }
  92. std::shared_ptr<mindspore::CPUDeviceInfo> CreateCPUDeviceInfo() {
  93. auto device_info = std::make_shared<mindspore::CPUDeviceInfo>();
  94. if (device_info == nullptr) {
  95. std::cerr << "New CPUDeviceInfo failed." << std::endl;
  96. return nullptr;
  97. }
  98. // Use float16 operator as priority.
  99. device_info->SetEnableFP16(true);
  100. return device_info;
  101. }
  102. std::shared_ptr<mindspore::GPUDeviceInfo> CreateGPUDeviceInfo() {
  103. auto device_info = std::make_shared<mindspore::GPUDeviceInfo>();
  104. if (device_info == nullptr) {
  105. std::cerr << "New GPUDeviceInfo failed." << std::endl;
  106. return nullptr;
  107. }
  108. // If GPU device info is set. The preferred backend is GPU, which means, if there is a GPU operator, it will run on
  109. // the GPU first, otherwise it will run on the CPU.
  110. // GPU use float16 operator as priority.
  111. device_info->SetEnableFP16(true);
  112. return device_info;
  113. }
  114. std::shared_ptr<mindspore::KirinNPUDeviceInfo> CreateNPUDeviceInfo() {
  115. auto device_info = std::make_shared<mindspore::KirinNPUDeviceInfo>();
  116. if (device_info == nullptr) {
  117. std::cerr << "New KirinNPUDeviceInfo failed." << std::endl;
  118. return nullptr;
  119. }
  120. device_info->SetFrequency(3);
  121. return device_info;
  122. }
  123. mindspore::Status GetInputsAndSetData(mindspore::Model *model) {
  124. auto inputs = model->GetInputs();
  125. // The model has only one input tensor.
  126. auto in_tensor = inputs.front();
  127. if (in_tensor == nullptr) {
  128. std::cerr << "Input tensor is nullptr" << std::endl;
  129. return mindspore::kLiteNullptr;
  130. }
  131. auto input_data = in_tensor.MutableData();
  132. if (input_data == nullptr) {
  133. std::cerr << "MallocData for inTensor failed." << std::endl;
  134. return mindspore::kLiteNullptr;
  135. }
  136. GenerateRandomData<float>(in_tensor.DataSize(), input_data, std::uniform_real_distribution<float>(0.1f, 1.0f));
  137. return mindspore::kSuccess;
  138. }
  139. mindspore::Status GetInputsByTensorNameAndSetData(mindspore::Model *model) {
  140. auto in_tensor = model->GetInputByTensorName("graph_input-173");
  141. if (in_tensor == nullptr) {
  142. std::cerr << "Input tensor is nullptr" << std::endl;
  143. return mindspore::kLiteNullptr;
  144. }
  145. auto input_data = in_tensor.MutableData();
  146. if (input_data == nullptr) {
  147. std::cerr << "MallocData for inTensor failed." << std::endl;
  148. return mindspore::kLiteNullptr;
  149. }
  150. GenerateRandomData<float>(in_tensor.DataSize(), input_data, std::uniform_real_distribution<float>(0.1f, 1.0f));
  151. return mindspore::kSuccess;
  152. }
  153. void GetOutputsByNodeName(mindspore::Model *model) {
  154. // model has a output node named output_node_name_0.
  155. auto output_vec = model->GetOutputsByNodeName("Softmax-65");
  156. // output node named output_node_name_0 has only one output tensor.
  157. auto out_tensor = output_vec.front();
  158. if (out_tensor == nullptr) {
  159. std::cerr << "Output tensor is nullptr" << std::endl;
  160. return;
  161. }
  162. std::cout << "tensor size is:" << out_tensor.DataSize() << " tensor elements num is:" << out_tensor.ElementNum()
  163. << std::endl;
  164. // The model output data is float 32.
  165. if (out_tensor.DataType() != mindspore::DataType::kNumberTypeFloat32) {
  166. std::cerr << "Output should in float32" << std::endl;
  167. return;
  168. }
  169. auto out_data = reinterpret_cast<float *>(out_tensor.MutableData());
  170. if (out_data == nullptr) {
  171. std::cerr << "Data of out_tensor is nullptr" << std::endl;
  172. return;
  173. }
  174. std::cout << "output data is:";
  175. for (int i = 0; i < out_tensor.ElementNum() && i < 10; i++) {
  176. std::cout << out_data[i] << " ";
  177. }
  178. std::cout << std::endl;
  179. }
  180. void GetOutputByTensorName(mindspore::Model *model) {
  181. // We can use GetOutputTensorNames method to get all name of output tensor of model which is in order.
  182. auto tensor_names = model->GetOutputTensorNames();
  183. for (const auto &tensor_name : tensor_names) {
  184. auto out_tensor = model->GetOutputByTensorName(tensor_name);
  185. if (out_tensor == nullptr) {
  186. std::cerr << "Output tensor is nullptr" << std::endl;
  187. return;
  188. }
  189. std::cout << "tensor size is:" << out_tensor.DataSize() << " tensor elements num is:" << out_tensor.ElementNum()
  190. << std::endl;
  191. // The model output data is float 32.
  192. if (out_tensor.DataType() != mindspore::DataType::kNumberTypeFloat32) {
  193. std::cerr << "Output should in float32" << std::endl;
  194. return;
  195. }
  196. auto out_data = reinterpret_cast<float *>(out_tensor.MutableData());
  197. if (out_data == nullptr) {
  198. std::cerr << "Data of out_tensor is nullptr" << std::endl;
  199. return;
  200. }
  201. std::cout << "output data is:";
  202. for (int i = 0; i < out_tensor.ElementNum() && i < 10; i++) {
  203. std::cout << out_data[i] << " ";
  204. }
  205. std::cout << std::endl;
  206. }
  207. }
  208. void GetOutputs(mindspore::Model *model) {
  209. auto out_tensors = model->GetOutputs();
  210. for (auto out_tensor : out_tensors) {
  211. std::cout << "tensor name is:" << out_tensor.Name() << " tensor size is:" << out_tensor.DataSize()
  212. << " tensor elements num is:" << out_tensor.ElementNum() << std::endl;
  213. // The model output data is float 32.
  214. if (out_tensor.DataType() != mindspore::DataType::kNumberTypeFloat32) {
  215. std::cerr << "Output should in float32" << std::endl;
  216. return;
  217. }
  218. auto out_data = reinterpret_cast<float *>(out_tensor.MutableData());
  219. if (out_data == nullptr) {
  220. std::cerr << "Data of out_tensor is nullptr" << std::endl;
  221. return;
  222. }
  223. std::cout << "output data is:";
  224. for (int i = 0; i < out_tensor.ElementNum() && i < 10; i++) {
  225. std::cout << out_data[i] << " ";
  226. }
  227. std::cout << std::endl;
  228. }
  229. }
  230. mindspore::Model *CreateAndBuildModel(char *model_buf, size_t model_size) {
  231. // Create and init context, add CPU device info
  232. auto context = std::make_shared<mindspore::Context>();
  233. if (context == nullptr) {
  234. std::cerr << "New context failed." << std::endl;
  235. return nullptr;
  236. }
  237. auto &device_list = context->MutableDeviceInfo();
  238. // If you need to use GPU or NPU, you can refer to CreateGPUDeviceInfo() or CreateNPUDeviceInfo().
  239. auto cpu_device_info = CreateCPUDeviceInfo();
  240. if (cpu_device_info == nullptr) {
  241. std::cerr << "Create CPUDeviceInfo failed." << std::endl;
  242. return nullptr;
  243. }
  244. device_list.push_back(cpu_device_info);
  245. // Create model
  246. auto model = new (std::nothrow) mindspore::Model();
  247. if (model == nullptr) {
  248. std::cerr << "New Model failed." << std::endl;
  249. return nullptr;
  250. }
  251. // Build model
  252. auto build_ret = model->Build(model_buf, model_size, mindspore::kMindIR, context);
  253. if (build_ret != mindspore::kSuccess) {
  254. delete model;
  255. std::cerr << "Build model failed." << std::endl;
  256. return nullptr;
  257. }
  258. return model;
  259. }
  260. mindspore::Model *CreateAndBuildModelComplicated(char *model_buf, size_t size) {
  261. // Create and init context, add CPU device info
  262. auto context = std::make_shared<mindspore::Context>();
  263. if (context == nullptr) {
  264. std::cerr << "New context failed." << std::endl;
  265. return nullptr;
  266. }
  267. auto &device_list = context->MutableDeviceInfo();
  268. auto cpu_device_info = CreateCPUDeviceInfo();
  269. if (cpu_device_info == nullptr) {
  270. std::cerr << "Create CPUDeviceInfo failed." << std::endl;
  271. return nullptr;
  272. }
  273. device_list.push_back(cpu_device_info);
  274. // Load graph
  275. mindspore::Graph graph;
  276. auto load_ret = mindspore::Serialization::Load(model_buf, size, mindspore::kMindIR, &graph);
  277. if (load_ret != mindspore::kSuccess) {
  278. std::cerr << "Load graph failed." << std::endl;
  279. return nullptr;
  280. }
  281. // Create model
  282. auto model = new (std::nothrow) mindspore::Model();
  283. if (model == nullptr) {
  284. std::cerr << "New Model failed." << std::endl;
  285. return nullptr;
  286. }
  287. // Build model
  288. mindspore::GraphCell graph_cell(graph);
  289. auto build_ret = model->Build(graph_cell, context);
  290. if (build_ret != mindspore::kSuccess) {
  291. delete model;
  292. std::cerr << "Build model failed." << std::endl;
  293. return nullptr;
  294. }
  295. return model;
  296. }
  297. mindspore::Status ResizeInputsTensorShape(mindspore::Model *model) {
  298. auto inputs = model->GetInputs();
  299. std::vector<int64_t> resize_shape = {1, 128, 128, 3};
  300. // Assume the model has only one input,resize input shape to [1, 128, 128, 3]
  301. std::vector<std::vector<int64_t>> new_shapes;
  302. new_shapes.push_back(resize_shape);
  303. return model->Resize(inputs, new_shapes);
  304. }
  305. int Run(const char *model_path) {
  306. // Read model file.
  307. size_t size = 0;
  308. char *model_buf = ReadFile(model_path, &size);
  309. if (model_buf == nullptr) {
  310. std::cerr << "Read model file failed." << std::endl;
  311. return -1;
  312. }
  313. // Create and Build MindSpore model.
  314. auto model = CreateAndBuildModel(model_buf, size);
  315. delete[](model_buf);
  316. if (model == nullptr) {
  317. std::cerr << "Create and build model failed." << std::endl;
  318. return -1;
  319. }
  320. // Set inputs data.
  321. // You can also get input through other methods, and you can refer to GetInputsAndSetData()
  322. auto generate_input_ret = GetInputsByTensorNameAndSetData(model);
  323. if (generate_input_ret != mindspore::kSuccess) {
  324. delete model;
  325. std::cerr << "Set input data error " << generate_input_ret << std::endl;
  326. return -1;
  327. }
  328. auto inputs = model->GetInputs();
  329. auto outputs = model->GetOutputs();
  330. auto predict_ret = model->Predict(inputs, &outputs);
  331. if (predict_ret != mindspore::kSuccess) {
  332. delete model;
  333. std::cerr << "Predict error " << predict_ret << std::endl;
  334. return -1;
  335. }
  336. // Get outputs data.
  337. // You can also get output through other methods,
  338. // and you can refer to GetOutputByTensorName() or GetOutputs().
  339. GetOutputsByNodeName(model);
  340. // Delete model.
  341. delete model;
  342. return 0;
  343. }
  344. int RunResize(const char *model_path) {
  345. size_t size = 0;
  346. char *model_buf = ReadFile(model_path, &size);
  347. if (model_buf == nullptr) {
  348. std::cerr << "Read model file failed." << std::endl;
  349. return -1;
  350. }
  351. // Create and Build MindSpore model.
  352. auto model = CreateAndBuildModel(model_buf, size);
  353. delete[](model_buf);
  354. if (model == nullptr) {
  355. std::cerr << "Create and build model failed." << std::endl;
  356. return -1;
  357. }
  358. // Resize inputs tensor shape.
  359. auto resize_ret = ResizeInputsTensorShape(model);
  360. if (resize_ret != mindspore::kSuccess) {
  361. delete model;
  362. std::cerr << "Resize input tensor shape error." << resize_ret << std::endl;
  363. return -1;
  364. }
  365. // Set inputs data.
  366. // You can also get input through other methods, and you can refer to GetInputsAndSetData()
  367. auto generate_input_ret = GetInputsByTensorNameAndSetData(model);
  368. if (generate_input_ret != mindspore::kSuccess) {
  369. delete model;
  370. std::cerr << "Set input data error " << generate_input_ret << std::endl;
  371. return -1;
  372. }
  373. auto inputs = model->GetInputs();
  374. auto outputs = model->GetOutputs();
  375. auto predict_ret = model->Predict(inputs, &outputs);
  376. if (predict_ret != mindspore::kSuccess) {
  377. delete model;
  378. std::cerr << "Predict error " << predict_ret << std::endl;
  379. return -1;
  380. }
  381. // Get outputs data.
  382. // You can also get output through other methods,
  383. // and you can refer to GetOutputByTensorName() or GetOutputs().
  384. GetOutputsByNodeName(model);
  385. // Delete model.
  386. delete model;
  387. return 0;
  388. }
  389. int RunCreateModelComplicated(const char *model_path) {
  390. size_t size = 0;
  391. char *model_buf = ReadFile(model_path, &size);
  392. if (model_buf == nullptr) {
  393. std::cerr << "Read model file failed." << std::endl;
  394. return -1;
  395. }
  396. // Create and Build MindSpore model.
  397. auto model = CreateAndBuildModelComplicated(model_buf, size);
  398. delete[](model_buf);
  399. if (model == nullptr) {
  400. std::cerr << "Create and build model failed." << std::endl;
  401. return -1;
  402. }
  403. // Set inputs data.
  404. // You can also get input through other methods, and you can refer to GetInputsAndSetData()
  405. auto generate_input_ret = GetInputsByTensorNameAndSetData(model);
  406. if (generate_input_ret != mindspore::kSuccess) {
  407. delete model;
  408. std::cerr << "Set input data error " << generate_input_ret << std::endl;
  409. return -1;
  410. }
  411. auto inputs = model->GetInputs();
  412. auto outputs = model->GetOutputs();
  413. auto predict_ret = model->Predict(inputs, &outputs);
  414. if (predict_ret != mindspore::kSuccess) {
  415. delete model;
  416. std::cerr << "Predict error " << predict_ret << std::endl;
  417. return -1;
  418. }
  419. // Get outputs data.
  420. // You can also get output through other methods,
  421. // and you can refer to GetOutputByTensorName() or GetOutputs().
  422. GetOutputsByNodeName(model);
  423. // Delete model.
  424. delete model;
  425. return 0;
  426. }
  427. int RunModelParallel(const char *model_path) {
  428. size_t size = 0;
  429. char *model_buf = ReadFile(model_path, &size);
  430. if (model_buf == nullptr) {
  431. std::cerr << "Read model file failed." << std::endl;
  432. return -1;
  433. }
  434. // Create and Build MindSpore model.
  435. auto model1 = CreateAndBuildModel(model_buf, size);
  436. auto model2 = CreateAndBuildModel(model_buf, size);
  437. delete[](model_buf);
  438. if (model1 == nullptr || model2 == nullptr) {
  439. std::cerr << "Create and build model failed." << std::endl;
  440. return -1;
  441. }
  442. std::thread thread1([&]() {
  443. auto generate_input_ret = GetInputsByTensorNameAndSetData(model1);
  444. if (generate_input_ret != mindspore::kSuccess) {
  445. std::cerr << "Model1 set input data error " << generate_input_ret << std::endl;
  446. return -1;
  447. }
  448. auto inputs = model1->GetInputs();
  449. auto outputs = model1->GetOutputs();
  450. auto predict_ret = model1->Predict(inputs, &outputs);
  451. if (predict_ret != mindspore::kSuccess) {
  452. std::cerr << "Model1 predict error " << predict_ret << std::endl;
  453. return -1;
  454. }
  455. std::cout << "Model1 predict success" << std::endl;
  456. return 0;
  457. });
  458. std::thread thread2([&]() {
  459. auto generate_input_ret = GetInputsByTensorNameAndSetData(model2);
  460. if (generate_input_ret != mindspore::kSuccess) {
  461. std::cerr << "Model2 set input data error " << generate_input_ret << std::endl;
  462. return -1;
  463. }
  464. auto inputs = model2->GetInputs();
  465. auto outputs = model2->GetOutputs();
  466. auto predict_ret = model2->Predict(inputs, &outputs);
  467. if (predict_ret != mindspore::kSuccess) {
  468. std::cerr << "Model2 predict error " << predict_ret << std::endl;
  469. return -1;
  470. }
  471. std::cout << "Model2 predict success" << std::endl;
  472. return 0;
  473. });
  474. thread1.join();
  475. thread2.join();
  476. // Get outputs data.
  477. // You can also get output through other methods,
  478. // and you can refer to GetOutputByTensorName() or GetOutputs().
  479. GetOutputsByNodeName(model1);
  480. GetOutputsByNodeName(model2);
  481. // Delete model.
  482. delete model1;
  483. delete model2;
  484. return 0;
  485. }
  486. int RunWithSharedMemoryPool(const char *model_path) {
  487. size_t size = 0;
  488. char *model_buf = ReadFile(model_path, &size);
  489. if (model_buf == nullptr) {
  490. std::cerr << "Read model file failed." << std::endl;
  491. return -1;
  492. }
  493. auto context1 = std::make_shared<mindspore::Context>();
  494. if (context1 == nullptr) {
  495. std::cerr << "New context failed." << std::endl;
  496. return -1;
  497. }
  498. auto &device_list1 = context1->MutableDeviceInfo();
  499. auto device_info1 = CreateCPUDeviceInfo();
  500. if (device_info1 == nullptr) {
  501. std::cerr << "Create CPUDeviceInfo failed." << std::endl;
  502. return -1;
  503. }
  504. device_list1.push_back(device_info1);
  505. auto model1 = new (std::nothrow) mindspore::Model();
  506. if (model1 == nullptr) {
  507. delete[](model_buf);
  508. std::cerr << "New Model failed." << std::endl;
  509. return -1;
  510. }
  511. auto build_ret = model1->Build(model_buf, size, mindspore::kMindIR, context1);
  512. if (build_ret != mindspore::kSuccess) {
  513. delete[](model_buf);
  514. delete model1;
  515. std::cerr << "Build model failed." << std::endl;
  516. return -1;
  517. }
  518. auto context2 = std::make_shared<mindspore::Context>();
  519. if (context2 == nullptr) {
  520. delete[](model_buf);
  521. delete model1;
  522. std::cerr << "New context failed." << std::endl;
  523. return -1;
  524. }
  525. auto &device_list2 = context2->MutableDeviceInfo();
  526. auto device_info2 = CreateCPUDeviceInfo();
  527. if (device_info2 == nullptr) {
  528. delete[](model_buf);
  529. delete model1;
  530. std::cerr << "Create CPUDeviceInfo failed." << std::endl;
  531. return -1;
  532. }
  533. // Use the same allocator to share the memory pool.
  534. device_info2->SetAllocator(device_info1->GetAllocator());
  535. device_list2.push_back(device_info2);
  536. auto model2 = new (std::nothrow) mindspore::Model();
  537. if (model2 == nullptr) {
  538. delete[](model_buf);
  539. delete model1;
  540. std::cerr << "New Model failed." << std::endl;
  541. return -1;
  542. }
  543. build_ret = model2->Build(model_buf, size, mindspore::kMindIR, context2);
  544. delete[](model_buf);
  545. if (build_ret != mindspore::kSuccess) {
  546. delete model1;
  547. delete model2;
  548. std::cerr << "Build model failed." << std::endl;
  549. return -1;
  550. }
  551. // Set inputs data.
  552. // You can also get input through other methods, and you can refer to GetInputsAndSetData()
  553. GetInputsByTensorNameAndSetData(model1);
  554. GetInputsByTensorNameAndSetData(model2);
  555. auto inputs1 = model1->GetInputs();
  556. auto outputs1 = model1->GetOutputs();
  557. auto predict_ret = model1->Predict(inputs1, &outputs1);
  558. if (predict_ret != mindspore::kSuccess) {
  559. delete model1;
  560. delete model2;
  561. std::cerr << "Inference error " << predict_ret << std::endl;
  562. return -1;
  563. }
  564. auto inputs2 = model2->GetInputs();
  565. auto outputs2 = model2->GetOutputs();
  566. predict_ret = model2->Predict(inputs2, &outputs2);
  567. if (predict_ret != mindspore::kSuccess) {
  568. delete model1;
  569. delete model2;
  570. std::cerr << "Inference error " << predict_ret << std::endl;
  571. return -1;
  572. }
  573. // Get outputs data.
  574. // You can also get output through other methods,
  575. // and you can refer to GetOutputByTensorName() or GetOutputs().
  576. GetOutputsByNodeName(model1);
  577. GetOutputsByNodeName(model2);
  578. // Delete model.
  579. delete model1;
  580. delete model2;
  581. return 0;
  582. }
  583. int RunCallback(const char *model_path) {
  584. size_t size = 0;
  585. char *model_buf = ReadFile(model_path, &size);
  586. if (model_buf == nullptr) {
  587. std::cerr << "Read model file failed." << std::endl;
  588. return -1;
  589. }
  590. // Create and Build MindSpore model.
  591. auto model = CreateAndBuildModel(model_buf, size);
  592. delete[](model_buf);
  593. if (model == nullptr) {
  594. delete model;
  595. std::cerr << "Create model failed." << std::endl;
  596. return -1;
  597. }
  598. // Set inputs data.
  599. // You can also get input through other methods, and you can refer to GetInputsAndSetData()
  600. auto generate_input_ret = GetInputsByTensorNameAndSetData(model);
  601. if (generate_input_ret != mindspore::kSuccess) {
  602. delete model;
  603. std::cerr << "Set input data error " << generate_input_ret << std::endl;
  604. return -1;
  605. }
  606. // Definition of callback function before forwarding operator.
  607. auto before_call_back = [](const std::vector<mindspore::MSTensor> &before_inputs,
  608. const std::vector<mindspore::MSTensor> &before_outputs,
  609. const mindspore::MSCallBackParam &call_param) {
  610. std::cout << "Before forwarding " << call_param.node_name_ << " " << call_param.node_type_ << std::endl;
  611. return true;
  612. };
  613. // Definition of callback function after forwarding operator.
  614. auto after_call_back = [](const std::vector<mindspore::MSTensor> &after_inputs,
  615. const std::vector<mindspore::MSTensor> &after_outputs,
  616. const mindspore::MSCallBackParam &call_param) {
  617. std::cout << "After forwarding " << call_param.node_name_ << " " << call_param.node_type_ << std::endl;
  618. return true;
  619. };
  620. auto inputs = model->GetInputs();
  621. auto outputs = model->GetOutputs();
  622. auto predict_ret = model->Predict(inputs, &outputs, before_call_back, after_call_back);
  623. if (predict_ret != mindspore::kSuccess) {
  624. delete model;
  625. std::cerr << "Predict error " << predict_ret << std::endl;
  626. return -1;
  627. }
  628. // Get outputs data.
  629. // You can also get output through other methods,
  630. // and you can refer to GetOutputByTensorName() or GetOutputs().
  631. GetOutputsByNodeName(model);
  632. // Delete model.
  633. delete model;
  634. return 0;
  635. }
  636. int main(int argc, const char **argv) {
  637. if (argc < 3) {
  638. std::cerr << "Usage: ./runtime_cpp model_path Option" << std::endl;
  639. std::cerr << "Example: ./runtime_cpp ../model/mobilenetv2.ms 0" << std::endl;
  640. std::cerr << "When your Option is 0, you will run MindSpore Lite predict." << std::endl;
  641. std::cerr << "When your Option is 1, you will run MindSpore Lite predict with resize." << std::endl;
  642. std::cerr << "When your Option is 2, you will run MindSpore Lite predict with complicated API." << std::endl;
  643. std::cerr << "When your Option is 3, you will run MindSpore Lite predict with model parallel." << std::endl;
  644. std::cerr << "When your Option is 4, you will run MindSpore Lite predict with shared memory pool." << std::endl;
  645. std::cerr << "When your Option is 5, you will run MindSpore Lite predict with callback." << std::endl;
  646. return -1;
  647. }
  648. std::string version = mindspore::Version();
  649. std::cout << "MindSpore Lite Version is " << version << std::endl;
  650. auto model_path = RealPath(argv[1]);
  651. if (model_path.empty()) {
  652. std::cerr << "model path " << argv[1] << " is invalid.";
  653. return -1;
  654. }
  655. auto flag = argv[2];
  656. if (strcmp(flag, "0") == 0) {
  657. return Run(model_path.c_str());
  658. } else if (strcmp(flag, "1") == 0) {
  659. return RunResize(model_path.c_str());
  660. } else if (strcmp(flag, "2") == 0) {
  661. return RunCreateModelComplicated(model_path.c_str());
  662. } else if (strcmp(flag, "3") == 0) {
  663. return RunModelParallel(model_path.c_str());
  664. } else if (strcmp(flag, "4") == 0) {
  665. return RunWithSharedMemoryPool(model_path.c_str());
  666. } else if (strcmp(flag, "5") == 0) {
  667. return RunCallback(model_path.c_str());
  668. } else {
  669. std::cerr << "Unsupported Flag " << flag << std::endl;
  670. return -1;
  671. }
  672. }