You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

execute.cc 23 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552
  1. /**
  2. * Copyright 2020-2021 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <algorithm>
  17. #include <fstream>
  18. #include "minddata/dataset/include/execute.h"
  19. #include "minddata/dataset/core/de_tensor.h"
  20. #include "minddata/dataset/core/device_resource.h"
  21. #include "minddata/dataset/core/device_tensor.h"
  22. #include "minddata/dataset/core/tensor_row.h"
  23. #include "minddata/dataset/include/tensor.h"
  24. #include "minddata/dataset/include/type_id.h"
  25. #include "minddata/dataset/kernels/ir/tensor_operation.h"
  26. #include "minddata/dataset/kernels/tensor_op.h"
  27. #ifndef ENABLE_ANDROID
  28. #include "utils/log_adapter.h"
  29. #else
  30. #include "mindspore/lite/src/common/log_adapter.h"
  31. #endif
  32. #ifdef ENABLE_ACL
  33. #include "minddata/dataset/core/ascend_resource.h"
  34. #include "minddata/dataset/kernels/ir/vision/ascend_vision_ir.h"
  35. #endif
  36. namespace mindspore {
  37. namespace dataset {
  38. using json = nlohmann::json;
// Private bookkeeping kept out of the public Execute header.
struct Execute::ExtraInfo {
  // Maps an Ascend operator name to the parameter values collected for Aipp
  // config generation. A multimap because one op name may contribute several
  // vectors (e.g. DvppNormalize contributes both "mean" and "std").
  std::multimap<std::string, std::vector<uint32_t>> aipp_cfg_;
};
  42. // FIXME - Temporarily overload Execute to support both TensorOperation and TensorTransform
  43. Execute::Execute(std::shared_ptr<TensorOperation> op, MapTargetDevice deviceType) {
  44. ops_.emplace_back(std::move(op));
  45. device_type_ = deviceType;
  46. info_ = std::make_shared<ExtraInfo>();
  47. #ifdef ENABLE_ACL
  48. if (device_type_ == MapTargetDevice::kAscend310) {
  49. device_resource_ = std::make_shared<AscendResource>();
  50. Status rc = device_resource_->InitResource();
  51. if (!rc.IsOk()) {
  52. device_resource_ = nullptr;
  53. MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
  54. }
  55. }
  56. #endif
  57. }
  58. Execute::Execute(std::shared_ptr<TensorTransform> op, MapTargetDevice deviceType) {
  59. // Convert op from TensorTransform to TensorOperation
  60. std::shared_ptr<TensorOperation> operation;
  61. info_ = std::make_shared<ExtraInfo>();
  62. if (deviceType == MapTargetDevice::kCpu) {
  63. operation = op->Parse();
  64. } else {
  65. operation = op->Parse(deviceType);
  66. }
  67. ops_.emplace_back(std::move(operation));
  68. device_type_ = deviceType;
  69. #ifdef ENABLE_ACL
  70. if (device_type_ == MapTargetDevice::kAscend310) {
  71. device_resource_ = std::make_shared<AscendResource>();
  72. Status rc = device_resource_->InitResource();
  73. if (!rc.IsOk()) {
  74. device_resource_ = nullptr;
  75. MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
  76. }
  77. }
  78. #endif
  79. }
  80. /*
  81. Execute::Execute(TensorTransform op, MapTargetDevice deviceType) {
  82. // Convert op from TensorTransform to TensorOperation
  83. std::shared_ptr<TensorOperation> operation = op.Parse();
  84. ops_.emplace_back(std::move(operation));
  85. device_type_ = deviceType;
  86. #ifdef ENABLE_ACL
  87. if (device_type_ == MapTargetDevice::kAscend310) {
  88. device_resource_ = std::make_shared<AscendResource>();
  89. Status rc = device_resource_->InitResource();
  90. if (!rc.IsOk()) {
  91. device_resource_ = nullptr;
  92. MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
  93. }
  94. }
  95. #endif
  96. }
  97. */
  98. // Execute function for the example case: auto decode(new vision::Decode());
  99. Execute::Execute(TensorTransform *op, MapTargetDevice deviceType) {
  100. // Convert op from TensorTransform to TensorOperation
  101. std::shared_ptr<TensorOperation> operation;
  102. info_ = std::make_shared<ExtraInfo>();
  103. if (deviceType == MapTargetDevice::kCpu) {
  104. operation = op->Parse();
  105. } else {
  106. operation = op->Parse(deviceType);
  107. }
  108. ops_.emplace_back(std::move(operation));
  109. device_type_ = deviceType;
  110. #ifdef ENABLE_ACL
  111. if (device_type_ == MapTargetDevice::kAscend310) {
  112. device_resource_ = std::make_shared<AscendResource>();
  113. Status rc = device_resource_->InitResource();
  114. if (!rc.IsOk()) {
  115. device_resource_ = nullptr;
  116. MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
  117. }
  118. }
  119. #endif
  120. }
  121. Execute::Execute(std::vector<std::shared_ptr<TensorOperation>> ops, MapTargetDevice deviceType)
  122. : ops_(std::move(ops)), device_type_(deviceType) {
  123. info_ = std::make_shared<ExtraInfo>();
  124. #ifdef ENABLE_ACL
  125. if (device_type_ == MapTargetDevice::kAscend310) {
  126. device_resource_ = std::make_shared<AscendResource>();
  127. Status rc = device_resource_->InitResource();
  128. if (!rc.IsOk()) {
  129. device_resource_ = nullptr;
  130. MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
  131. }
  132. }
  133. #endif
  134. }
  135. Execute::Execute(std::vector<std::shared_ptr<TensorTransform>> ops, MapTargetDevice deviceType) {
  136. // Convert ops from TensorTransform to TensorOperation
  137. info_ = std::make_shared<ExtraInfo>();
  138. if (deviceType == MapTargetDevice::kCpu) {
  139. (void)std::transform(ops.begin(), ops.end(), std::back_inserter(ops_),
  140. [](std::shared_ptr<TensorTransform> operation) -> std::shared_ptr<TensorOperation> {
  141. return operation->Parse();
  142. });
  143. } else {
  144. for (auto &op : ops) {
  145. ops_.emplace_back(op->Parse(deviceType));
  146. }
  147. }
  148. device_type_ = deviceType;
  149. #ifdef ENABLE_ACL
  150. if (device_type_ == MapTargetDevice::kAscend310) {
  151. device_resource_ = std::make_shared<AscendResource>();
  152. Status rc = device_resource_->InitResource();
  153. if (!rc.IsOk()) {
  154. device_resource_ = nullptr;
  155. MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
  156. }
  157. }
  158. #endif
  159. }
  160. Execute::Execute(const std::vector<std::reference_wrapper<TensorTransform>> ops, MapTargetDevice deviceType) {
  161. // Convert ops from TensorTransform to TensorOperation
  162. info_ = std::make_shared<ExtraInfo>();
  163. if (deviceType == MapTargetDevice::kCpu) {
  164. (void)std::transform(
  165. ops.begin(), ops.end(), std::back_inserter(ops_),
  166. [](TensorTransform &operation) -> std::shared_ptr<TensorOperation> { return operation.Parse(); });
  167. } else {
  168. for (auto &op : ops) {
  169. ops_.emplace_back(op.get().Parse(deviceType));
  170. }
  171. }
  172. device_type_ = deviceType;
  173. #ifdef ENABLE_ACL
  174. if (device_type_ == MapTargetDevice::kAscend310) {
  175. device_resource_ = std::make_shared<AscendResource>();
  176. Status rc = device_resource_->InitResource();
  177. if (!rc.IsOk()) {
  178. device_resource_ = nullptr;
  179. MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
  180. }
  181. }
  182. #endif
  183. }
  184. // Execute function for the example vector case: auto decode(new vision::Decode());
  185. Execute::Execute(std::vector<TensorTransform *> ops, MapTargetDevice deviceType) {
  186. // Convert ops from TensorTransform to TensorOperation
  187. info_ = std::make_shared<ExtraInfo>();
  188. if (deviceType == MapTargetDevice::kCpu) {
  189. (void)std::transform(
  190. ops.begin(), ops.end(), std::back_inserter(ops_),
  191. [](TensorTransform *operation) -> std::shared_ptr<TensorOperation> { return operation->Parse(); });
  192. } else {
  193. for (auto &op : ops) {
  194. ops_.emplace_back(op->Parse(deviceType));
  195. }
  196. }
  197. device_type_ = deviceType;
  198. #ifdef ENABLE_ACL
  199. if (device_type_ == MapTargetDevice::kAscend310) {
  200. device_resource_ = std::make_shared<AscendResource>();
  201. Status rc = device_resource_->InitResource();
  202. if (!rc.IsOk()) {
  203. device_resource_ = nullptr;
  204. MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
  205. }
  206. }
  207. #endif
  208. }
  209. Execute::~Execute() {
  210. #ifdef ENABLE_ACL
  211. if (device_type_ == MapTargetDevice::kAscend310) {
  212. if (device_resource_) {
  213. device_resource_->FinalizeResource();
  214. } else {
  215. MS_LOG(ERROR) << "Device resource is nullptr which is illegal under case Ascend310";
  216. }
  217. }
  218. #endif
  219. }
  220. Status Execute::operator()(const mindspore::MSTensor &input, mindspore::MSTensor *output) {
  221. // Validate input tensor
  222. CHECK_FAIL_RETURN_UNEXPECTED(input.DataSize() > 0, "Input Tensor has no data");
  223. CHECK_FAIL_RETURN_UNEXPECTED(!ops_.empty(), "Input TensorOperation should be provided");
  224. CHECK_FAIL_RETURN_UNEXPECTED(validate_device_(), "Device Type should be 'Ascend310' or 'CPU'");
  225. // Validate and build runtime ops
  226. std::vector<std::shared_ptr<TensorOp>> transforms; // record the transformations
  227. for (int32_t i = 0; i < ops_.size(); i++) {
  228. CHECK_FAIL_RETURN_UNEXPECTED(ops_[i] != nullptr, "Input TensorOperation[" + std::to_string(i) + "] is null");
  229. RETURN_IF_NOT_OK(ops_[i]->ValidateParams());
  230. transforms.emplace_back(ops_[i]->Build());
  231. }
  232. if (device_type_ == MapTargetDevice::kCpu) {
  233. // Convert mindspore::Tensor to dataset::Tensor
  234. std::shared_ptr<dataset::Tensor> de_tensor;
  235. Status rc = dataset::Tensor::CreateFromMemory(dataset::TensorShape(input.Shape()),
  236. MSTypeToDEType(static_cast<TypeId>(input.DataType())),
  237. (const uchar *)(input.Data().get()), input.DataSize(), &de_tensor);
  238. if (rc.IsError()) {
  239. MS_LOG(ERROR) << rc;
  240. return rc;
  241. }
  242. // Apply transforms on tensor
  243. for (auto &t : transforms) {
  244. std::shared_ptr<dataset::Tensor> de_output;
  245. Status rc_ = t->Compute(de_tensor, &de_output);
  246. if (rc_.IsError()) {
  247. MS_LOG(ERROR) << rc_;
  248. return rc_;
  249. }
  250. // For next transform
  251. de_tensor = std::move(de_output);
  252. }
  253. // Convert dataset::Tensor to mindspore::Tensor
  254. CHECK_FAIL_RETURN_UNEXPECTED(de_tensor->HasData(), "Apply transform failed, output tensor has no data");
  255. *output = mindspore::MSTensor(std::make_shared<DETensor>(de_tensor));
  256. } else { // Ascend310 case, where we must set Ascend resource on each operators
  257. #ifdef ENABLE_ACL
  258. CHECK_FAIL_RETURN_UNEXPECTED(device_resource_, "Device resource is nullptr which is illegal under case Ascend310");
  259. std::shared_ptr<mindspore::dataset::DeviceTensor> device_input;
  260. RETURN_IF_NOT_OK(device_resource_->Sink(input, &device_input));
  261. for (auto &t : transforms) {
  262. std::shared_ptr<DeviceTensor> device_output;
  263. RETURN_IF_NOT_OK(t->SetAscendResource(device_resource_));
  264. RETURN_IF_NOT_OK(t->Compute(device_input, &device_output));
  265. // For next transform
  266. device_input = std::move(device_output);
  267. }
  268. CHECK_FAIL_RETURN_UNEXPECTED(device_input->HasDeviceData(), "Apply transform failed, output tensor has no data");
  269. std::shared_ptr<mindspore::dataset::Tensor> host_output;
  270. // Need to optimize later, waiting for computing department development, hence we pop data temporarily.
  271. RETURN_IF_NOT_OK(device_resource_->Pop(device_input, &host_output));
  272. *output = mindspore::MSTensor(std::make_shared<DETensor>(host_output));
  273. // *output = mindspore::MSTensor(std::make_shared<DETensor>(device_input, true)); Use in the future
  274. #endif
  275. }
  276. return Status::OK();
  277. }
  278. Status Execute::operator()(const std::vector<MSTensor> &input_tensor_list, std::vector<MSTensor> *output_tensor_list) {
  279. // Validate input tensor
  280. CHECK_FAIL_RETURN_UNEXPECTED(!input_tensor_list.empty(), "Input Tensor is not valid");
  281. for (auto &tensor : input_tensor_list) {
  282. CHECK_FAIL_RETURN_UNEXPECTED(tensor.DataSize() > 0, "Input Tensor has no data");
  283. }
  284. CHECK_FAIL_RETURN_UNEXPECTED(!ops_.empty(), "Input TensorOperation should be provided");
  285. CHECK_FAIL_RETURN_UNEXPECTED(validate_device_(), "Device Type should be 'Ascend310' or 'CPU'");
  286. // Validate and build runtime ops
  287. std::vector<std::shared_ptr<TensorOp>> transforms;
  288. for (int32_t i = 0; i < ops_.size(); i++) {
  289. CHECK_FAIL_RETURN_UNEXPECTED(ops_[i] != nullptr, "Input TensorOperation[" + std::to_string(i) + "] is null");
  290. RETURN_IF_NOT_OK(ops_[i]->ValidateParams());
  291. transforms.emplace_back(ops_[i]->Build());
  292. }
  293. if (device_type_ == MapTargetDevice::kCpu) { // Case CPU
  294. TensorRow de_tensor_list;
  295. for (auto &tensor : input_tensor_list) {
  296. std::shared_ptr<dataset::Tensor> de_tensor;
  297. Status rc = dataset::Tensor::CreateFromMemory(
  298. dataset::TensorShape(tensor.Shape()), MSTypeToDEType(static_cast<TypeId>(tensor.DataType())),
  299. (const uchar *)(tensor.Data().get()), tensor.DataSize(), &de_tensor);
  300. if (rc.IsError()) {
  301. MS_LOG(ERROR) << rc;
  302. RETURN_IF_NOT_OK(rc);
  303. }
  304. de_tensor_list.emplace_back(std::move(de_tensor));
  305. }
  306. // Apply transforms on tensor
  307. for (auto &t : transforms) {
  308. TensorRow de_output_list;
  309. RETURN_IF_NOT_OK(t->Compute(de_tensor_list, &de_output_list));
  310. // For next transform
  311. de_tensor_list = std::move(de_output_list);
  312. }
  313. for (auto &tensor : de_tensor_list) {
  314. CHECK_FAIL_RETURN_UNEXPECTED(tensor->HasData(), "Apply transform failed, output tensor has no data");
  315. auto ms_tensor = mindspore::MSTensor(std::make_shared<DETensor>(tensor));
  316. output_tensor_list->emplace_back(ms_tensor);
  317. }
  318. CHECK_FAIL_RETURN_UNEXPECTED(!output_tensor_list->empty(), "Output Tensor is not valid");
  319. } else { // Case Ascend310
  320. #ifdef ENABLE_ACL
  321. CHECK_FAIL_RETURN_UNEXPECTED(device_resource_, "Device resource is nullptr which is illegal under case Ascend310");
  322. for (auto &input_tensor : input_tensor_list) {
  323. std::shared_ptr<dataset::DeviceTensor> device_input;
  324. RETURN_IF_NOT_OK(device_resource_->Sink(input_tensor, &device_input));
  325. for (auto &t : transforms) {
  326. std::shared_ptr<DeviceTensor> device_output;
  327. RETURN_IF_NOT_OK(t->SetAscendResource(device_resource_));
  328. RETURN_IF_NOT_OK(t->Compute(device_input, &device_output));
  329. // For next transform
  330. device_input = std::move(device_output);
  331. }
  332. CHECK_FAIL_RETURN_UNEXPECTED(device_input->HasDeviceData(), "Apply transform failed, output tensor has no data");
  333. // Due to the limitation of Ascend310 memory, we have to pop every data onto host memory
  334. // So the speed of this batch method is slower than solo mode
  335. std::shared_ptr<mindspore::dataset::Tensor> host_output;
  336. RETURN_IF_NOT_OK(device_resource_->Pop(device_input, &host_output));
  337. auto ms_tensor = mindspore::MSTensor(std::make_shared<DETensor>(host_output));
  338. output_tensor_list->emplace_back(ms_tensor);
  339. RETURN_IF_NOT_OK(device_resource_->DeviceDataRelease());
  340. }
  341. CHECK_FAIL_RETURN_UNEXPECTED(!output_tensor_list->empty(), "Output Tensor vector is empty");
  342. #endif
  343. }
  344. return Status::OK();
  345. }
  346. std::vector<uint32_t> AippSizeFilter(const std::vector<uint32_t> &resize_para, const std::vector<uint32_t> &crop_para) {
  347. std::vector<uint32_t> aipp_size;
  348. if (resize_para.size() == 0) {
  349. aipp_size = crop_para;
  350. } else if (crop_para.size() == 0) {
  351. aipp_size = resize_para;
  352. } else {
  353. if (resize_para.size() == 1) {
  354. aipp_size = *min_element(crop_para.begin(), crop_para.end()) < *resize_para.begin() ? crop_para : resize_para;
  355. } else {
  356. aipp_size =
  357. *min_element(resize_para.begin(), resize_para.end()) < *min_element(crop_para.begin(), crop_para.end())
  358. ? resize_para
  359. : crop_para;
  360. }
  361. }
  362. return aipp_size;
  363. }
  364. std::vector<uint32_t> AippMeanFilter(const std::vector<uint32_t> &normalize_para) {
  365. std::vector<uint32_t> aipp_mean;
  366. if (normalize_para.size() == 6) {
  367. std::transform(normalize_para.begin(), normalize_para.begin() + 3, std::back_inserter(aipp_mean),
  368. [](uint32_t i) { return static_cast<uint32_t>(i / 10000); });
  369. } else {
  370. aipp_mean = {0, 0, 0};
  371. }
  372. return aipp_mean;
  373. }
  374. std::vector<float> AippStdFilter(const std::vector<uint32_t> &normalize_para) {
  375. std::vector<float> aipp_std;
  376. if (normalize_para.size() == 6) {
  377. auto zeros = std::find(std::begin(normalize_para), std::end(normalize_para), 0);
  378. if (zeros == std::end(normalize_para)) {
  379. std::transform(normalize_para.begin() + 3, normalize_para.end(), std::back_inserter(aipp_std),
  380. [](uint32_t i) { return static_cast<float>(10000 / i); });
  381. } else {
  382. MS_LOG(WARNING) << "Detect 0 in std vector, please verify your input";
  383. aipp_std = {1.0, 1.0, 1.0};
  384. }
  385. } else {
  386. aipp_std = {1.0, 1.0, 1.0};
  387. }
  388. return aipp_std;
  389. }
  390. Status AippInfoCollection(std::map<std::string, std::string> *aipp_options, const std::vector<uint32_t> &aipp_size,
  391. const std::vector<uint32_t> &aipp_mean, const std::vector<float> &aipp_std) {
  392. aipp_options->insert(std::make_pair("related_input_rank", "0"));
  393. aipp_options->insert(std::make_pair("src_image_size_w", std::to_string(aipp_size[1])));
  394. aipp_options->insert(std::make_pair("src_image_size_h", std::to_string(aipp_size[1])));
  395. aipp_options->insert(std::make_pair("crop", "false"));
  396. aipp_options->insert(std::make_pair("input_format", "YUV420SP_U8"));
  397. aipp_options->insert(std::make_pair("aipp_mode", "static"));
  398. aipp_options->insert(std::make_pair("csc_switch", "true"));
  399. aipp_options->insert(std::make_pair("rbuv_swap_switch", "false"));
  400. std::vector<int32_t> color_space_matrix = {256, 0, 359, 256, -88, -183, 256, 454, 0};
  401. int count = 0;
  402. for (int i = 0; i < 3; i++) {
  403. for (int j = 0; j < 3; j++) {
  404. std::string key_word = "matrix_r" + std::to_string(i) + "c" + std::to_string(j);
  405. aipp_options->insert(std::make_pair(key_word, std::to_string(color_space_matrix[count])));
  406. ++count;
  407. }
  408. }
  409. std::vector<uint32_t> color_space_bias = {0, 128, 128};
  410. for (int i = 0; i < 3; i++) {
  411. std::string key_word = "input_bias_" + std::to_string(i);
  412. aipp_options->insert(std::make_pair(key_word, std::to_string(color_space_bias[i])));
  413. }
  414. for (int i = 0; i < aipp_mean.size(); i++) {
  415. std::string key_word = "mean_chn_" + std::to_string(i);
  416. aipp_options->insert(std::make_pair(key_word, std::to_string(aipp_mean[i])));
  417. }
  418. for (int i = 0; i < aipp_mean.size(); i++) {
  419. std::string key_word = "min_chn_" + std::to_string(i);
  420. aipp_options->insert(std::make_pair(key_word, "0.0"));
  421. }
  422. for (int i = 0; i < aipp_std.size(); i++) {
  423. std::string key_word = "var_reci_chn_" + std::to_string(i);
  424. aipp_options->insert(std::make_pair(key_word, std::to_string(aipp_std[i])));
  425. }
  426. return Status::OK();
  427. }
// Generate an Aipp configuration file ("./aipp.cfg") from the Ascend-related
// operators in this pipeline and return its path. Returns "" when an op is
// null, fails validation, or the file cannot be opened. In a non-ACL build
// the default path is returned without writing anything.
std::string Execute::AippCfgGenerator() {
  std::string config_location = "./aipp.cfg";
#ifdef ENABLE_ACL
  std::vector<uint32_t> paras;  // Record the parameters value of each Ascend operators
  for (int32_t i = 0; i < ops_.size(); i++) {
    json ir_info;
    if (ops_[i] == nullptr) {
      MS_LOG(ERROR) << "Input TensorOperation[" + std::to_string(i) + "] is null";
      return "";
    }
    if (ops_[i]->ValidateParams() != Status::OK()) {
      MS_LOG(ERROR) << "Input TensorOperation[" + std::to_string(i) + "] has wrong parameters";
      return "";
    }
    // Serialize the op so its parameters can be looked up by json key below.
    ops_[i]->to_json(&ir_info);
    // Dvpp op name -> json key of the parameter vector to collect. Multimap
    // because some ops (DecodeResizeCrop, Normalize) contribute two vectors.
    std::multimap<std::string, std::string> op_list = {{vision::kDvppCropJpegOperation, "size"},
                                                       {vision::kDvppDecodeResizeOperation, "size"},
                                                       {vision::kDvppDecodeResizeCropOperation, "crop_size"},
                                                       {vision::kDvppDecodeResizeCropOperation, "resize_size"},
                                                       {vision::kDvppNormalizeOperation, "mean"},
                                                       {vision::kDvppNormalizeOperation, "std"},
                                                       {vision::kDvppResizeJpegOperation, "size"}};
    // Collect every parameter vector this op contributes into info_->aipp_cfg_.
    for (auto pos = op_list.equal_range(ops_[i]->Name()); pos.first != pos.second; ++pos.first) {
      auto paras_key_word = pos.first->second;
      paras = ir_info[paras_key_word].get<std::vector<uint32_t>>();
      info_->aipp_cfg_.insert(std::make_pair(ops_[i]->Name(), paras));
    }
  }
  std::ofstream outfile;
  outfile.open(config_location, std::ofstream::out);
  if (!outfile.is_open()) {
    MS_LOG(ERROR) << "Fail to open Aipp config file, please verify your system config(including authority)"
                  << "We will return empty string which represent the location of Aipp config file in this case";
    std::string except = "";
    return except;
  }
  if (device_type_ == MapTargetDevice::kAscend310) {
    // Process resize parameters and crop parameters to find out the final size of input data
    std::vector<uint32_t> resize_paras;
    std::vector<uint32_t> crop_paras;
    auto iter = info_->aipp_cfg_.find(vision::kDvppResizeJpegOperation);
    if (iter != info_->aipp_cfg_.end()) {
      resize_paras = iter->second;
    }
    iter = info_->aipp_cfg_.find(vision::kDvppCropJpegOperation);
    if (iter != info_->aipp_cfg_.end()) {
      crop_paras = iter->second;
      // A single crop value means a square crop; duplicate it to {h, w} form.
      if (crop_paras.size() == 1) {
        crop_paras.emplace_back(crop_paras[0]);
      }
    }
    std::vector<uint32_t> aipp_size = AippSizeFilter(resize_paras, crop_paras);
    // Process normalization parameters to find out the final normalization parameters for Aipp module
    std::vector<uint32_t> normalize_paras;
    if (info_->aipp_cfg_.find(vision::kDvppNormalizeOperation) != info_->aipp_cfg_.end()) {
      // Concatenate the mean and std vectors (3 + 3 entries) in insertion order.
      for (auto pos = info_->aipp_cfg_.equal_range(vision::kDvppNormalizeOperation); pos.first != pos.second;
           ++pos.first) {
        auto mean_or_std = pos.first->second;
        normalize_paras.insert(normalize_paras.end(), mean_or_std.begin(), mean_or_std.end());
      }
    }
    std::vector<uint32_t> aipp_mean = AippMeanFilter(normalize_paras);
    std::vector<float> aipp_std = AippStdFilter(normalize_paras);
    std::map<std::string, std::string> aipp_options;
    AippInfoCollection(&aipp_options, aipp_size, aipp_mean, aipp_std);
    // Write the "aipp_op { key : value ... }" section expected by ATC.
    std::string tab_char(4, ' ');
    outfile << "aipp_op {" << std::endl;
    for (auto &option : aipp_options) {
      outfile << tab_char << option.first << " : " << option.second << std::endl;
    }
    outfile << "}";
    outfile.close();
  }
#endif
  return config_location;
}
  504. Status Execute::validate_device_() {
  505. if (device_type_ != MapTargetDevice::kCpu && device_type_ != MapTargetDevice::kAscend310) {
  506. std::string err_msg = "Your input device is not supported. (Option: CPU or Ascend310)";
  507. MS_LOG(ERROR) << err_msg;
  508. RETURN_STATUS_UNEXPECTED(err_msg);
  509. }
  510. return Status::OK();
  511. }
  512. Status Execute::DeviceMemoryRelease() {
  513. CHECK_FAIL_RETURN_UNEXPECTED(device_resource_, "Device resource is nullptr which is illegal under case Ascend310");
  514. Status rc = device_resource_->DeviceDataRelease();
  515. if (rc.IsError()) {
  516. std::string err_msg = "Error in device data release";
  517. MS_LOG(ERROR) << err_msg;
  518. RETURN_STATUS_UNEXPECTED(err_msg);
  519. }
  520. return Status::OK();
  521. }
  522. } // namespace dataset
  523. } // namespace mindspore