You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

execute.cc 26 kB

5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648
  1. /**
  2. * Copyright 2020-2021 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <algorithm>
  17. #include <fstream>
  18. #include "minddata/dataset/include/execute.h"
  19. #include "minddata/dataset/core/de_tensor.h"
  20. #include "minddata/dataset/core/device_resource.h"
  21. #include "minddata/dataset/core/device_tensor.h"
  22. #include "minddata/dataset/core/tensor_row.h"
  23. #include "minddata/dataset/core/tensor.h"
  24. #include "minddata/dataset/core/type_id.h"
  25. #include "minddata/dataset/kernels/ir/tensor_operation.h"
  26. #include "minddata/dataset/kernels/tensor_op.h"
  27. #ifndef ENABLE_ANDROID
  28. #include "utils/log_adapter.h"
  29. #else
  30. #include "mindspore/lite/src/common/log_adapter.h"
  31. #endif
  32. #ifdef ENABLE_ACL
  33. #include "minddata/dataset/core/ascend_resource.h"
  34. #include "minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h"
  35. #include "minddata/dataset/kernels/ir/vision/ascend_vision_ir.h"
  36. #endif
  37. namespace mindspore {
  38. namespace dataset {
  39. using json = nlohmann::json;
// Auxiliary state attached to each Execute instance.
struct Execute::ExtraInfo {
  // AIPP parameters harvested from the operator IR (op name -> parameter values),
  // consumed later by Execute::AippCfgGenerator().
  std::multimap<std::string, std::vector<uint32_t>> aipp_cfg_;
  bool init_with_shared_ptr_ = true;  // Initial execute object with shared_ptr as default
#ifdef ENABLE_ACL
  // Maps each DVPP operator name to the json key(s) under which its size/mean/std
  // parameters are serialized by to_json(); a multimap because one op may expose
  // several relevant parameters (e.g. crop_size and resize_size).
  std::multimap<std::string, std::string> op2para_map_ = {{vision::kDvppCropJpegOperation, "size"},
                                                         {vision::kDvppDecodeResizeOperation, "size"},
                                                         {vision::kDvppDecodeResizeCropOperation, "crop_size"},
                                                         {vision::kDvppDecodeResizeCropOperation, "resize_size"},
                                                         {vision::kDvppNormalizeOperation, "mean"},
                                                         {vision::kDvppNormalizeOperation, "std"},
                                                         {vision::kDvppResizeJpegOperation, "size"}};
#endif
};
  53. // FIXME - Temporarily overload Execute to support both TensorOperation and TensorTransform
  54. Execute::Execute(std::shared_ptr<TensorOperation> op, MapTargetDevice deviceType, uint32_t device_id) {
  55. ops_.emplace_back(std::move(op));
  56. device_type_ = deviceType;
  57. info_ = std::make_shared<ExtraInfo>();
  58. #ifdef ENABLE_ACL
  59. if (device_type_ == MapTargetDevice::kAscend310) {
  60. device_resource_ = std::make_shared<AscendResource>();
  61. Status rc = device_resource_->InitResource(device_id);
  62. if (!rc.IsOk()) {
  63. device_resource_ = nullptr;
  64. MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
  65. }
  66. }
  67. #endif
  68. }
  69. Execute::Execute(std::shared_ptr<TensorTransform> op, MapTargetDevice deviceType, uint32_t device_id) {
  70. // Initialize the op and other context
  71. transforms_.emplace_back(op);
  72. info_ = std::make_shared<ExtraInfo>();
  73. device_type_ = deviceType;
  74. #ifdef ENABLE_ACL
  75. if (device_type_ == MapTargetDevice::kAscend310) {
  76. device_resource_ = std::make_shared<AscendResource>();
  77. Status rc = device_resource_->InitResource(device_id);
  78. if (!rc.IsOk()) {
  79. device_resource_ = nullptr;
  80. MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
  81. }
  82. }
  83. #endif
  84. }
  85. Execute::Execute(std::reference_wrapper<TensorTransform> op, MapTargetDevice deviceType, uint32_t device_id) {
  86. // Initialize the transforms_ and other context
  87. std::shared_ptr<TensorOperation> operation = op.get().Parse();
  88. ops_.emplace_back(std::move(operation));
  89. info_ = std::make_shared<ExtraInfo>();
  90. info_->init_with_shared_ptr_ = false;
  91. device_type_ = deviceType;
  92. #ifdef ENABLE_ACL
  93. if (device_type_ == MapTargetDevice::kAscend310) {
  94. device_resource_ = std::make_shared<AscendResource>();
  95. Status rc = device_resource_->InitResource(device_id);
  96. if (!rc.IsOk()) {
  97. device_resource_ = nullptr;
  98. MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
  99. }
  100. }
  101. #endif
  102. }
  103. // Execute function for the example case: auto decode(new vision::Decode());
  104. Execute::Execute(TensorTransform *op, MapTargetDevice deviceType, uint32_t device_id) {
  105. // Initialize the transforms_ and other context
  106. std::shared_ptr<TensorTransform> smart_ptr_op(op);
  107. transforms_.emplace_back(smart_ptr_op);
  108. info_ = std::make_shared<ExtraInfo>();
  109. device_type_ = deviceType;
  110. #ifdef ENABLE_ACL
  111. if (device_type_ == MapTargetDevice::kAscend310) {
  112. device_resource_ = std::make_shared<AscendResource>();
  113. Status rc = device_resource_->InitResource(device_id);
  114. if (!rc.IsOk()) {
  115. device_resource_ = nullptr;
  116. MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
  117. }
  118. }
  119. #endif
  120. }
  121. Execute::Execute(std::vector<std::shared_ptr<TensorOperation>> ops, MapTargetDevice deviceType, uint32_t device_id)
  122. : ops_(std::move(ops)), device_type_(deviceType) {
  123. info_ = std::make_shared<ExtraInfo>();
  124. #ifdef ENABLE_ACL
  125. if (device_type_ == MapTargetDevice::kAscend310) {
  126. device_resource_ = std::make_shared<AscendResource>();
  127. Status rc = device_resource_->InitResource(device_id);
  128. if (!rc.IsOk()) {
  129. device_resource_ = nullptr;
  130. MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
  131. }
  132. }
  133. #endif
  134. }
  135. Execute::Execute(std::vector<std::shared_ptr<TensorTransform>> ops, MapTargetDevice deviceType, uint32_t device_id) {
  136. // Initialize the transforms_ and other context
  137. transforms_ = ops;
  138. info_ = std::make_shared<ExtraInfo>();
  139. device_type_ = deviceType;
  140. #ifdef ENABLE_ACL
  141. if (device_type_ == MapTargetDevice::kAscend310) {
  142. device_resource_ = std::make_shared<AscendResource>();
  143. Status rc = device_resource_->InitResource(device_id);
  144. if (!rc.IsOk()) {
  145. device_resource_ = nullptr;
  146. MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
  147. }
  148. }
  149. #endif
  150. }
  151. Execute::Execute(const std::vector<std::reference_wrapper<TensorTransform>> ops, MapTargetDevice deviceType,
  152. uint32_t device_id) {
  153. // Initialize the transforms_ and other context
  154. if (deviceType == MapTargetDevice::kCpu) {
  155. (void)std::transform(
  156. ops.begin(), ops.end(), std::back_inserter(ops_),
  157. [](TensorTransform &operation) -> std::shared_ptr<TensorOperation> { return operation.Parse(); });
  158. } else {
  159. for (auto &op : ops) {
  160. ops_.emplace_back(op.get().Parse(deviceType));
  161. }
  162. }
  163. info_ = std::make_shared<ExtraInfo>();
  164. info_->init_with_shared_ptr_ = false;
  165. device_type_ = deviceType;
  166. #ifdef ENABLE_ACL
  167. if (device_type_ == MapTargetDevice::kAscend310) {
  168. device_resource_ = std::make_shared<AscendResource>();
  169. Status rc = device_resource_->InitResource(device_id);
  170. if (!rc.IsOk()) {
  171. device_resource_ = nullptr;
  172. MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
  173. }
  174. }
  175. #endif
  176. }
  177. // Execute function for the example vector case: auto decode(new vision::Decode());
  178. Execute::Execute(std::vector<TensorTransform *> ops, MapTargetDevice deviceType, uint32_t device_id) {
  179. // Initialize the transforms_ and other context
  180. for (auto &op : ops) {
  181. std::shared_ptr<TensorTransform> smart_ptr_op(op);
  182. transforms_.emplace_back(smart_ptr_op);
  183. }
  184. info_ = std::make_shared<ExtraInfo>();
  185. device_type_ = deviceType;
  186. #ifdef ENABLE_ACL
  187. if (device_type_ == MapTargetDevice::kAscend310) {
  188. device_resource_ = std::make_shared<AscendResource>();
  189. Status rc = device_resource_->InitResource(device_id);
  190. if (!rc.IsOk()) {
  191. device_resource_ = nullptr;
  192. MS_LOG(ERROR) << "Initialize Ascend310 resource fail";
  193. }
  194. }
  195. #endif
  196. }
  197. Execute::~Execute() {
  198. #ifdef ENABLE_ACL
  199. if (device_type_ == MapTargetDevice::kAscend310) {
  200. if (device_resource_) {
  201. device_resource_->FinalizeResource();
  202. } else {
  203. MS_LOG(ERROR) << "Device resource is nullptr which is illegal under case Ascend310";
  204. }
  205. }
  206. #endif
  207. }
// Apply the configured transform pipeline to a single input tensor.
// On CPU the data is converted to a dataset::Tensor and each op runs on host;
// on Ascend310 the data is sunk to the device and each op runs there, with the
// result staying in device memory (DETensor with device flag set).
Status Execute::operator()(const mindspore::MSTensor &input, mindspore::MSTensor *output) {
  // Validate input tensor
  CHECK_FAIL_RETURN_UNEXPECTED(input.DataSize() > 0, "Input Tensor has no data");
  CHECK_FAIL_RETURN_UNEXPECTED(validate_device_(), "Device Type should be 'Ascend310' or 'CPU'");
  // Parse TensorTransform transforms_ into TensorOperation ops_ (done lazily, once)
  if (info_->init_with_shared_ptr_) {
    RETURN_IF_NOT_OK(ParseTransforms_());
    info_->init_with_shared_ptr_ = false;
  }
  CHECK_FAIL_RETURN_UNEXPECTED(!ops_.empty(), "Input TensorOperation should be provided");
  // Validate and build runtime ops
  std::vector<std::shared_ptr<TensorOp>> transforms;  // record the transformations
  // Human-readable device names, used only in the error message below.
  std::map<MapTargetDevice, std::string> env_list = {
    {MapTargetDevice::kCpu, "kCpu"}, {MapTargetDevice::kGpu, "kGpu"}, {MapTargetDevice::kAscend310, "kAscend310"}};
  for (int32_t i = 0; i < ops_.size(); i++) {
    // A null IR op means Parse() produced nothing for the selected device.
    if (ops_[i] == nullptr) {
      MS_LOG(ERROR) << "Input TensorOperation["
                    << std::to_string(i) + "] is unsupported on your input device:" << env_list.at(device_type_);
    }
    CHECK_FAIL_RETURN_UNEXPECTED(ops_[i] != nullptr, "Input TensorOperation[" + std::to_string(i) + "] is null");
    RETURN_IF_NOT_OK(ops_[i]->ValidateParams());
    transforms.emplace_back(ops_[i]->Build());
  }
  if (device_type_ == MapTargetDevice::kCpu) {
    // Convert mindspore::Tensor to dataset::Tensor (wraps the same host buffer layout)
    std::shared_ptr<dataset::Tensor> de_tensor;
    Status rc = dataset::Tensor::CreateFromMemory(dataset::TensorShape(input.Shape()),
                                                  MSTypeToDEType(static_cast<TypeId>(input.DataType())),
                                                  (const uchar *)(input.Data().get()), input.DataSize(), &de_tensor);
    if (rc.IsError()) {
      MS_LOG(ERROR) << rc;
      return rc;
    }
    // Apply transforms on tensor, chaining the output of each op into the next
    for (auto &t : transforms) {
      std::shared_ptr<dataset::Tensor> de_output;
      Status rc_ = t->Compute(de_tensor, &de_output);
      if (rc_.IsError()) {
        MS_LOG(ERROR) << rc_;
        return rc_;
      }
      // For next transform
      de_tensor = std::move(de_output);
    }
    // Convert dataset::Tensor to mindspore::Tensor
    CHECK_FAIL_RETURN_UNEXPECTED(de_tensor->HasData(), "Apply transform failed, output tensor has no data");
    *output = mindspore::MSTensor(std::make_shared<DETensor>(de_tensor));
  } else {  // Ascend310 case, where we must set Ascend resource on each operators
#ifdef ENABLE_ACL
    CHECK_FAIL_RETURN_UNEXPECTED(device_resource_, "Device resource is nullptr which is illegal under case Ascend310");
    // Sink data from host into device
    std::shared_ptr<mindspore::dataset::DeviceTensor> device_input;
    RETURN_IF_NOT_OK(device_resource_->Sink(input, &device_input));
    for (auto &t : transforms) {
      // Initialize AscendResource for each operators
      std::shared_ptr<DeviceTensor> device_output;
      RETURN_IF_NOT_OK(t->SetAscendResource(device_resource_));
      RETURN_IF_NOT_OK(t->Compute(device_input, &device_output));
      // For next transform
      device_input = std::move(device_output);
    }
    CHECK_FAIL_RETURN_UNEXPECTED(device_input->HasDeviceData(), "Apply transform failed, output tensor has no data");
    // NOTE(review): output keeps the data on device (second DETensor arg = true);
    // the caller is expected to pop it when host access is needed — confirm.
    *output = mindspore::MSTensor(std::make_shared<DETensor>(device_input, true));
#endif
  }
  return Status::OK();
}
// Apply the configured transform pipeline to a row (vector) of input tensors.
// CPU: all tensors are converted and processed together as a TensorRow.
// Ascend310: tensors are processed one at a time and each result is copied back
// to host immediately to respect the device's memory limits.
Status Execute::operator()(const std::vector<MSTensor> &input_tensor_list, std::vector<MSTensor> *output_tensor_list) {
  // Validate input tensor
  CHECK_FAIL_RETURN_UNEXPECTED(!input_tensor_list.empty(), "Input Tensor is not valid");
  for (auto &tensor : input_tensor_list) {
    CHECK_FAIL_RETURN_UNEXPECTED(tensor.DataSize() > 0, "Input Tensor has no data");
  }
  CHECK_FAIL_RETURN_UNEXPECTED(validate_device_(), "Device Type should be 'Ascend310' or 'CPU'");
  // Parse TensorTransform transforms_ into TensorOperation ops_ (done lazily, once)
  if (info_->init_with_shared_ptr_) {
    RETURN_IF_NOT_OK(ParseTransforms_());
    info_->init_with_shared_ptr_ = false;
  }
  CHECK_FAIL_RETURN_UNEXPECTED(!ops_.empty(), "Input TensorOperation should be provided");
  // Human-readable device names, used only in the error message below.
  std::map<MapTargetDevice, std::string> env_list = {
    {MapTargetDevice::kCpu, "kCpu"}, {MapTargetDevice::kGpu, "kGpu"}, {MapTargetDevice::kAscend310, "kAscend310"}};
  // Validate and build runtime ops
  std::vector<std::shared_ptr<TensorOp>> transforms;
  for (int32_t i = 0; i < ops_.size(); i++) {
    // A null IR op means Parse() produced nothing for the selected device.
    if (ops_[i] == nullptr) {
      MS_LOG(ERROR) << "Input TensorOperation["
                    << std::to_string(i) + "] is unsupported on your input device:" << env_list.at(device_type_);
    }
    CHECK_FAIL_RETURN_UNEXPECTED(ops_[i] != nullptr, "Input TensorOperation[" + std::to_string(i) + "] is null");
    RETURN_IF_NOT_OK(ops_[i]->ValidateParams());
    transforms.emplace_back(ops_[i]->Build());
  }
  if (device_type_ == MapTargetDevice::kCpu) {  // Case CPU
    TensorRow de_tensor_list;
    // Convert every mindspore::MSTensor into a dataset::Tensor
    for (auto &tensor : input_tensor_list) {
      std::shared_ptr<dataset::Tensor> de_tensor;
      Status rc = dataset::Tensor::CreateFromMemory(
        dataset::TensorShape(tensor.Shape()), MSTypeToDEType(static_cast<TypeId>(tensor.DataType())),
        (const uchar *)(tensor.Data().get()), tensor.DataSize(), &de_tensor);
      if (rc.IsError()) {
        MS_LOG(ERROR) << rc;
        RETURN_IF_NOT_OK(rc);
      }
      de_tensor_list.emplace_back(std::move(de_tensor));
    }
    // Apply transforms on tensor, chaining each row output into the next op
    for (auto &t : transforms) {
      TensorRow de_output_list;
      RETURN_IF_NOT_OK(t->Compute(de_tensor_list, &de_output_list));
      // For next transform
      de_tensor_list = std::move(de_output_list);
    }
    // Convert the resulting row back into mindspore::MSTensor objects
    for (auto &tensor : de_tensor_list) {
      CHECK_FAIL_RETURN_UNEXPECTED(tensor->HasData(), "Apply transform failed, output tensor has no data");
      auto ms_tensor = mindspore::MSTensor(std::make_shared<DETensor>(tensor));
      output_tensor_list->emplace_back(ms_tensor);
    }
    CHECK_FAIL_RETURN_UNEXPECTED(!output_tensor_list->empty(), "Output Tensor is not valid");
  } else {  // Case Ascend310
#ifdef ENABLE_ACL
    CHECK_FAIL_RETURN_UNEXPECTED(device_resource_, "Device resource is nullptr which is illegal under case Ascend310");
    for (auto &input_tensor : input_tensor_list) {
      // Sink each data from host into device
      std::shared_ptr<dataset::DeviceTensor> device_input;
      RETURN_IF_NOT_OK(device_resource_->Sink(input_tensor, &device_input));
      for (auto &t : transforms) {
        std::shared_ptr<DeviceTensor> device_output;
        RETURN_IF_NOT_OK(t->SetAscendResource(device_resource_));
        RETURN_IF_NOT_OK(t->Compute(device_input, &device_output));
        // For next transform
        device_input = std::move(device_output);
      }
      CHECK_FAIL_RETURN_UNEXPECTED(device_input->HasDeviceData(), "Apply transform failed, output tensor has no data");
      // Due to the limitation of Ascend310 memory, we have to pop every data onto host memory
      // So the speed of this batch method is slower than solo mode
      std::shared_ptr<mindspore::dataset::Tensor> host_output;
      RETURN_IF_NOT_OK(device_resource_->Pop(device_input, &host_output));
      auto ms_tensor = mindspore::MSTensor(std::make_shared<DETensor>(host_output));
      output_tensor_list->emplace_back(ms_tensor);
      // Release the data on the device because we have copied one piece onto host
      RETURN_IF_NOT_OK(device_resource_->DeviceDataRelease());
    }
    CHECK_FAIL_RETURN_UNEXPECTED(!output_tensor_list->empty(), "Output Tensor vector is empty");
#endif
  }
  return Status::OK();
}
  356. std::vector<uint32_t> AippSizeFilter(const std::vector<uint32_t> &resize_para, const std::vector<uint32_t> &crop_para) {
  357. std::vector<uint32_t> aipp_size;
  358. // Special condition where (no Crop and no Resize) or (no Crop and resize with fixed ratio) will lead to dynamic input
  359. if ((resize_para.size() == 0 || resize_para.size() == 1) && crop_para.size() == 0) {
  360. aipp_size = {0, 0};
  361. MS_LOG(WARNING) << "Dynamic input shape is not supported, incomplete aipp config file will be generated. Please "
  362. "checkout your TensorTransform input, both src_image_size_h and src_image_size will be 0";
  363. return aipp_size;
  364. }
  365. if (resize_para.size() == 0) { // If only Crop operator exists
  366. aipp_size = crop_para;
  367. } else if (crop_para.size() == 0) { // If only Resize operator with 2 parameters exists
  368. aipp_size = resize_para;
  369. } else { // If both of them exist
  370. if (resize_para.size() == 1) {
  371. aipp_size = crop_para;
  372. } else {
  373. aipp_size =
  374. *min_element(resize_para.begin(), resize_para.end()) < *min_element(crop_para.begin(), crop_para.end())
  375. ? resize_para
  376. : crop_para;
  377. }
  378. }
  379. #ifdef ENABLE_ACL
  380. aipp_size[0] = DVPP_ALIGN_UP(aipp_size[0], VPC_HEIGHT_ALIGN); // H
  381. aipp_size[1] = DVPP_ALIGN_UP(aipp_size[1], VPC_WIDTH_ALIGN); // W
  382. #endif
  383. return aipp_size;
  384. }
  385. std::vector<uint32_t> AippMeanFilter(const std::vector<uint32_t> &normalize_para) {
  386. std::vector<uint32_t> aipp_mean;
  387. if (normalize_para.size() == 6) { // If Normalize operator exist
  388. std::transform(normalize_para.begin(), normalize_para.begin() + 3, std::back_inserter(aipp_mean),
  389. [](uint32_t i) { return static_cast<uint32_t>(i / 10000); });
  390. } else {
  391. aipp_mean = {0, 0, 0};
  392. }
  393. return aipp_mean;
  394. }
  395. std::vector<float> AippStdFilter(const std::vector<uint32_t> &normalize_para) {
  396. std::vector<float> aipp_std;
  397. if (normalize_para.size() == 6) { // If Normalize operator exist
  398. auto zeros = std::find(std::begin(normalize_para), std::end(normalize_para), 0);
  399. if (zeros == std::end(normalize_para)) {
  400. std::transform(normalize_para.begin() + 3, normalize_para.end(), std::back_inserter(aipp_std),
  401. [](uint32_t i) { return 10000 / static_cast<float>(i); });
  402. } else { // If 0 occurs in std vector
  403. MS_LOG(WARNING) << "Detect 0 in std vector, please verify your input";
  404. aipp_std = {1.0, 1.0, 1.0};
  405. }
  406. } else {
  407. aipp_std = {1.0, 1.0, 1.0};
  408. }
  409. return aipp_std;
  410. }
  411. Status AippInfoCollection(std::map<std::string, std::string> *aipp_options, const std::vector<uint32_t> &aipp_size,
  412. const std::vector<uint32_t> &aipp_mean, const std::vector<float> &aipp_std) {
  413. // Several aipp config parameters
  414. aipp_options->insert(std::make_pair("related_input_rank", "0"));
  415. aipp_options->insert(std::make_pair("src_image_size_w", std::to_string(aipp_size[1])));
  416. aipp_options->insert(std::make_pair("src_image_size_h", std::to_string(aipp_size[0])));
  417. aipp_options->insert(std::make_pair("crop", "false"));
  418. aipp_options->insert(std::make_pair("input_format", "YUV420SP_U8"));
  419. aipp_options->insert(std::make_pair("aipp_mode", "static"));
  420. aipp_options->insert(std::make_pair("csc_switch", "true"));
  421. aipp_options->insert(std::make_pair("rbuv_swap_switch", "false"));
  422. // Y = AX + b, this part is A
  423. std::vector<int32_t> color_space_matrix = {256, 0, 359, 256, -88, -183, 256, 454, 0};
  424. int count = 0;
  425. for (int i = 0; i < 3; i++) {
  426. for (int j = 0; j < 3; j++) {
  427. std::string key_word = "matrix_r" + std::to_string(i) + "c" + std::to_string(j);
  428. aipp_options->insert(std::make_pair(key_word, std::to_string(color_space_matrix[count])));
  429. ++count;
  430. }
  431. }
  432. // This part is b
  433. std::vector<uint32_t> color_space_bias = {0, 128, 128};
  434. for (int i = 0; i < 3; i++) {
  435. std::string key_word = "input_bias_" + std::to_string(i);
  436. aipp_options->insert(std::make_pair(key_word, std::to_string(color_space_bias[i])));
  437. }
  438. // Y = (X - mean - min) * [std^(-1)], this part is mean
  439. for (int i = 0; i < aipp_mean.size(); i++) {
  440. std::string key_word = "mean_chn_" + std::to_string(i);
  441. aipp_options->insert(std::make_pair(key_word, std::to_string(aipp_mean[i])));
  442. }
  443. // This part is min
  444. for (int i = 0; i < aipp_mean.size(); i++) {
  445. std::string key_word = "min_chn_" + std::to_string(i);
  446. aipp_options->insert(std::make_pair(key_word, "0.0"));
  447. }
  448. // This part is std^(-1)
  449. for (int i = 0; i < aipp_std.size(); i++) {
  450. std::string key_word = "var_reci_chn_" + std::to_string(i);
  451. aipp_options->insert(std::make_pair(key_word, std::to_string(aipp_std[i])));
  452. }
  453. return Status::OK();
  454. }
// Generate an AIPP config file ("./aipp.cfg") describing the configured DVPP
// pipeline (size, mean, std), and return its path. Returns "" if a null op is
// found or the file cannot be opened. Outside ENABLE_ACL builds this returns
// the path without writing anything.
std::string Execute::AippCfgGenerator() {
  std::string config_location = "./aipp.cfg";
#ifdef ENABLE_ACL
  if (info_->init_with_shared_ptr_) {
    // NOTE(review): the Status returned by ParseTransforms_() is ignored here —
    // a failed parse silently produces an empty/incomplete config. Confirm intended.
    ParseTransforms_();
    info_->init_with_shared_ptr_ = false;
  }
  std::vector<uint32_t> paras;  // Record the parameters value of each Ascend operators
  for (int32_t i = 0; i < ops_.size(); i++) {
    // Validate operator ir
    json ir_info;
    if (ops_[i] == nullptr) {
      MS_LOG(ERROR) << "Input TensorOperation[" + std::to_string(i) + "] is null";
      return "";
    }
    // Define map between operator name and parameter name
    ops_[i]->to_json(&ir_info);
    // Collect the information of operators: for each parameter key registered for
    // this op in op2para_map_, pull its value out of the serialized IR.
    for (auto pos = info_->op2para_map_.equal_range(ops_[i]->Name()); pos.first != pos.second; ++pos.first) {
      auto paras_key_word = pos.first->second;
      paras = ir_info[paras_key_word].get<std::vector<uint32_t>>();
      info_->aipp_cfg_.insert(std::make_pair(ops_[i]->Name(), paras));
    }
  }
  std::ofstream outfile;
  outfile.open(config_location, std::ofstream::out);
  if (!outfile.is_open()) {
    MS_LOG(ERROR) << "Fail to open Aipp config file, please verify your system config(including authority)"
                  << "We will return empty string which represent the location of Aipp config file in this case";
    std::string except = "";
    return except;
  }
  if (device_type_ == MapTargetDevice::kAscend310) {
    // Process resize parameters and crop parameters to find out the final size of input data
    std::vector<uint32_t> resize_paras;
    std::vector<uint32_t> crop_paras;
    // Find resize parameters (DvppResizeJpeg takes priority over DvppDecodeResize)
    std::map<std::string, std::vector<uint32_t>>::iterator iter;
    if (info_->aipp_cfg_.find(vision::kDvppResizeJpegOperation) != info_->aipp_cfg_.end()) {
      iter = info_->aipp_cfg_.find(vision::kDvppResizeJpegOperation);
      resize_paras = iter->second;
    } else if (info_->aipp_cfg_.find(vision::kDvppDecodeResizeOperation) != info_->aipp_cfg_.end()) {
      iter = info_->aipp_cfg_.find(vision::kDvppDecodeResizeOperation);
      resize_paras = iter->second;
    }
    // Find crop parameters (DvppCropJpeg takes priority over DvppDecodeResizeCrop)
    if (info_->aipp_cfg_.find(vision::kDvppCropJpegOperation) != info_->aipp_cfg_.end()) {
      iter = info_->aipp_cfg_.find(vision::kDvppCropJpegOperation);
      crop_paras = iter->second;
    } else if (info_->aipp_cfg_.find(vision::kDvppDecodeResizeCropOperation) != info_->aipp_cfg_.end()) {
      iter = info_->aipp_cfg_.find(vision::kDvppDecodeResizeCropOperation);
      crop_paras = iter->second;
    }
    // A single crop value means a square crop: duplicate it as (H, W)
    if (crop_paras.size() == 1) {
      crop_paras.emplace_back(crop_paras[0]);
    }
    std::vector<uint32_t> aipp_size = AippSizeFilter(resize_paras, crop_paras);
    // Process normalization parameters to find out the final normalization parameters for Aipp module
    std::vector<uint32_t> normalize_paras;
    if (info_->aipp_cfg_.find(vision::kDvppNormalizeOperation) != info_->aipp_cfg_.end()) {
      // Concatenate the "mean" and "std" entries into one 6-element vector
      for (auto pos = info_->aipp_cfg_.equal_range(vision::kDvppNormalizeOperation); pos.first != pos.second;
           ++pos.first) {
        auto mean_or_std = pos.first->second;
        normalize_paras.insert(normalize_paras.end(), mean_or_std.begin(), mean_or_std.end());
      }
    }
    std::vector<uint32_t> aipp_mean = AippMeanFilter(normalize_paras);
    std::vector<float> aipp_std = AippStdFilter(normalize_paras);
    std::map<std::string, std::string> aipp_options;
    // NOTE(review): return Status of AippInfoCollection is ignored (it currently
    // always returns OK).
    AippInfoCollection(&aipp_options, aipp_size, aipp_mean, aipp_std);
    // Write the collected options in ATC aipp_op block format
    std::string tab_char(4, ' ');
    outfile << "aipp_op {" << std::endl;
    for (auto &option : aipp_options) {
      outfile << tab_char << option.first << " : " << option.second << std::endl;
    }
    outfile << "}";
    outfile.close();
  } else {  // For case GPU or CPU: emit an empty block and warn
    outfile << "aipp_op {" << std::endl << "}";
    outfile.close();
    MS_LOG(WARNING) << "Your runtime environment is not Ascend310, this config file will lead to undefined behavior on "
                       "computing result. Please check that.";
  }
#endif
  return config_location;
}
  541. bool IsEmptyPtr(std::shared_ptr<TensorTransform> api_ptr) { return api_ptr == nullptr; }
  542. Status Execute::ParseTransforms_() {
  543. auto iter = std::find_if(transforms_.begin(), transforms_.end(), IsEmptyPtr);
  544. if (iter != transforms_.end()) {
  545. std::string err_msg = "Your input TensorTransforms contain at least one nullptr, please check your input";
  546. MS_LOG(ERROR) << err_msg;
  547. RETURN_STATUS_UNEXPECTED(err_msg);
  548. }
  549. if (device_type_ == MapTargetDevice::kCpu) {
  550. (void)std::transform(transforms_.begin(), transforms_.end(), std::back_inserter(ops_),
  551. [](std::shared_ptr<TensorTransform> operation) -> std::shared_ptr<TensorOperation> {
  552. return operation->Parse();
  553. });
  554. } else {
  555. for (auto &transform_ : transforms_) {
  556. ops_.emplace_back(transform_->Parse(device_type_));
  557. }
  558. }
  559. return Status::OK();
  560. }
  561. Status Execute::validate_device_() {
  562. if (device_type_ != MapTargetDevice::kCpu && device_type_ != MapTargetDevice::kAscend310 &&
  563. device_type_ != MapTargetDevice::kGpu) {
  564. std::string err_msg = "Your input device is not supported. (Option: CPU or GPU or Ascend310)";
  565. MS_LOG(ERROR) << err_msg;
  566. RETURN_STATUS_UNEXPECTED(err_msg);
  567. }
  568. return Status::OK();
  569. }
  570. Status Execute::DeviceMemoryRelease() {
  571. CHECK_FAIL_RETURN_UNEXPECTED(device_resource_, "Device resource is nullptr which is illegal under case Ascend310");
  572. Status rc = device_resource_->DeviceDataRelease();
  573. if (rc.IsError()) {
  574. std::string err_msg = "Error in device data release";
  575. MS_LOG(ERROR) << err_msg;
  576. RETURN_STATUS_UNEXPECTED(err_msg);
  577. }
  578. return Status::OK();
  579. }
  580. } // namespace dataset
  581. } // namespace mindspore