
test_de.cc

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <vector>
#include "common/common_test.h"
#include "include/api/types.h"
#include "minddata/dataset/include/execute.h"
#include "minddata/dataset/include/transforms.h"
#include "minddata/dataset/include/vision.h"
#ifdef ENABLE_ACL
#include "minddata/dataset/include/vision_ascend.h"
#endif
#include "minddata/dataset/kernels/tensor_op.h"
#include "include/api/model.h"
#include "include/api/serialization.h"
#include "include/api/context.h"

using namespace mindspore;
using namespace mindspore::dataset;
using namespace mindspore::dataset::vision;

class TestDE : public ST::Common {
 public:
  TestDE() {}
};

// Read the raw bytes of a file into a 1-D uint8 MSTensor; no decoding is performed here.
mindspore::MSTensor ReadFileToTensor(const std::string &file) {
  if (file.empty()) {
    std::cout << "[ERROR] File path is empty, return an empty Tensor." << std::endl;
    return mindspore::MSTensor();
  }
  std::ifstream ifs(file);
  if (!ifs.good()) {
    std::cout << "[ERROR] File: " << file << " does not exist, return an empty Tensor." << std::endl;
    return mindspore::MSTensor();
  }
  if (!ifs.is_open()) {
    std::cout << "[ERROR] File: " << file << " open failed, return an empty Tensor." << std::endl;
    return mindspore::MSTensor();
  }
  ifs.seekg(0, std::ios::end);
  size_t size = ifs.tellg();
  mindspore::MSTensor buf("file", mindspore::DataType::kNumberTypeUInt8, {static_cast<int64_t>(size)}, nullptr, size);
  ifs.seekg(0, std::ios::beg);
  ifs.read(reinterpret_cast<char *>(buf.MutableData()), size);
  ifs.close();
  return buf;
}
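
// Illustrative usage of the helper above (a sketch, not part of the tests): the returned tensor
// holds the encoded JPEG bytes as a flat uint8 buffer, so a Decode transform is expected before
// any pixel-level operation.
/*
  auto raw = ReadFileToTensor("./data/dataset/apple.jpg");
  // Assuming the read above fills the whole buffer, DataSize() should match the on-disk file size.
  std::cout << "raw bytes: " << raw.DataSize() << std::endl;
*/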

TEST_F(TestDE, TestResNetPreprocess) {
  // Read images
  auto image = ReadFileToTensor("./data/dataset/apple.jpg");

  // Define transform operations
  std::shared_ptr<TensorTransform> decode(new vision::Decode());
  std::shared_ptr<TensorTransform> resize(new vision::Resize({224, 224}));
  std::shared_ptr<TensorTransform> normalize(
    new vision::Normalize({0.485 * 255, 0.456 * 255, 0.406 * 255}, {0.229 * 255, 0.224 * 255, 0.225 * 255}));
  std::shared_ptr<TensorTransform> hwc2chw(new vision::HWC2CHW());

  mindspore::dataset::Execute Transform({decode, resize, normalize, hwc2chw});

  // Apply transform on images
  Status rc = Transform(image, &image);

  // Check image info
  ASSERT_TRUE(rc.IsOk());
  ASSERT_EQ(image.Shape().size(), 3);
  ASSERT_EQ(image.Shape()[0], 3);
  ASSERT_EQ(image.Shape()[1], 224);
  ASSERT_EQ(image.Shape()[2], 224);
}
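
// Note on the Normalize parameters above (explanatory sketch, assuming Normalize computes
// (pixel - mean) / std on 0-255 valued channels): the ImageNet statistics are given in the
// [0, 1] range, so they are scaled by 255 here. For example, a red-channel value of 255 maps to
// (255 - 0.485 * 255) / (0.229 * 255) = (1.0 - 0.485) / 0.229 ≈ 2.25.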

TEST_F(TestDE, TestDvpp) {
#ifdef ENABLE_ACL
  // Read images from target directory
  /* Old internal method, now deprecated:
  std::shared_ptr<mindspore::dataset::Tensor> de_tensor;
  Status rc = mindspore::dataset::Tensor::CreateFromFile("./data/dataset/apple.jpg", &de_tensor);
  ASSERT_TRUE(rc.IsOk());
  auto image = MSTensor(std::make_shared<mindspore::dataset::DETensor>(de_tensor));
  */
  auto context = ContextAutoSet();
  ASSERT_TRUE(context != nullptr);
  ASSERT_TRUE(context->MutableDeviceInfo().size() == 1);
  auto ascend310_info = context->MutableDeviceInfo()[0]->Cast<Ascend310DeviceInfo>();
  ASSERT_TRUE(ascend310_info != nullptr);
  auto device_id = ascend310_info->GetDeviceID();

  auto image = ReadFileToTensor("./data/dataset/apple.jpg");

  // Define dvpp transform
  std::vector<uint32_t> crop_paras = {224, 224};
  std::vector<uint32_t> resize_paras = {256, 256};
  std::shared_ptr<TensorTransform> decode_resize_crop(new vision::DvppDecodeResizeCropJpeg(crop_paras, resize_paras));
  mindspore::dataset::Execute Transform(decode_resize_crop, MapTargetDevice::kAscend310, device_id);

  // Apply transform on images
  Status rc = Transform(image, &image);
  std::string aipp_cfg = Transform.AippCfgGenerator();
  ASSERT_EQ(aipp_cfg, "./aipp.cfg");

  // Check image info
  ASSERT_TRUE(rc.IsOk());
  ASSERT_EQ(image.Shape().size(), 2);

  // DVPP output is aligned: height is rounded up to an even number, width to a multiple of 16
  int32_t real_h = 0;
  int32_t real_w = 0;
  int32_t remainder = crop_paras[crop_paras.size() - 1] % 16;
  if (crop_paras.size() == 1) {
    real_h = (crop_paras[0] % 2 == 0) ? crop_paras[0] : crop_paras[0] + 1;
    real_w = (remainder == 0) ? crop_paras[0] : crop_paras[0] + 16 - remainder;
  } else {
    real_h = (crop_paras[0] % 2 == 0) ? crop_paras[0] : crop_paras[0] + 1;
    real_w = (remainder == 0) ? crop_paras[1] : crop_paras[1] + 16 - remainder;
  }
  ASSERT_EQ(image.Shape()[0], real_h);
  ASSERT_EQ(image.Shape()[1], real_w);
  ASSERT_EQ(image.DataSize(), real_h * real_w * 1.5);  // For an image in YUV format, each pixel takes 1.5 bytes
  ASSERT_TRUE(image.Data().get() != nullptr);
  ASSERT_EQ(image.DataType(), mindspore::DataType::kNumberTypeUInt8);
  ASSERT_EQ(image.IsDevice(), true);
  /* This is the criterion for the previous method (without pop):
  ASSERT_EQ(image.Shape()[0], 1.5 * real_h * real_w);  // For an image in YUV format, each pixel takes 1.5 bytes
  ASSERT_EQ(image.Shape()[1], 1);
  ASSERT_EQ(image.Shape()[2], 1);
  ASSERT_EQ(image.DataSize(), real_h * real_w * 1.5);
  */
#endif
}
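
// Worked example for the alignment check above (an arithmetic sketch, not extra test code):
// with crop_paras = {224, 224}, remainder = 224 % 16 = 0, so real_h = 224 (already even) and
// real_w = 224; the expected YUV buffer size is 224 * 224 * 1.5 = 75264 bytes.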

TEST_F(TestDE, TestDvppSinkMode) {
#ifdef ENABLE_ACL
  auto context = ContextAutoSet();
  ASSERT_TRUE(context != nullptr);
  ASSERT_TRUE(context->MutableDeviceInfo().size() == 1);
  auto ascend310_info = context->MutableDeviceInfo()[0]->Cast<Ascend310DeviceInfo>();
  ASSERT_TRUE(ascend310_info != nullptr);
  auto device_id = ascend310_info->GetDeviceID();

  // Read images from target directory
  auto image = ReadFileToTensor("./data/dataset/apple.jpg");

  // Define dvpp transform
  std::vector<int32_t> crop_paras = {224, 224};
  std::vector<int32_t> resize_paras = {256};
  std::shared_ptr<TensorTransform> decode(new vision::Decode());
  std::shared_ptr<TensorTransform> resize(new vision::Resize(resize_paras));
  std::shared_ptr<TensorTransform> centercrop(new vision::CenterCrop(crop_paras));
  std::vector<std::shared_ptr<TensorTransform>> trans_list = {decode, resize, centercrop};
  mindspore::dataset::Execute Transform(trans_list, MapTargetDevice::kAscend310, device_id);

  // Apply transform on images
  Status rc = Transform(image, &image);

  // Check image info
  ASSERT_TRUE(rc.IsOk());
  ASSERT_EQ(image.Shape().size(), 2);
  int32_t real_h = 0;
  int32_t real_w = 0;
  int32_t remainder = crop_paras[crop_paras.size() - 1] % 16;
  if (crop_paras.size() == 1) {
    real_h = (crop_paras[0] % 2 == 0) ? crop_paras[0] : crop_paras[0] + 1;
    real_w = (remainder == 0) ? crop_paras[0] : crop_paras[0] + 16 - remainder;
  } else {
    real_h = (crop_paras[0] % 2 == 0) ? crop_paras[0] : crop_paras[0] + 1;
    real_w = (remainder == 0) ? crop_paras[1] : crop_paras[1] + 16 - remainder;
  }
  ASSERT_EQ(image.Shape()[0], real_h);
  ASSERT_EQ(image.Shape()[1], real_w);
  ASSERT_EQ(image.DataSize(), real_h * real_w * 1.5);  // For an image in YUV format, each pixel takes 1.5 bytes
  ASSERT_TRUE(image.Data().get() != nullptr);
  ASSERT_EQ(image.DataType(), mindspore::DataType::kNumberTypeUInt8);
  ASSERT_EQ(image.IsDevice(), true);
  Transform.DeviceMemoryRelease();
#endif
}
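
// Note (an assumption inferred from the calls above, not confirmed by this file): in sink mode the
// output tensor remains on the Ascend device (IsDevice() is true), so Transform.DeviceMemoryRelease()
// is called after the checks to free the device-side buffers held by the pipeline.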

TEST_F(TestDE, TestDvppDecodeResizeCropNormalize) {
#ifdef ENABLE_ACL
  auto context = ContextAutoSet();
  ASSERT_TRUE(context != nullptr);
  ASSERT_TRUE(context->MutableDeviceInfo().size() == 1);
  auto ascend310_info = context->MutableDeviceInfo()[0]->Cast<Ascend310DeviceInfo>();
  ASSERT_TRUE(ascend310_info != nullptr);
  auto device_id = ascend310_info->GetDeviceID();

  auto image = ReadFileToTensor("./data/dataset/apple.jpg");

  // Define dvpp transform
  std::vector<int32_t> crop_paras = {416};
  std::vector<int32_t> resize_paras = {512};
  std::vector<float> mean = {0.485 * 255, 0.456 * 255, 0.406 * 255};
  std::vector<float> std = {0.229 * 255, 0.224 * 255, 0.225 * 255};
  std::shared_ptr<TensorTransform> decode(new vision::Decode());
  std::shared_ptr<TensorTransform> resize(new vision::Resize(resize_paras));
  std::shared_ptr<TensorTransform> centercrop(new vision::CenterCrop(crop_paras));
  std::shared_ptr<TensorTransform> normalize(new vision::Normalize(mean, std));
  std::vector<std::shared_ptr<TensorTransform>> trans_list = {decode, resize, centercrop, normalize};
  mindspore::dataset::Execute Transform(trans_list, MapTargetDevice::kAscend310, device_id);

  std::string aipp_cfg = Transform.AippCfgGenerator();
  ASSERT_EQ(aipp_cfg, "./aipp.cfg");

  // Apply transform on images
  Status rc = Transform(image, &image);

  // Check image info
  ASSERT_TRUE(rc.IsOk());
  ASSERT_EQ(image.Shape().size(), 2);
  int32_t real_h = 0;
  int32_t real_w = 0;
  int32_t remainder = crop_paras[crop_paras.size() - 1] % 16;
  if (crop_paras.size() == 1) {
    real_h = (crop_paras[0] % 2 == 0) ? crop_paras[0] : crop_paras[0] + 1;
    real_w = (remainder == 0) ? crop_paras[0] : crop_paras[0] + 16 - remainder;
  } else {
    real_h = (crop_paras[0] % 2 == 0) ? crop_paras[0] : crop_paras[0] + 1;
    real_w = (remainder == 0) ? crop_paras[1] : crop_paras[1] + 16 - remainder;
  }
  ASSERT_EQ(image.Shape()[0], real_h);
  ASSERT_EQ(image.Shape()[1], real_w);
  ASSERT_EQ(image.DataSize(), real_h * real_w * 1.5);  // For an image in YUV format, each pixel takes 1.5 bytes
  ASSERT_TRUE(image.Data().get() != nullptr);
  ASSERT_EQ(image.DataType(), mindspore::DataType::kNumberTypeUInt8);
  ASSERT_EQ(image.IsDevice(), true);
  Transform.DeviceMemoryRelease();
#endif
}
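
// Worked example for the single-value crop case above (an arithmetic sketch): crop_paras = {416},
// remainder = 416 % 16 = 0, so real_h = 416 (already even) and real_w = 416; the expected YUV
// buffer size is 416 * 416 * 1.5 = 259584 bytes.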