
test_de.cc

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <string>
#include <vector>
#include "common/common_test.h"
#include "include/api/types.h"
#include "minddata/dataset/include/execute.h"
#include "minddata/dataset/include/transforms.h"
#include "minddata/dataset/include/vision.h"
#ifdef ENABLE_ACL
#include "minddata/dataset/include/vision_ascend.h"
#endif
#include "minddata/dataset/kernels/tensor_op.h"
#include "include/api/model.h"
#include "include/api/serialization.h"
#include "include/api/context.h"

using namespace mindspore;
using namespace mindspore::dataset;
using namespace mindspore::dataset::vision;

class TestDE : public ST::Common {
 public:
  TestDE() {}
};
TEST_F(TestDE, TestResNetPreprocess) {
  // Read images
  std::shared_ptr<mindspore::dataset::Tensor> de_tensor;
  mindspore::dataset::Tensor::CreateFromFile("./data/dataset/apple.jpg", &de_tensor);
  auto image = mindspore::MSTensor(std::make_shared<mindspore::dataset::DETensor>(de_tensor));

  // Define transform operations
  auto decode(new vision::Decode());
  auto resize(new vision::Resize({224, 224}));
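  // The values below are the standard ImageNet normalization statistics
  // (means 0.485/0.456/0.406, stds 0.229/0.224/0.225), scaled by 255 because
  // Normalize here operates on pixel values in the [0, 255] range.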
  auto normalize(
    new vision::Normalize({0.485 * 255, 0.456 * 255, 0.406 * 255}, {0.229 * 255, 0.224 * 255, 0.225 * 255}));
  auto hwc2chw(new vision::HWC2CHW());
  mindspore::dataset::Execute Transform({decode, resize, normalize, hwc2chw});

  // Apply transform on images
  Status rc = Transform(image, &image);

  // Check image info
  ASSERT_TRUE(rc.IsOk());
  ASSERT_EQ(image.Shape().size(), 3);
  ASSERT_EQ(image.Shape()[0], 3);
  ASSERT_EQ(image.Shape()[1], 224);
  ASSERT_EQ(image.Shape()[2], 224);
}
  58. TEST_F(TestDE, TestDvpp) {
  59. #ifdef ENABLE_ACL
  60. // Read images from target directory
  61. std::shared_ptr<mindspore::dataset::Tensor> de_tensor;
  62. mindspore::dataset::Tensor::CreateFromFile("./data/dataset/apple.jpg", &de_tensor);
  63. auto image = MSTensor(std::make_shared<mindspore::dataset::DETensor>(de_tensor));
  64. // Define dvpp transform
  65. std::vector<uint32_t> crop_paras = {224, 224};
  66. std::vector<uint32_t> resize_paras = {256, 256};
  67. auto decode_resize_crop(new vision::DvppDecodeResizeCropJpeg(crop_paras, resize_paras));
  68. mindspore::dataset::Execute Transform(decode_resize_crop, MapTargetDevice::kAscend310);
  69. // Apply transform on images
  70. Status rc = Transform(image, &image);
  71. // Check image info
  72. ASSERT_TRUE(rc.IsOk());
  73. ASSERT_EQ(image.Shape().size(), 3);
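  // Dvpp output dimensions are aligned: the cropped height is rounded up to an
  // even number and the width to a multiple of 16, which is what the branches
  // below compute for the single- and two-parameter crop cases.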
  int32_t real_h = 0;
  int32_t real_w = 0;
  int32_t remainder = crop_paras[crop_paras.size() - 1] % 16;
  if (crop_paras.size() == 1) {
    real_h = (crop_paras[0] % 2 == 0) ? crop_paras[0] : crop_paras[0] + 1;
    real_w = (remainder == 0) ? crop_paras[0] : crop_paras[0] + 16 - remainder;
  } else {
    real_h = (crop_paras[0] % 2 == 0) ? crop_paras[0] : crop_paras[0] + 1;
    real_w = (remainder == 0) ? crop_paras[1] : crop_paras[1] + 16 - remainder;
  }
  /* Use in the future
  ASSERT_EQ(image.Shape()[0], real_h);  // For an image in YUV format, each pixel takes 1.5 bytes
  ASSERT_EQ(image.Shape()[1], real_w);
  ASSERT_EQ(image.DataSize(), real_h * real_w * 1.5);
  */
  ASSERT_EQ(image.Shape()[0], 1.5 * real_h * real_w);  // For an image in YUV format, each pixel takes 1.5 bytes
  ASSERT_EQ(image.Shape()[1], 1);
  ASSERT_EQ(image.Shape()[2], 1);
  ASSERT_EQ(image.DataSize(), real_h * real_w * 1.5);
#endif
}
TEST_F(TestDE, TestDvppSinkMode) {
#ifdef ENABLE_ACL
  // Read images from target directory
  std::shared_ptr<mindspore::dataset::Tensor> de_tensor;
  mindspore::dataset::Tensor::CreateFromFile("./data/dataset/apple.jpg", &de_tensor);
  auto image = MSTensor(std::make_shared<mindspore::dataset::DETensor>(de_tensor));

  // Define dvpp transform
  std::vector<int32_t> crop_paras = {224, 224};
  std::vector<int32_t> resize_paras = {256};
  std::shared_ptr<TensorTransform> decode(new vision::Decode());
  std::shared_ptr<TensorTransform> resize(new vision::Resize(resize_paras));
  std::shared_ptr<TensorTransform> centercrop(new vision::CenterCrop(crop_paras));
  std::vector<std::shared_ptr<TensorTransform>> transforms = {decode, resize, centercrop};
  mindspore::dataset::Execute Transform(transforms, MapTargetDevice::kAscend310);

  // Apply transform on images
  Status rc = Transform(image, &image);

  // Check image info
  ASSERT_TRUE(rc.IsOk());
  ASSERT_EQ(image.Shape().size(), 3);
  int32_t real_h = 0;
  int32_t real_w = 0;
  int32_t remainder = crop_paras[crop_paras.size() - 1] % 16;
  if (crop_paras.size() == 1) {
    real_h = (crop_paras[0] % 2 == 0) ? crop_paras[0] : crop_paras[0] + 1;
    real_w = (remainder == 0) ? crop_paras[0] : crop_paras[0] + 16 - remainder;
  } else {
    real_h = (crop_paras[0] % 2 == 0) ? crop_paras[0] : crop_paras[0] + 1;
    real_w = (remainder == 0) ? crop_paras[1] : crop_paras[1] + 16 - remainder;
  }
  ASSERT_EQ(image.Shape()[0], 1.5 * real_h * real_w);  // For an image in YUV format, each pixel takes 1.5 bytes
  ASSERT_EQ(image.Shape()[1], 1);
  ASSERT_EQ(image.Shape()[2], 1);
  ASSERT_EQ(image.DataSize(), real_h * real_w * 1.5);
  Transform.DeviceMemoryRelease();
#endif
}
TEST_F(TestDE, TestDvppDecodeResizeCropNormalize) {
#ifdef ENABLE_ACL
  std::shared_ptr<mindspore::dataset::Tensor> de_tensor;
  mindspore::dataset::Tensor::CreateFromFile("./data/dataset/apple.jpg", &de_tensor);
  auto image = MSTensor(std::make_shared<mindspore::dataset::DETensor>(de_tensor));

  // Define dvpp transform
  std::vector<int32_t> crop_paras = {416};
  std::vector<int32_t> resize_paras = {512};
  std::vector<float> mean = {0.485 * 255, 0.456 * 255, 0.406 * 255};
  std::vector<float> std = {0.229 * 255, 0.224 * 255, 0.225 * 255};
  auto decode(new vision::Decode());
  auto resize(new vision::Resize(resize_paras));
  auto centercrop(new vision::CenterCrop(crop_paras));
  auto normalize(new vision::Normalize(mean, std));
  std::vector<TensorTransform *> trans_lists = {decode, resize, centercrop, normalize};
  mindspore::dataset::Execute Transform(trans_lists, MapTargetDevice::kAscend310);
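  // AippCfgGenerator converts the transform list above into an AIPP (AI preprocessing)
  // configuration file for the Ascend 310 device; the test only checks the generated path.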
  std::string aipp_cfg = Transform.AippCfgGenerator();
  ASSERT_EQ(aipp_cfg, "./aipp.cfg");

  // Apply transform on images
  Status rc = Transform(image, &image);

  // Check image info
  ASSERT_TRUE(rc.IsOk());
  ASSERT_EQ(image.Shape().size(), 3);
  int32_t real_h = 0;
  int32_t real_w = 0;
  int32_t remainder = crop_paras[crop_paras.size() - 1] % 16;
  if (crop_paras.size() == 1) {
    real_h = (crop_paras[0] % 2 == 0) ? crop_paras[0] : crop_paras[0] + 1;
    real_w = (remainder == 0) ? crop_paras[0] : crop_paras[0] + 16 - remainder;
  } else {
    real_h = (crop_paras[0] % 2 == 0) ? crop_paras[0] : crop_paras[0] + 1;
    real_w = (remainder == 0) ? crop_paras[1] : crop_paras[1] + 16 - remainder;
  }
  ASSERT_EQ(image.Shape()[0], 1.5 * real_h * real_w);  // For an image in YUV format, each pixel takes 1.5 bytes
  ASSERT_EQ(image.Shape()[1], 1);
  ASSERT_EQ(image.Shape()[2], 1);
  ASSERT_EQ(image.DataSize(), real_h * real_w * 1.5);
  Transform.DeviceMemoryRelease();
#endif
}
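The file also pulls in include/api/model.h, include/api/serialization.h, and include/api/context.h, although the tests shown here stop at preprocessing. The snippet below is not part of test_de.cc; it is a minimal, hedged sketch of how a preprocessed MSTensor would typically be fed to inference with that C++ API. The model path ./resnet50.mindir, the Ascend310DeviceInfo setup, and the exact Serialization::Load / Model::Build / Model::Predict usage are assumptions based on the MindSpore C++ inference API of the same era.

// Sketch only: feed a preprocessed MSTensor into a model (names/paths are hypothetical).
#include <memory>
#include <vector>
#include "include/api/context.h"
#include "include/api/model.h"
#include "include/api/serialization.h"
#include "include/api/types.h"

mindspore::Status RunInference(const mindspore::MSTensor &image, std::vector<mindspore::MSTensor> *outputs) {
  // Load a MindIR graph from disk (placeholder path).
  mindspore::Graph graph;
  mindspore::Serialization::Load("./resnet50.mindir", mindspore::ModelType::kMindIR, &graph);

  // Target the Ascend 310 device, matching MapTargetDevice::kAscend310 used in the tests above.
  auto context = std::make_shared<mindspore::Context>();
  auto ascend_info = std::make_shared<mindspore::Ascend310DeviceInfo>();
  ascend_info->SetDeviceID(0);
  context->MutableDeviceInfo().push_back(ascend_info);

  // Build the model and run one prediction on the preprocessed image.
  mindspore::Model model;
  auto ret = model.Build(mindspore::GraphCell(graph), context);
  if (ret != mindspore::kSuccess) {
    return ret;
  }
  return model.Predict({image}, outputs);
}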