
test_pad.py

# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing Pad op in DE
"""
import numpy as np

import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
from mindspore import log as logger
from util import diff_mse, save_and_check_md5

DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"

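# Set to True only when regenerating the golden files consumed by save_and_check_md5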
GENERATE_GOLDEN = False


def test_pad_op():
    """
    Test Pad op in the C pipeline and compare its output with the Python pipeline
    """
    logger.info("test_pad_op")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()

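    # Pad takes a 4-value sequence as (left, top, right, bottom) padding in pixels,
    # so each border grows by 100 here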
    pad_op = c_vision.Pad((100, 100, 100, 100))
    ctrans = [decode_op,
              pad_op,
              ]

    data1 = data1.map(operations=ctrans, input_columns=["image"])

    # Second dataset
    transforms = [
        py_vision.Decode(),
        py_vision.Pad(100),
        py_vision.ToTensor(),
    ]
    transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(operations=transform, input_columns=["image"])

    for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
                            data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        c_image = item1["image"]
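        # ToTensor yields a CHW float image in [0, 1]; convert it back to HWC uint8
        # so it can be compared pixel by pixel with the C pipeline output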
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)

        logger.info("shape of c_image: {}".format(c_image.shape))
        logger.info("shape of py_image: {}".format(py_image.shape))

        logger.info("dtype of c_image: {}".format(c_image.dtype))
        logger.info("dtype of py_image: {}".format(py_image.dtype))

        mse = diff_mse(c_image, py_image)
        logger.info("mse is {}".format(mse))
        assert mse < 0.01


def test_pad_op2():
    """
    Test Pad op with a 2-value padding parameter
    """
    logger.info("test padding parameter with size 2")
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()
    resize_op = c_vision.Resize([90, 90])
    pad_op = c_vision.Pad((100, 9))
    ctrans = [decode_op, resize_op, pad_op]
    data1 = data1.map(operations=ctrans, input_columns=["image"])

    for data in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
        logger.info(data["image"].shape)
        # The op pads left and top with 100 and right and bottom with 9,
        # so the final size of the image is 90 + 100 + 9 = 199
        assert data["image"].shape[0] == 199
        assert data["image"].shape[1] == 199


def test_pad_grayscale():
    """
    Test that Pad works for grayscale images
    """
    # Note: image.transpose performs channel swap to allow py transforms to
    # work with c transforms
    transforms = [
        py_vision.Decode(),
        py_vision.Grayscale(1),
        py_vision.ToTensor(),
        (lambda image: (image.transpose(1, 2, 0) * 255).astype(np.uint8))
    ]

    transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(operations=transform, input_columns=["image"])

    # if input is grayscale, the output dimensions should be single channel
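    # A single integer pads all four borders by that amount; fill_value sets the border color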
    pad_gray = c_vision.Pad(100, fill_value=(20, 20, 20))
    data1 = data1.map(operations=pad_gray, input_columns=["image"])
    dataset_shape_1 = []
    for item1 in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
        c_image = item1["image"]
        dataset_shape_1.append(c_image.shape)

    # Dataset for comparison
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()

    # we use the same padding logic
    ctrans = [decode_op, pad_gray]
    dataset_shape_2 = []

    data2 = data2.map(operations=ctrans, input_columns=["image"])

    for item2 in data2.create_dict_iterator(num_epochs=1, output_numpy=True):
        c_image = item2["image"]
        dataset_shape_2.append(c_image.shape)

    for shape1, shape2 in zip(dataset_shape_1, dataset_shape_2):
        # validate that the first two dimensions are the same
        # we have a little inconsistency here because the third dimension is 1 after py_vision.Grayscale
        assert shape1[0:2] == shape2[0:2]


def test_pad_md5():
    """
    Test Pad with md5 check
    """
    logger.info("test_pad_md5")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()
    pad_op = c_vision.Pad(150)
    ctrans = [decode_op,
              pad_op,
              ]

    data1 = data1.map(operations=ctrans, input_columns=["image"])

    # Second dataset
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    pytrans = [
        py_vision.Decode(),
        py_vision.Pad(150),
        py_vision.ToTensor(),
    ]
    transform = mindspore.dataset.transforms.py_transforms.Compose(pytrans)
    data2 = data2.map(operations=transform, input_columns=["image"])

    # Compare with expected md5 from images
    filename1 = "pad_01_c_result.npz"
    save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
    filename2 = "pad_01_py_result.npz"
    save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)


if __name__ == "__main__":
    test_pad_op()
    test_pad_op2()
    test_pad_grayscale()
    test_pad_md5()
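
The shape arithmetic asserted in test_pad_op2 can also be exercised without the TFRecord test data. The following minimal sketch (an illustration only, assuming ds.GeneratorDataset is available and that the C Pad op accepts HWC uint8 arrays fed in from Python) pushes a synthetic 90x90 image through the same Pad((100, 9)) transform:

    import numpy as np
    import mindspore.dataset as ds
    import mindspore.dataset.vision.c_transforms as c_vision

    def gen():
        # Synthetic 90x90 RGB image, the same size that Resize([90, 90]) produces above
        yield (np.zeros((90, 90, 3), dtype=np.uint8),)

    data = ds.GeneratorDataset(gen, column_names=["image"])
    data = data.map(operations=[c_vision.Pad((100, 9))], input_columns=["image"])
    for row in data.create_dict_iterator(num_epochs=1, output_numpy=True):
        # 90 + 100 (left/top) + 9 (right/bottom) = 199 in both spatial dimensions
        assert row["image"].shape[:2] == (199, 199)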