
test_pad.py

# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing Pad op in DE
"""
import matplotlib.pyplot as plt
import numpy as np

import mindspore.dataset as ds
import mindspore.dataset.transforms.vision.c_transforms as c_vision
import mindspore.dataset.transforms.vision.py_transforms as py_vision
from mindspore import log as logger

from util import diff_mse

DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"


def test_pad_op():
    """
    Test Pad op
    """
    logger.info("test_pad_op")

    # First dataset
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()
    pad_op = c_vision.Pad((100, 100, 100, 100))
    ctrans = [decode_op,
              pad_op,
              ]

    data1 = data1.map(input_columns=["image"], operations=ctrans)

    # Second dataset
    transforms = [
        py_vision.Decode(),
        py_vision.Pad(100),
        py_vision.ToTensor(),
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform())

    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        c_image = item1["image"]
        # The Python pipeline outputs a CHW float image in [0, 1]; convert it
        # back to HWC uint8 so it can be compared against the C transform output.
        py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)

        logger.info("shape of c_image: {}".format(c_image.shape))
        logger.info("shape of py_image: {}".format(py_image.shape))
        logger.info("dtype of c_image: {}".format(c_image.dtype))
        logger.info("dtype of py_image: {}".format(py_image.dtype))

        mse = diff_mse(c_image, py_image)
        logger.info("mse is {}".format(mse))
        assert mse < 0.01


# pylint: disable=unnecessary-lambda
def test_pad_grayscale():
    """
    Tests that the pad works for grayscale images
    """

    def channel_swap(image):
        """
        Py func hack for our pytransforms to work with c transforms
        """
        # ToTensor produces a CHW float image in [0, 1]; swap back to
        # HWC uint8 so the C transform can operate on it.
        return (image.transpose(1, 2, 0) * 255).astype(np.uint8)

    transforms = [
        py_vision.Decode(),
        py_vision.Grayscale(1),
        py_vision.ToTensor(),
        (lambda image: channel_swap(image))
    ]

    transform = py_vision.ComposeOp(transforms)
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform())

    # if the input is grayscale, the output should stay single channel
    pad_gray = c_vision.Pad(100, fill_value=(20, 20, 20))
    data1 = data1.map(input_columns=["image"], operations=pad_gray)
    dataset_shape_1 = []
    for item1 in data1.create_dict_iterator():
        c_image = item1["image"]
        dataset_shape_1.append(c_image.shape)

    # Dataset for comparison
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()

    # we use the same padding logic
    ctrans = [decode_op, pad_gray]
    dataset_shape_2 = []

    data2 = data2.map(input_columns=["image"], operations=ctrans)
    for item2 in data2.create_dict_iterator():
        c_image = item2["image"]
        dataset_shape_2.append(c_image.shape)

    for shape1, shape2 in zip(dataset_shape_1, dataset_shape_2):
        # validate that the first two (height and width) dimensions are the same;
        # the third dimension differs because it is 1 after py_vision.Grayscale
        assert shape1[0:2] == shape2[0:2]


if __name__ == "__main__":
    test_pad_op()
    test_pad_grayscale()