
test_cut_out.py

# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing CutOut op in DE
"""
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.vision.c_transforms as c
import mindspore.dataset.transforms.vision.py_transforms as f
from mindspore import log as logger
from util import visualize_image, visualize_list, diff_mse, save_and_check_md5, \
    config_get_set_seed, config_get_set_num_parallel_workers

DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"

GENERATE_GOLDEN = False

def test_cut_out_op(plot=False):
    """
    Test CutOut: compare the C++ CutOut op against the Python RandomErasing op
    """
    logger.info("test_cut_out")

    # First dataset: Python transforms with RandomErasing
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms_1 = [
        f.Decode(),
        f.ToTensor(),
        f.RandomErasing(value='random')
    ]
    transform_1 = f.ComposeOp(transforms_1)
    data1 = data1.map(input_columns=["image"], operations=transform_1())

    # Second dataset: C++ transforms with CutOut
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c.Decode()
    cut_out_op = c.CutOut(80)
    transforms_2 = [
        decode_op,
        cut_out_op
    ]
    data2 = data2.map(input_columns=["image"], operations=transforms_2)

    num_iter = 0
    for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1), data2.create_dict_iterator(num_epochs=1)):
        num_iter += 1
        # Python transforms produce CHW float images in [0, 1]; convert to HWC uint8
        image_1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        # C image doesn't require transpose
        image_2 = item2["image"]
        logger.info("shape of image_1: {}".format(image_1.shape))
        logger.info("shape of image_2: {}".format(image_2.shape))
        logger.info("dtype of image_1: {}".format(image_1.dtype))
        logger.info("dtype of image_2: {}".format(image_2.dtype))
        mse = diff_mse(image_1, image_2)
        if plot:
            visualize_image(image_1, image_2, mse)

def test_cut_out_op_multicut(plot=False):
    """
    Test CutOut with multiple patches (num_patches=10)
    """
    logger.info("test_cut_out_op_multicut")

    # First dataset: Python transforms, decode only (no erasing), used as reference
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms_1 = [
        f.Decode(),
        f.ToTensor(),
    ]
    transform_1 = f.ComposeOp(transforms_1)
    data1 = data1.map(input_columns=["image"], operations=transform_1())

    # Second dataset: C++ CutOut with 10 patches
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c.Decode()
    cut_out_op = c.CutOut(80, num_patches=10)
    transforms_2 = [
        decode_op,
        cut_out_op
    ]
    data2 = data2.map(input_columns=["image"], operations=transforms_2)

    num_iter = 0
    image_list_1, image_list_2 = [], []
    for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1), data2.create_dict_iterator(num_epochs=1)):
        num_iter += 1
        image_1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        # C image doesn't require transpose
        image_2 = item2["image"]
        image_list_1.append(image_1)
        image_list_2.append(image_2)
        logger.info("shape of image_1: {}".format(image_1.shape))
        logger.info("shape of image_2: {}".format(image_2.shape))
        logger.info("dtype of image_1: {}".format(image_1.dtype))
        logger.info("dtype of image_2: {}".format(image_2.dtype))
    if plot:
        visualize_list(image_list_1, image_list_2)

def test_cut_out_md5():
    """
    Test CutOut with md5 check
    """
    logger.info("test_cut_out_md5")
    original_seed = config_get_set_seed(2)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # First dataset: C++ CutOut
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c.Decode()
    cut_out_op = c.CutOut(100)
    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"], operations=cut_out_op)

    # Second dataset: Python Cutout
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms = [
        f.Decode(),
        f.ToTensor(),
        f.Cutout(100)
    ]
    transform = f.ComposeOp(transforms)
    data2 = data2.map(input_columns=["image"], operations=transform())

    # Compare with expected md5 from images
    filename1 = "cut_out_01_c_result.npz"
    save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
    filename2 = "cut_out_01_py_result.npz"
    save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)

    # Restore config
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)

def test_cut_out_comp(plot=False):
    """
    Test CutOut with C++ and Python op comparison
    """
    logger.info("test_cut_out_comp")

    # First dataset: Python Cutout
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms_1 = [
        f.Decode(),
        f.ToTensor(),
        f.Cutout(200)
    ]
    transform_1 = f.ComposeOp(transforms_1)
    data1 = data1.map(input_columns=["image"], operations=transform_1())

    # Second dataset: C++ CutOut
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms_2 = [
        c.Decode(),
        c.CutOut(200)
    ]
    data2 = data2.map(input_columns=["image"], operations=transforms_2)

    num_iter = 0
    image_list_1, image_list_2 = [], []
    for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1), data2.create_dict_iterator(num_epochs=1)):
        num_iter += 1
        image_1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        # C image doesn't require transpose
        image_2 = item2["image"]
        image_list_1.append(image_1)
        image_list_2.append(image_2)
        logger.info("shape of image_1: {}".format(image_1.shape))
        logger.info("shape of image_2: {}".format(image_2.shape))
        logger.info("dtype of image_1: {}".format(image_1.dtype))
        logger.info("dtype of image_2: {}".format(image_2.dtype))
    if plot:
        visualize_list(image_list_1, image_list_2, visualize_mode=2)

if __name__ == "__main__":
    test_cut_out_op(plot=True)
    test_cut_out_op_multicut(plot=True)
    test_cut_out_md5()
    test_cut_out_comp(plot=True)