
test_datasets_voc.py

# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
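"""
Tests for reading the VOC2012 test dataset with mindspore.dataset.VOCDataset:
Segmentation and Detection tasks, custom class indexing, map/repeat/batch/split
pipelines, and error handling for invalid tasks, modes, and annotation files.
"""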
import mindspore.dataset as ds
import mindspore.dataset.transforms.vision.c_transforms as vision

DATA_DIR = "../data/dataset/testVOC2012"

# Expected height (shape[0]) of each decoded image and segmentation target,
# in the order samples are produced when shuffle=False.
IMAGE_SHAPE = [2268, 2268, 2268, 2268, 642, 607, 561, 596, 612, 2268]
TARGET_SHAPE = [680, 680, 680, 680, 642, 607, 561, 596, 612, 680]


def test_voc_segmentation():
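    """Read the Segmentation task in order and check each image and target height."""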
    data1 = ds.VOCDataset(DATA_DIR, task="Segmentation", mode="train", decode=True, shuffle=False)
    num = 0
    for item in data1.create_dict_iterator():
        assert item["image"].shape[0] == IMAGE_SHAPE[num]
        assert item["target"].shape[0] == TARGET_SHAPE[num]
        num += 1
    assert num == 10


def test_voc_detection():
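    """Read the Detection task in order and count bounding boxes per class label."""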
    data1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True, shuffle=False)
    num = 0
    count = [0, 0, 0, 0, 0, 0]
    for item in data1.create_dict_iterator():
        assert item["image"].shape[0] == IMAGE_SHAPE[num]
        for bbox in item["annotation"]:
            count[int(bbox[6])] += 1
        num += 1
    assert num == 9
    assert count == [3, 2, 1, 2, 4, 3]


def test_voc_class_index():
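    """Read Detection data with a custom class_indexing map and check that only the listed class labels appear."""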
    class_index = {'car': 0, 'cat': 1, 'train': 5}
    data1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", class_indexing=class_index, decode=True)
    class_index1 = data1.get_class_indexing()
    assert class_index1 == {'car': 0, 'cat': 1, 'train': 5}
    data1 = data1.shuffle(4)
    class_index2 = data1.get_class_indexing()
    assert class_index2 == {'car': 0, 'cat': 1, 'train': 5}
    num = 0
    count = [0, 0, 0, 0, 0, 0]
    for item in data1.create_dict_iterator():
        for bbox in item["annotation"]:
            assert int(bbox[6]) == 0 or int(bbox[6]) == 1 or int(bbox[6]) == 5
            count[int(bbox[6])] += 1
        num += 1
    assert num == 6
    assert count == [3, 2, 0, 0, 0, 3]


def test_voc_get_class_indexing():
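    """Check the default class indexing and the per-class bounding box counts."""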
    data1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True)
    class_index1 = data1.get_class_indexing()
    assert class_index1 == {'car': 0, 'cat': 1, 'chair': 2, 'dog': 3, 'person': 4, 'train': 5}
    data1 = data1.shuffle(4)
    class_index2 = data1.get_class_indexing()
    assert class_index2 == {'car': 0, 'cat': 1, 'chair': 2, 'dog': 3, 'person': 4, 'train': 5}
    num = 0
    count = [0, 0, 0, 0, 0, 0]
    for item in data1.create_dict_iterator():
        for bbox in item["annotation"]:
            assert (int(bbox[6]) == 0 or int(bbox[6]) == 1 or int(bbox[6]) == 2 or int(bbox[6]) == 3
                    or int(bbox[6]) == 4 or int(bbox[6]) == 5)
            count[int(bbox[6])] += 1
        num += 1
    assert num == 9
    assert count == [3, 2, 1, 2, 4, 3]


def test_case_0():
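    """Resize, repeat, and batch the Segmentation task, then count the resulting batches."""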
    data1 = ds.VOCDataset(DATA_DIR, task="Segmentation", mode="train", decode=True)
    resize_op = vision.Resize((224, 224))
    data1 = data1.map(input_columns=["image"], operations=resize_op)
    data1 = data1.map(input_columns=["target"], operations=resize_op)
    repeat_num = 4
    data1 = data1.repeat(repeat_num)
    batch_size = 2
    data1 = data1.batch(batch_size, drop_remainder=True)
    num = 0
    for _ in data1.create_dict_iterator():
        num += 1
    assert num == 20


def test_case_1():
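    """Resize, repeat, and pad-batch the Detection task, then count the resulting batches."""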
    data1 = ds.VOCDataset(DATA_DIR, task="Detection", mode="train", decode=True)
    resize_op = vision.Resize((224, 224))
    data1 = data1.map(input_columns=["image"], operations=resize_op)
    repeat_num = 4
    data1 = data1.repeat(repeat_num)
    batch_size = 2
    data1 = data1.batch(batch_size, drop_remainder=True, pad_info={})
    num = 0
    for _ in data1.create_dict_iterator():
        num += 1
    assert num == 18


def test_case_2():
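    """Split the Segmentation task 50/50 without randomization and check both halves."""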
    data1 = ds.VOCDataset(DATA_DIR, task="Segmentation", mode="train", decode=True)
    sizes = [0.5, 0.5]
    randomize = False
    dataset1, dataset2 = data1.split(sizes=sizes, randomize=randomize)
    num_iter = 0
    for _ in dataset1.create_dict_iterator():
        num_iter += 1
    assert num_iter == 5
    num_iter = 0
    for _ in dataset2.create_dict_iterator():
        num_iter += 1
    assert num_iter == 5


def test_voc_exception():
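    """Verify that an invalid task, unsupported class_indexing, bad mode, or broken annotation XML raises an error."""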
    try:
        data1 = ds.VOCDataset(DATA_DIR, task="InvalidTask", mode="train", decode=True)
        for _ in data1.create_dict_iterator():
            pass
        assert False
    except ValueError:
        pass

    try:
        data2 = ds.VOCDataset(DATA_DIR, task="Segmentation", mode="train", class_indexing={"cat": 0}, decode=True)
        for _ in data2.create_dict_iterator():
            pass
        assert False
    except ValueError:
        pass

    try:
        data3 = ds.VOCDataset(DATA_DIR, task="Detection", mode="notexist", decode=True)
        for _ in data3.create_dict_iterator():
            pass
        assert False
    except ValueError:
        pass

    try:
        data4 = ds.VOCDataset(DATA_DIR, task="Detection", mode="xmlnotexist", decode=True)
        for _ in data4.create_dict_iterator():
            pass
        assert False
    except RuntimeError:
        pass

    try:
        data5 = ds.VOCDataset(DATA_DIR, task="Detection", mode="invalidxml", decode=True)
        for _ in data5.create_dict_iterator():
            pass
        assert False
    except RuntimeError:
        pass

    try:
        data6 = ds.VOCDataset(DATA_DIR, task="Detection", mode="xmlnoobject", decode=True)
        for _ in data6.create_dict_iterator():
            pass
        assert False
    except RuntimeError:
        pass


if __name__ == '__main__':
    test_voc_segmentation()
    test_voc_detection()
    test_voc_class_index()
    test_voc_get_class_indexing()
    test_case_0()
    test_case_1()
    test_case_2()
    test_voc_exception()