test_panoptic_dataset.py

# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile

import mmcv
import numpy as np

from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET, CocoPanopticDataset

try:
    from panopticapi.utils import id2rgb
except ImportError:
    id2rgb = None
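

# panopticapi is an optional dependency: its id2rgb helper converts a 2D map of
# integer segment ids into the RGB encoding used by COCO panoptic PNG files
# (id = R + G * 256 + B * 256**2). Tests that need to write such ground-truth
# PNGs return early when it is not installed.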
def _create_panoptic_style_json(json_name):
    image1 = {
        'id': 0,
        'width': 640,
        'height': 640,
        'file_name': 'fake_name1.jpg',
    }

    image2 = {
        'id': 1,
        'width': 640,
        'height': 800,
        'file_name': 'fake_name2.jpg',
    }

    images = [image1, image2]

    annotations = [
        {
            'segments_info': [{
                'id': 1,
                'category_id': 0,
                'area': 400,
                'bbox': [50, 60, 20, 20],
                'iscrowd': 0
            }, {
                'id': 2,
                'category_id': 1,
                'area': 900,
                'bbox': [100, 120, 30, 30],
                'iscrowd': 0
            }, {
                'id': 3,
                'category_id': 2,
                'iscrowd': 0,
                'bbox': [1, 189, 612, 285],
                'area': 70036
            }],
            'file_name': 'fake_name1.jpg',
            'image_id': 0
        },
        {
            'segments_info': [
                {
                    # Different from instance-style json, segment ids may be
                    # duplicated across images in panoptic-style json
                    'id': 1,
                    'category_id': 0,
                    'area': 400,
                    'bbox': [50, 60, 20, 20],
                    'iscrowd': 0
                },
                {
                    'id': 4,
                    'category_id': 1,
                    'area': 900,
                    'bbox': [100, 120, 30, 30],
                    'iscrowd': 1
                },
                {
                    'id': 5,
                    'category_id': 2,
                    'iscrowd': 0,
                    'bbox': [100, 200, 200, 300],
                    'area': 66666
                },
                {
                    'id': 6,
                    'category_id': 0,
                    'iscrowd': 0,
                    'bbox': [1, 189, -10, 285],
                    'area': 70036
                }
            ],
            'file_name': 'fake_name2.jpg',
            'image_id': 1
        }
    ]

    categories = [{
        'id': 0,
        'name': 'car',
        'supercategory': 'car',
        'isthing': 1
    }, {
        'id': 1,
        'name': 'person',
        'supercategory': 'person',
        'isthing': 1
    }, {
        'id': 2,
        'name': 'wall',
        'supercategory': 'wall',
        'isthing': 0
    }]

    fake_json = {
        'images': images,
        'annotations': annotations,
        'categories': categories
    }

    mmcv.dump(fake_json, json_name)
    return fake_json
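

# The loading test below relies on how CocoPanopticDataset filters the segments
# of the second image in the fixture above: the valid thing segment (id 1) goes
# to 'bboxes'/'labels', the crowd segment (id 4, iscrowd=1) goes to
# 'bboxes_ignore', the stuff segment (id 5, isthing=0) only contributes a mask,
# and the segment with a negative bbox width (id 6) is dropped entirely.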
def test_load_panoptic_style_json():
    tmp_dir = tempfile.TemporaryDirectory()
    fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
    fake_json = _create_panoptic_style_json(fake_json_file)

    dataset = CocoPanopticDataset(
        ann_file=fake_json_file,
        classes=[cat['name'] for cat in fake_json['categories']],
        pipeline=[])

    ann = dataset.get_ann_info(0)
    # two legal instances
    assert ann['bboxes'].shape[0] == ann['labels'].shape[0] == 2
    # three masks for both foreground and background
    assert len(ann['masks']) == 3

    ann = dataset.get_ann_info(1)
    # one legal instance, one illegal instance,
    # one crowd instance and one background mask
    assert ann['bboxes'].shape[0] == ann['labels'].shape[0] == 1
    assert ann['bboxes_ignore'].shape[0] == 1
    assert len(ann['masks']) == 3
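

# The helper below writes a tiny panoptic ground truth for the evaluation test:
# a 60x80 image whose background is a 'wall' segment (id 4) with two 'person'
# boxes (ids 1 and 2) and one 'dog' box (id 3) painted on top. It is saved both
# as a COCO panoptic json file and as a segment-id PNG produced with id2rgb.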
def _create_panoptic_gt_annotations(ann_file):
    categories = [{
        'id': 0,
        'name': 'person',
        'supercategory': 'person',
        'isthing': 1
    }, {
        'id': 1,
        'name': 'dog',
        'supercategory': 'dog',
        'isthing': 1
    }, {
        'id': 2,
        'name': 'wall',
        'supercategory': 'wall',
        'isthing': 0
    }]

    images = [{
        'id': 0,
        'width': 80,
        'height': 60,
        'file_name': 'fake_name1.jpg',
    }]

    annotations = [{
        'segments_info': [{
            'id': 1,
            'category_id': 0,
            'area': 400,
            'bbox': [10, 10, 10, 40],
            'iscrowd': 0
        }, {
            'id': 2,
            'category_id': 0,
            'area': 400,
            'bbox': [30, 10, 10, 40],
            'iscrowd': 0
        }, {
            'id': 3,
            'category_id': 1,
            'iscrowd': 0,
            'bbox': [50, 10, 10, 5],
            'area': 50
        }, {
            'id': 4,
            'category_id': 2,
            'iscrowd': 0,
            'bbox': [0, 0, 80, 60],
            'area': 3950
        }],
        'file_name': 'fake_name1.png',
        'image_id': 0
    }]

    gt_json = {
        'images': images,
        'annotations': annotations,
        'categories': categories
    }

    # 4 is the id of the background (wall) annotation
    gt = np.zeros((60, 80), dtype=np.int64) + 4
    gt_bboxes = np.array([[10, 10, 10, 40], [30, 10, 10, 40], [50, 10, 10, 5]],
                         dtype=np.int64)
    for i in range(3):
        x, y, w, h = gt_bboxes[i]
        gt[y:y + h, x:x + w] = i + 1  # segment ids start from 1

    # id2rgb returns an RGB image, while mmcv.imwrite expects BGR,
    # hence the channel flip before saving the panoptic ground truth PNG
    gt = id2rgb(gt).astype(np.uint8)
    img_path = osp.join(osp.dirname(ann_file), 'fake_name1.png')
    mmcv.imwrite(gt[:, :, ::-1], img_path)

    mmcv.dump(gt_json, ann_file)
    return gt_json


def test_panoptic_evaluation():
    # the ground truth PNG cannot be created without panopticapi
    if id2rgb is None:
        return

    # TP for background class, IoU=3576/4324=0.827
    # 2 is the category id of the background class
    pred = np.zeros((60, 80), dtype=np.int64) + 2
    pred_bboxes = np.array(
        [
            [11, 11, 10, 40],  # TP IoU=351/449=0.78
            [38, 10, 10, 40],  # FP
            [51, 10, 10, 5]  # TP IoU=45/55=0.818
        ],
        dtype=np.int64)
    pred_labels = np.array([0, 0, 1], dtype=np.int64)
    for i in range(3):
        x, y, w, h = pred_bboxes[i]
        pred[y:y + h, x:x + w] = (i + 1) * INSTANCE_OFFSET + pred_labels[i]
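
    # The loop above packs the class label and the instance index into a single
    # integer per pixel: pan_id = label + instance_id * INSTANCE_OFFSET, so
    # label = pan_id % INSTANCE_OFFSET and instance_id = pan_id // INSTANCE_OFFSET.
    # For example, the first predicted box (instance 1, label 0 = 'person') fills
    # its pixels with 1 * INSTANCE_OFFSET + 0, while pixels not covered by any
    # box keep the plain stuff label 2 ('wall').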
    tmp_dir = tempfile.TemporaryDirectory()
    ann_file = osp.join(tmp_dir.name, 'panoptic.json')
    gt_json = _create_panoptic_gt_annotations(ann_file)

    results = [{'pan_results': pred}]

    dataset = CocoPanopticDataset(
        ann_file=ann_file,
        seg_prefix=tmp_dir.name,
        classes=[cat['name'] for cat in gt_json['categories']],
        pipeline=[])

    # For 'person', sq = 0.78 / 1 (one TP),
    #               rq = 1 / (1 tp + 0.5 * (1 fn + 1 fp)) = 0.5
    # For 'dog', sq = 0.818, rq = 1 / 1
    # For 'wall', sq = 0.827, rq = 1 / 1
    # Here are the results for all classes:
    # +--------+--------+--------+---------+------------+
    # |        | PQ     | SQ     | RQ      | categories |
    # +--------+--------+--------+---------+------------+
    # | All    | 67.869 | 80.898 | 83.333  | 3          |
    # | Things | 60.453 | 79.996 | 75.000  | 2          |
    # | Stuff  | 82.701 | 82.701 | 100.000 | 1          |
    # +--------+--------+--------+---------+------------+
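    #
    # Sanity check of the table above (per-class PQ = SQ * RQ, then averaged):
    #   person: SQ = 351/449   ~= 0.782, RQ = 0.5 -> PQ ~= 0.391
    #   dog:    SQ = 45/55     ~= 0.818, RQ = 1.0 -> PQ ~= 0.818
    #   wall:   SQ = 3576/4324 ~= 0.827, RQ = 1.0 -> PQ ~= 0.827
    # All    = mean over the three classes -> PQ ~= 0.679, SQ ~= 0.809, RQ ~= 0.833
    # Things = mean over person and dog    -> PQ ~= 0.605, SQ ~= 0.800, RQ ~= 0.750
    # Stuff  = wall only                   -> PQ = SQ ~= 0.827, RQ = 1.0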
    parsed_results = dataset.evaluate(results)
    assert np.isclose(parsed_results['PQ'], 67.869)
    assert np.isclose(parsed_results['SQ'], 80.898)
    assert np.isclose(parsed_results['RQ'], 83.333)
    assert np.isclose(parsed_results['PQ_th'], 60.453)
    assert np.isclose(parsed_results['SQ_th'], 79.996)
    assert np.isclose(parsed_results['RQ_th'], 75.000)
    assert np.isclose(parsed_results['PQ_st'], 82.701)
    assert np.isclose(parsed_results['SQ_st'], 82.701)
    assert np.isclose(parsed_results['RQ_st'], 100.000)

    # test jsonfile_prefix
    outfile_prefix = osp.join(tmp_dir.name, 'results')
    parsed_results = dataset.evaluate(results, jsonfile_prefix=outfile_prefix)
    assert np.isclose(parsed_results['PQ'], 67.869)
    assert np.isclose(parsed_results['SQ'], 80.898)
    assert np.isclose(parsed_results['RQ'], 83.333)
    assert np.isclose(parsed_results['PQ_th'], 60.453)
    assert np.isclose(parsed_results['SQ_th'], 79.996)
    assert np.isclose(parsed_results['RQ_th'], 75.000)
    assert np.isclose(parsed_results['PQ_st'], 82.701)
    assert np.isclose(parsed_results['SQ_st'], 82.701)
    assert np.isclose(parsed_results['RQ_st'], 100.000)

    # test classwise
    parsed_results = dataset.evaluate(results, classwise=True)
    assert np.isclose(parsed_results['PQ'], 67.869)
    assert np.isclose(parsed_results['SQ'], 80.898)
    assert np.isclose(parsed_results['RQ'], 83.333)
    assert np.isclose(parsed_results['PQ_th'], 60.453)
    assert np.isclose(parsed_results['SQ_th'], 79.996)
    assert np.isclose(parsed_results['RQ_th'], 75.000)
    assert np.isclose(parsed_results['PQ_st'], 82.701)
    assert np.isclose(parsed_results['SQ_st'], 82.701)
    assert np.isclose(parsed_results['RQ_st'], 100.000)
