You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

cityscapes.py 14 kB

2 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335
  1. # Copyright (c) OpenMMLab. All rights reserved.
  2. # Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/cityscapes.py # noqa
  3. # and https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa
  4. import glob
  5. import os
  6. import os.path as osp
  7. import tempfile
  8. from collections import OrderedDict
  9. import mmcv
  10. import numpy as np
  11. import pycocotools.mask as maskUtils
  12. from mmcv.utils import print_log
  13. from .builder import DATASETS
  14. from .coco import CocoDataset
@DATASETS.register_module()
class CityscapesDataset(CocoDataset):
    """COCO-format dataset class for Cityscapes instance segmentation.

    Reuses the ``CocoDataset`` loading and evaluation machinery, restricting
    annotations to the eight Cityscapes "thing" classes, and adds dumping of
    results to the txt/png layout expected by the official
    ``cityscapesscripts`` evaluation protocol.
    """

    # The eight Cityscapes instance ("thing") categories, in label order.
    CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
               'bicycle')
  19. def _filter_imgs(self, min_size=32):
  20. """Filter images too small or without ground truths."""
  21. valid_inds = []
  22. # obtain images that contain annotation
  23. ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
  24. # obtain images that contain annotations of the required categories
  25. ids_in_cat = set()
  26. for i, class_id in enumerate(self.cat_ids):
  27. ids_in_cat |= set(self.coco.cat_img_map[class_id])
  28. # merge the image id sets of the two conditions and use the merged set
  29. # to filter out images if self.filter_empty_gt=True
  30. ids_in_cat &= ids_with_ann
  31. valid_img_ids = []
  32. for i, img_info in enumerate(self.data_infos):
  33. img_id = img_info['id']
  34. ann_ids = self.coco.getAnnIds(imgIds=[img_id])
  35. ann_info = self.coco.loadAnns(ann_ids)
  36. all_iscrowd = all([_['iscrowd'] for _ in ann_info])
  37. if self.filter_empty_gt and (self.img_ids[i] not in ids_in_cat
  38. or all_iscrowd):
  39. continue
  40. if min(img_info['width'], img_info['height']) >= min_size:
  41. valid_inds.append(i)
  42. valid_img_ids.append(img_id)
  43. self.img_ids = valid_img_ids
  44. return valid_inds
  45. def _parse_ann_info(self, img_info, ann_info):
  46. """Parse bbox and mask annotation.
  47. Args:
  48. img_info (dict): Image info of an image.
  49. ann_info (list[dict]): Annotation info of an image.
  50. Returns:
  51. dict: A dict containing the following keys: bboxes, \
  52. bboxes_ignore, labels, masks, seg_map. \
  53. "masks" are already decoded into binary masks.
  54. """
  55. gt_bboxes = []
  56. gt_labels = []
  57. gt_bboxes_ignore = []
  58. gt_masks_ann = []
  59. for i, ann in enumerate(ann_info):
  60. if ann.get('ignore', False):
  61. continue
  62. x1, y1, w, h = ann['bbox']
  63. if ann['area'] <= 0 or w < 1 or h < 1:
  64. continue
  65. if ann['category_id'] not in self.cat_ids:
  66. continue
  67. bbox = [x1, y1, x1 + w, y1 + h]
  68. if ann.get('iscrowd', False):
  69. gt_bboxes_ignore.append(bbox)
  70. else:
  71. gt_bboxes.append(bbox)
  72. gt_labels.append(self.cat2label[ann['category_id']])
  73. gt_masks_ann.append(ann['segmentation'])
  74. if gt_bboxes:
  75. gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
  76. gt_labels = np.array(gt_labels, dtype=np.int64)
  77. else:
  78. gt_bboxes = np.zeros((0, 4), dtype=np.float32)
  79. gt_labels = np.array([], dtype=np.int64)
  80. if gt_bboxes_ignore:
  81. gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
  82. else:
  83. gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
  84. ann = dict(
  85. bboxes=gt_bboxes,
  86. labels=gt_labels,
  87. bboxes_ignore=gt_bboxes_ignore,
  88. masks=gt_masks_ann,
  89. seg_map=img_info['segm_file'])
  90. return ann
  91. def results2txt(self, results, outfile_prefix):
  92. """Dump the detection results to a txt file.
  93. Args:
  94. results (list[list | tuple]): Testing results of the
  95. dataset.
  96. outfile_prefix (str): The filename prefix of the json files.
  97. If the prefix is "somepath/xxx",
  98. the txt files will be named "somepath/xxx.txt".
  99. Returns:
  100. list[str]: Result txt files which contains corresponding \
  101. instance segmentation images.
  102. """
  103. try:
  104. import cityscapesscripts.helpers.labels as CSLabels
  105. except ImportError:
  106. raise ImportError('Please run "pip install citscapesscripts" to '
  107. 'install cityscapesscripts first.')
  108. result_files = []
  109. os.makedirs(outfile_prefix, exist_ok=True)
  110. prog_bar = mmcv.ProgressBar(len(self))
  111. for idx in range(len(self)):
  112. result = results[idx]
  113. filename = self.data_infos[idx]['filename']
  114. basename = osp.splitext(osp.basename(filename))[0]
  115. pred_txt = osp.join(outfile_prefix, basename + '_pred.txt')
  116. bbox_result, segm_result = result
  117. bboxes = np.vstack(bbox_result)
  118. # segm results
  119. if isinstance(segm_result, tuple):
  120. # Some detectors use different scores for bbox and mask,
  121. # like Mask Scoring R-CNN. Score of segm will be used instead
  122. # of bbox score.
  123. segms = mmcv.concat_list(segm_result[0])
  124. mask_score = segm_result[1]
  125. else:
  126. # use bbox score for mask score
  127. segms = mmcv.concat_list(segm_result)
  128. mask_score = [bbox[-1] for bbox in bboxes]
  129. labels = [
  130. np.full(bbox.shape[0], i, dtype=np.int32)
  131. for i, bbox in enumerate(bbox_result)
  132. ]
  133. labels = np.concatenate(labels)
  134. assert len(bboxes) == len(segms) == len(labels)
  135. num_instances = len(bboxes)
  136. prog_bar.update()
  137. with open(pred_txt, 'w') as fout:
  138. for i in range(num_instances):
  139. pred_class = labels[i]
  140. classes = self.CLASSES[pred_class]
  141. class_id = CSLabels.name2label[classes].id
  142. score = mask_score[i]
  143. mask = maskUtils.decode(segms[i]).astype(np.uint8)
  144. png_filename = osp.join(outfile_prefix,
  145. basename + f'_{i}_{classes}.png')
  146. mmcv.imwrite(mask, png_filename)
  147. fout.write(f'{osp.basename(png_filename)} {class_id} '
  148. f'{score}\n')
  149. result_files.append(pred_txt)
  150. return result_files
  151. def format_results(self, results, txtfile_prefix=None):
  152. """Format the results to txt (standard format for Cityscapes
  153. evaluation).
  154. Args:
  155. results (list): Testing results of the dataset.
  156. txtfile_prefix (str | None): The prefix of txt files. It includes
  157. the file path and the prefix of filename, e.g., "a/b/prefix".
  158. If not specified, a temp file will be created. Default: None.
  159. Returns:
  160. tuple: (result_files, tmp_dir), result_files is a dict containing \
  161. the json filepaths, tmp_dir is the temporal directory created \
  162. for saving txt/png files when txtfile_prefix is not specified.
  163. """
  164. assert isinstance(results, list), 'results must be a list'
  165. assert len(results) == len(self), (
  166. 'The length of results is not equal to the dataset len: {} != {}'.
  167. format(len(results), len(self)))
  168. assert isinstance(results, list), 'results must be a list'
  169. assert len(results) == len(self), (
  170. 'The length of results is not equal to the dataset len: {} != {}'.
  171. format(len(results), len(self)))
  172. if txtfile_prefix is None:
  173. tmp_dir = tempfile.TemporaryDirectory()
  174. txtfile_prefix = osp.join(tmp_dir.name, 'results')
  175. else:
  176. tmp_dir = None
  177. result_files = self.results2txt(results, txtfile_prefix)
  178. return result_files, tmp_dir
  179. def evaluate(self,
  180. results,
  181. metric='bbox',
  182. logger=None,
  183. outfile_prefix=None,
  184. classwise=False,
  185. proposal_nums=(100, 300, 1000),
  186. iou_thrs=np.arange(0.5, 0.96, 0.05)):
  187. """Evaluation in Cityscapes/COCO protocol.
  188. Args:
  189. results (list[list | tuple]): Testing results of the dataset.
  190. metric (str | list[str]): Metrics to be evaluated. Options are
  191. 'bbox', 'segm', 'proposal', 'proposal_fast'.
  192. logger (logging.Logger | str | None): Logger used for printing
  193. related information during evaluation. Default: None.
  194. outfile_prefix (str | None): The prefix of output file. It includes
  195. the file path and the prefix of filename, e.g., "a/b/prefix".
  196. If results are evaluated with COCO protocol, it would be the
  197. prefix of output json file. For example, the metric is 'bbox'
  198. and 'segm', then json files would be "a/b/prefix.bbox.json" and
  199. "a/b/prefix.segm.json".
  200. If results are evaluated with cityscapes protocol, it would be
  201. the prefix of output txt/png files. The output files would be
  202. png images under folder "a/b/prefix/xxx/" and the file name of
  203. images would be written into a txt file
  204. "a/b/prefix/xxx_pred.txt", where "xxx" is the video name of
  205. cityscapes. If not specified, a temp file will be created.
  206. Default: None.
  207. classwise (bool): Whether to evaluating the AP for each class.
  208. proposal_nums (Sequence[int]): Proposal number used for evaluating
  209. recalls, such as recall@100, recall@1000.
  210. Default: (100, 300, 1000).
  211. iou_thrs (Sequence[float]): IoU threshold used for evaluating
  212. recalls. If set to a list, the average recall of all IoUs will
  213. also be computed. Default: 0.5.
  214. Returns:
  215. dict[str, float]: COCO style evaluation metric or cityscapes mAP \
  216. and AP@50.
  217. """
  218. eval_results = dict()
  219. metrics = metric.copy() if isinstance(metric, list) else [metric]
  220. if 'cityscapes' in metrics:
  221. eval_results.update(
  222. self._evaluate_cityscapes(results, outfile_prefix, logger))
  223. metrics.remove('cityscapes')
  224. # left metrics are all coco metric
  225. if len(metrics) > 0:
  226. # create CocoDataset with CityscapesDataset annotation
  227. self_coco = CocoDataset(self.ann_file, self.pipeline.transforms,
  228. None, self.data_root, self.img_prefix,
  229. self.seg_prefix, self.proposal_file,
  230. self.test_mode, self.filter_empty_gt)
  231. # TODO: remove this in the future
  232. # reload annotations of correct class
  233. self_coco.CLASSES = self.CLASSES
  234. self_coco.data_infos = self_coco.load_annotations(self.ann_file)
  235. eval_results.update(
  236. self_coco.evaluate(results, metrics, logger, outfile_prefix,
  237. classwise, proposal_nums, iou_thrs))
  238. return eval_results
  239. def _evaluate_cityscapes(self, results, txtfile_prefix, logger):
  240. """Evaluation in Cityscapes protocol.
  241. Args:
  242. results (list): Testing results of the dataset.
  243. txtfile_prefix (str | None): The prefix of output txt file
  244. logger (logging.Logger | str | None): Logger used for printing
  245. related information during evaluation. Default: None.
  246. Returns:
  247. dict[str: float]: Cityscapes evaluation results, contains 'mAP' \
  248. and 'AP@50'.
  249. """
  250. try:
  251. import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval # noqa
  252. except ImportError:
  253. raise ImportError('Please run "pip install citscapesscripts" to '
  254. 'install cityscapesscripts first.')
  255. msg = 'Evaluating in Cityscapes style'
  256. if logger is None:
  257. msg = '\n' + msg
  258. print_log(msg, logger=logger)
  259. result_files, tmp_dir = self.format_results(results, txtfile_prefix)
  260. if tmp_dir is None:
  261. result_dir = osp.join(txtfile_prefix, 'results')
  262. else:
  263. result_dir = osp.join(tmp_dir.name, 'results')
  264. eval_results = OrderedDict()
  265. print_log(f'Evaluating results under {result_dir} ...', logger=logger)
  266. # set global states in cityscapes evaluation API
  267. CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')
  268. CSEval.args.predictionPath = os.path.abspath(result_dir)
  269. CSEval.args.predictionWalk = None
  270. CSEval.args.JSONOutput = False
  271. CSEval.args.colorized = False
  272. CSEval.args.gtInstancesFile = os.path.join(result_dir,
  273. 'gtInstances.json')
  274. CSEval.args.groundTruthSearch = os.path.join(
  275. self.img_prefix.replace('leftImg8bit', 'gtFine'),
  276. '*/*_gtFine_instanceIds.png')
  277. groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)
  278. assert len(groundTruthImgList), 'Cannot find ground truth images' \
  279. f' in {CSEval.args.groundTruthSearch}.'
  280. predictionImgList = []
  281. for gt in groundTruthImgList:
  282. predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))
  283. CSEval_results = CSEval.evaluateImgLists(predictionImgList,
  284. groundTruthImgList,
  285. CSEval.args)['averages']
  286. eval_results['mAP'] = CSEval_results['allAp']
  287. eval_results['AP@50'] = CSEval_results['allAp50%']
  288. if tmp_dir is not None:
  289. tmp_dir.cleanup()
  290. return eval_results

No Description

Contributors (3)