You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

anchor_free_head.py 14 kB

2 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350
  1. # Copyright (c) OpenMMLab. All rights reserved.
  2. import warnings
  3. from abc import abstractmethod
  4. import torch
  5. import torch.nn as nn
  6. from mmcv.cnn import ConvModule
  7. from mmcv.runner import force_fp32
  8. from mmdet.core import build_bbox_coder, multi_apply
  9. from mmdet.core.anchor.point_generator import MlvlPointGenerator
  10. from ..builder import HEADS, build_loss
  11. from .base_dense_head import BaseDenseHead
  12. from .dense_test_mixins import BBoxTestMixin
@HEADS.register_module()
class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
    """Anchor-free head (FCOS, Fovea, RepPoints, etc.).

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        feat_channels (int): Number of hidden channels. Used in child classes.
        stacked_convs (int): Number of stacking convs of the head.
        strides (tuple): Downsample factor of each feature map.
        dcn_on_last_conv (bool): If true, use dcn in the last layer of
            towers. Default: False.
        conv_bias (bool | str): If specified as `auto`, it will be decided by
            the norm_cfg. Bias of conv will be set as True if `norm_cfg` is
            None, otherwise False. Default: "auto".
        loss_cls (dict): Config of classification loss.
        loss_bbox (dict): Config of localization loss.
        bbox_coder (dict): Config of bbox coder. Defaults
            'DistancePointBBoxCoder'.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        train_cfg (dict): Training config of anchor head.
        test_cfg (dict): Testing config of anchor head.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """  # noqa: W605

    # Checkpoint schema version; consulted in ``_load_from_state_dict`` to
    # translate key names saved by older releases.
    _version = 1

    def __init__(self,
                 num_classes,
                 in_channels,
                 feat_channels=256,
                 stacked_convs=4,
                 strides=(4, 8, 16, 32, 64),
                 dcn_on_last_conv=False,
                 conv_bias='auto',
                 loss_cls=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 loss_bbox=dict(type='IoULoss', loss_weight=1.0),
                 bbox_coder=dict(type='DistancePointBBoxCoder'),
                 conv_cfg=None,
                 norm_cfg=None,
                 train_cfg=None,
                 test_cfg=None,
                 init_cfg=dict(
                     type='Normal',
                     layer='Conv2d',
                     std=0.01,
                     override=dict(
                         type='Normal',
                         name='conv_cls',
                         std=0.01,
                         bias_prob=0.01))):
        super(AnchorFreeHead, self).__init__(init_cfg)
        self.num_classes = num_classes
        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
        if self.use_sigmoid_cls:
            # Sigmoid classification: one output channel per foreground class.
            self.cls_out_channels = num_classes
        else:
            # Softmax classification needs an extra background channel.
            self.cls_out_channels = num_classes + 1
        self.in_channels = in_channels
        self.feat_channels = feat_channels
        self.stacked_convs = stacked_convs
        self.strides = strides
        self.dcn_on_last_conv = dcn_on_last_conv
        assert conv_bias == 'auto' or isinstance(conv_bias, bool)
        self.conv_bias = conv_bias
        self.loss_cls = build_loss(loss_cls)
        self.loss_bbox = build_loss(loss_bbox)
        self.bbox_coder = build_bbox_coder(bbox_coder)
        self.prior_generator = MlvlPointGenerator(strides)
        # In order to keep a more general interface and be consistent with
        # anchor_head. We can think of point like one anchor
        self.num_base_priors = self.prior_generator.num_base_priors[0]
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # Flag read by mmcv's fp16 utilities (``force_fp32`` decorators).
        self.fp16_enabled = False
        self._init_layers()
  95. def _init_layers(self):
  96. """Initialize layers of the head."""
  97. self._init_cls_convs()
  98. self._init_reg_convs()
  99. self._init_predictor()
  100. def _init_cls_convs(self):
  101. """Initialize classification conv layers of the head."""
  102. self.cls_convs = nn.ModuleList()
  103. for i in range(self.stacked_convs):
  104. chn = self.in_channels if i == 0 else self.feat_channels
  105. if self.dcn_on_last_conv and i == self.stacked_convs - 1:
  106. conv_cfg = dict(type='DCNv2')
  107. else:
  108. conv_cfg = self.conv_cfg
  109. self.cls_convs.append(
  110. ConvModule(
  111. chn,
  112. self.feat_channels,
  113. 3,
  114. stride=1,
  115. padding=1,
  116. conv_cfg=conv_cfg,
  117. norm_cfg=self.norm_cfg,
  118. bias=self.conv_bias))
  119. def _init_reg_convs(self):
  120. """Initialize bbox regression conv layers of the head."""
  121. self.reg_convs = nn.ModuleList()
  122. for i in range(self.stacked_convs):
  123. chn = self.in_channels if i == 0 else self.feat_channels
  124. if self.dcn_on_last_conv and i == self.stacked_convs - 1:
  125. conv_cfg = dict(type='DCNv2')
  126. else:
  127. conv_cfg = self.conv_cfg
  128. self.reg_convs.append(
  129. ConvModule(
  130. chn,
  131. self.feat_channels,
  132. 3,
  133. stride=1,
  134. padding=1,
  135. conv_cfg=conv_cfg,
  136. norm_cfg=self.norm_cfg,
  137. bias=self.conv_bias))
  138. def _init_predictor(self):
  139. """Initialize predictor layers of the head."""
  140. self.conv_cls = nn.Conv2d(
  141. self.feat_channels, self.cls_out_channels, 3, padding=1)
  142. self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
  143. def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
  144. missing_keys, unexpected_keys, error_msgs):
  145. """Hack some keys of the model state dict so that can load checkpoints
  146. of previous version."""
  147. version = local_metadata.get('version', None)
  148. if version is None:
  149. # the key is different in early versions
  150. # for example, 'fcos_cls' become 'conv_cls' now
  151. bbox_head_keys = [
  152. k for k in state_dict.keys() if k.startswith(prefix)
  153. ]
  154. ori_predictor_keys = []
  155. new_predictor_keys = []
  156. # e.g. 'fcos_cls' or 'fcos_reg'
  157. for key in bbox_head_keys:
  158. ori_predictor_keys.append(key)
  159. key = key.split('.')
  160. conv_name = None
  161. if key[1].endswith('cls'):
  162. conv_name = 'conv_cls'
  163. elif key[1].endswith('reg'):
  164. conv_name = 'conv_reg'
  165. elif key[1].endswith('centerness'):
  166. conv_name = 'conv_centerness'
  167. else:
  168. assert NotImplementedError
  169. if conv_name is not None:
  170. key[1] = conv_name
  171. new_predictor_keys.append('.'.join(key))
  172. else:
  173. ori_predictor_keys.pop(-1)
  174. for i in range(len(new_predictor_keys)):
  175. state_dict[new_predictor_keys[i]] = state_dict.pop(
  176. ori_predictor_keys[i])
  177. super()._load_from_state_dict(state_dict, prefix, local_metadata,
  178. strict, missing_keys, unexpected_keys,
  179. error_msgs)
  180. def forward(self, feats):
  181. """Forward features from the upstream network.
  182. Args:
  183. feats (tuple[Tensor]): Features from the upstream network, each is
  184. a 4D-tensor.
  185. Returns:
  186. tuple: Usually contain classification scores and bbox predictions.
  187. cls_scores (list[Tensor]): Box scores for each scale level,
  188. each is a 4D-tensor, the channel number is
  189. num_points * num_classes.
  190. bbox_preds (list[Tensor]): Box energies / deltas for each scale
  191. level, each is a 4D-tensor, the channel number is
  192. num_points * 4.
  193. """
  194. return multi_apply(self.forward_single, feats)[:2]
  195. def forward_single(self, x):
  196. """Forward features of a single scale level.
  197. Args:
  198. x (Tensor): FPN feature maps of the specified stride.
  199. Returns:
  200. tuple: Scores for each class, bbox predictions, features
  201. after classification and regression conv layers, some
  202. models needs these features like FCOS.
  203. """
  204. cls_feat = x
  205. reg_feat = x
  206. for cls_layer in self.cls_convs:
  207. cls_feat = cls_layer(cls_feat)
  208. cls_score = self.conv_cls(cls_feat)
  209. for reg_layer in self.reg_convs:
  210. reg_feat = reg_layer(reg_feat)
  211. bbox_pred = self.conv_reg(reg_feat)
  212. return cls_score, bbox_pred, cls_feat, reg_feat
    @abstractmethod
    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute loss of the head.

        Abstract: concrete anchor-free heads must implement this.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_points * num_classes.
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_points * 4.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.

        Raises:
            NotImplementedError: always, in this base class.
        """
        raise NotImplementedError
    @abstractmethod
    def get_targets(self, points, gt_bboxes_list, gt_labels_list):
        """Compute regression, classification and centerness targets for
        points in multiple images.

        Abstract: concrete anchor-free heads must implement this.

        Args:
            points (list[Tensor]): Points of each fpn level, each has shape
                (num_points, 2).
            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
                each has shape (num_gt, 4).
            gt_labels_list (list[Tensor]): Ground truth labels of each box,
                each has shape (num_gt,).

        Raises:
            NotImplementedError: always, in this base class.
        """
        raise NotImplementedError
  252. def _get_points_single(self,
  253. featmap_size,
  254. stride,
  255. dtype,
  256. device,
  257. flatten=False):
  258. """Get points of a single scale level.
  259. This function will be deprecated soon.
  260. """
  261. warnings.warn(
  262. '`_get_points_single` in `AnchorFreeHead` will be '
  263. 'deprecated soon, we support a multi level point generator now'
  264. 'you can get points of a single level feature map '
  265. 'with `self.prior_generator.single_level_grid_priors` ')
  266. h, w = featmap_size
  267. # First create Range with the default dtype, than convert to
  268. # target `dtype` for onnx exporting.
  269. x_range = torch.arange(w, device=device).to(dtype)
  270. y_range = torch.arange(h, device=device).to(dtype)
  271. y, x = torch.meshgrid(y_range, x_range)
  272. if flatten:
  273. y = y.flatten()
  274. x = x.flatten()
  275. return y, x
  276. def get_points(self, featmap_sizes, dtype, device, flatten=False):
  277. """Get points according to feature map sizes.
  278. Args:
  279. featmap_sizes (list[tuple]): Multi-level feature map sizes.
  280. dtype (torch.dtype): Type of points.
  281. device (torch.device): Device of points.
  282. Returns:
  283. tuple: points of each image.
  284. """
  285. warnings.warn(
  286. '`get_points` in `AnchorFreeHead` will be '
  287. 'deprecated soon, we support a multi level point generator now'
  288. 'you can get points of all levels '
  289. 'with `self.prior_generator.grid_priors` ')
  290. mlvl_points = []
  291. for i in range(len(featmap_sizes)):
  292. mlvl_points.append(
  293. self._get_points_single(featmap_sizes[i], self.strides[i],
  294. dtype, device, flatten))
  295. return mlvl_points
  296. def aug_test(self, feats, img_metas, rescale=False):
  297. """Test function with test time augmentation.
  298. Args:
  299. feats (list[Tensor]): the outer list indicates test-time
  300. augmentations and inner Tensor should have a shape NxCxHxW,
  301. which contains features for all images in the batch.
  302. img_metas (list[list[dict]]): the outer list indicates test-time
  303. augs (multiscale, flip, etc.) and the inner list indicates
  304. images in a batch. each dict has image information.
  305. rescale (bool, optional): Whether to rescale the results.
  306. Defaults to False.
  307. Returns:
  308. list[ndarray]: bbox results of each class
  309. """
  310. return self.aug_test_bboxes(feats, img_metas, rescale=rescale)

No Description

Contributors (3)