vfnet_head.py 31 kB

# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmcv.ops import DeformConv2d
from mmcv.runner import force_fp32

from mmdet.core import (MlvlPointGenerator, bbox_overlaps, build_assigner,
                        build_prior_generator, build_sampler, multi_apply,
                        reduce_mean)
from ..builder import HEADS, build_loss
from .atss_head import ATSSHead
from .fcos_head import FCOSHead

INF = 1e8


@HEADS.register_module()
class VFNetHead(ATSSHead, FCOSHead):
    """Head of `VarifocalNet (VFNet): An IoU-aware Dense Object
    Detector.<https://arxiv.org/abs/2008.13367>`_.

    The VFNet predicts IoU-aware classification scores which mix the
    object presence confidence and object localization accuracy as the
    detection score. It is built on the FCOS architecture and uses ATSS
    for defining positive/negative training examples. The VFNet is trained
    with Varifocal Loss and employs star-shaped deformable convolution to
    extract features for a bbox.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        regress_ranges (tuple[tuple[int, int]]): Regress range of multiple
            level points.
        center_sampling (bool): If true, use center sampling. Default: False.
        center_sample_radius (float): Radius of center sampling. Default: 1.5.
        sync_num_pos (bool): If true, synchronize the number of positive
            examples across GPUs. Default: True
        gradient_mul (float): The multiplier to gradients from bbox refinement
            and recognition. Default: 0.1.
        bbox_norm_type (str): The bbox normalization type, 'reg_denom' or
            'stride'. Default: reg_denom
        loss_cls_fl (dict): Config of focal loss.
        use_vfl (bool): If true, use varifocal loss for training.
            Default: True.
        loss_cls (dict): Config of varifocal loss.
        loss_bbox (dict): Config of localization loss, GIoU Loss.
        loss_bbox_refine (dict): Config of localization refinement loss,
            GIoU Loss.
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: norm_cfg=dict(type='GN', num_groups=32,
            requires_grad=True).
        use_atss (bool): If true, use ATSS to define positive/negative
            examples. Default: True.
        anchor_generator (dict): Config of anchor generator for ATSS.
        init_cfg (dict or list[dict], optional): Initialization config dict.

    Example:
        >>> self = VFNetHead(11, 7)
        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
        >>> cls_score, bbox_pred, bbox_pred_refine = self.forward(feats)
        >>> assert len(cls_score) == len(self.scales)
    """  # noqa: E501

    def __init__(self,
                 num_classes,
                 in_channels,
                 regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
                                 (512, INF)),
                 center_sampling=False,
                 center_sample_radius=1.5,
                 sync_num_pos=True,
                 gradient_mul=0.1,
                 bbox_norm_type='reg_denom',
                 loss_cls_fl=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 use_vfl=True,
                 loss_cls=dict(
                     type='VarifocalLoss',
                     use_sigmoid=True,
                     alpha=0.75,
                     gamma=2.0,
                     iou_weighted=True,
                     loss_weight=1.0),
                 loss_bbox=dict(type='GIoULoss', loss_weight=1.5),
                 loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0),
                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
                 use_atss=True,
                 reg_decoded_bbox=True,
                 anchor_generator=dict(
                     type='AnchorGenerator',
                     ratios=[1.0],
                     octave_base_scale=8,
                     scales_per_octave=1,
                     center_offset=0.0,
                     strides=[8, 16, 32, 64, 128]),
                 init_cfg=dict(
                     type='Normal',
                     layer='Conv2d',
                     std=0.01,
                     override=dict(
                         type='Normal',
                         name='vfnet_cls',
                         std=0.01,
                         bias_prob=0.01)),
                 **kwargs):
        # dcn base offsets, adapted from reppoints_head.py
        self.num_dconv_points = 9
        self.dcn_kernel = int(np.sqrt(self.num_dconv_points))
        self.dcn_pad = int((self.dcn_kernel - 1) / 2)
        dcn_base = np.arange(-self.dcn_pad,
                             self.dcn_pad + 1).astype(np.float64)
        dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
        dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
        dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
            (-1))
        self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)

        super(FCOSHead, self).__init__(
            num_classes,
            in_channels,
            norm_cfg=norm_cfg,
            init_cfg=init_cfg,
            **kwargs)
        self.regress_ranges = regress_ranges
        self.reg_denoms = [
            regress_range[-1] for regress_range in regress_ranges
        ]
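        # the top regress range ends at INF, so use twice the previous
        # level's upper bound as the normalization denominator instead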
        self.reg_denoms[-1] = self.reg_denoms[-2] * 2
        self.center_sampling = center_sampling
        self.center_sample_radius = center_sample_radius
        self.sync_num_pos = sync_num_pos
        self.bbox_norm_type = bbox_norm_type
        self.gradient_mul = gradient_mul
        self.use_vfl = use_vfl
        if self.use_vfl:
            self.loss_cls = build_loss(loss_cls)
        else:
            self.loss_cls = build_loss(loss_cls_fl)
        self.loss_bbox = build_loss(loss_bbox)
        self.loss_bbox_refine = build_loss(loss_bbox_refine)

        # for getting ATSS targets
        self.use_atss = use_atss
        self.reg_decoded_bbox = reg_decoded_bbox
        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
        self.anchor_center_offset = anchor_generator['center_offset']
        self.num_base_priors = self.prior_generator.num_base_priors[0]

        self.sampling = False
        if self.train_cfg:
            self.assigner = build_assigner(self.train_cfg.assigner)
            sampler_cfg = dict(type='PseudoSampler')
            self.sampler = build_sampler(sampler_cfg, context=self)

        # only be used in `get_atss_targets` when `use_atss` is True
        self.atss_prior_generator = build_prior_generator(anchor_generator)
        self.fcos_prior_generator = MlvlPointGenerator(
            anchor_generator['strides'],
            self.anchor_center_offset if self.use_atss else 0.5)

        # In order to reuse the `get_bboxes` in `BaseDenseHead`.
        # Only be used in testing phase.
        self.prior_generator = self.fcos_prior_generator

    @property
    def num_anchors(self):
        """
        Returns:
            int: Number of anchors on each point of feature map.
        """
        warnings.warn('DeprecationWarning: `num_anchors` is deprecated, '
                      'please use "num_base_priors" instead')
        return self.num_base_priors

    @property
    def anchor_generator(self):
        warnings.warn('DeprecationWarning: anchor_generator is deprecated, '
                      'please use "atss_prior_generator" instead')
        return self.prior_generator

    def _init_layers(self):
        """Initialize layers of the head."""
        super(FCOSHead, self)._init_cls_convs()
        super(FCOSHead, self)._init_reg_convs()
        self.relu = nn.ReLU(inplace=True)
        self.vfnet_reg_conv = ConvModule(
            self.feat_channels,
            self.feat_channels,
            3,
            stride=1,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            bias=self.conv_bias)
        self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])

        self.vfnet_reg_refine_dconv = DeformConv2d(
            self.feat_channels,
            self.feat_channels,
            self.dcn_kernel,
            1,
            padding=self.dcn_pad)
        self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides])

        self.vfnet_cls_dconv = DeformConv2d(
            self.feat_channels,
            self.feat_channels,
            self.dcn_kernel,
            1,
            padding=self.dcn_pad)
        self.vfnet_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple:
                cls_scores (list[Tensor]): Box iou-aware scores for each scale
                    level, each is a 4D-tensor, the channel number is
                    num_points * num_classes.
                bbox_preds (list[Tensor]): Box offsets for each
                    scale level, each is a 4D-tensor, the channel number is
                    num_points * 4.
                bbox_preds_refine (list[Tensor]): Refined Box offsets for
                    each scale level, each is a 4D-tensor, the channel
                    number is num_points * 4.
        """
        return multi_apply(self.forward_single, feats, self.scales,
                           self.scales_refine, self.strides, self.reg_denoms)

    def forward_single(self, x, scale, scale_refine, stride, reg_denom):
        """Forward features of a single scale level.

        Args:
            x (Tensor): FPN feature maps of the specified stride.
            scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
                the bbox prediction.
            scale_refine (:obj: `mmcv.cnn.Scale`): Learnable scale module to
                resize the refined bbox prediction.
            stride (int): The corresponding stride for feature maps,
                used to normalize the bbox prediction when
                bbox_norm_type = 'stride'.
            reg_denom (int): The corresponding regression range for feature
                maps, only used to normalize the bbox prediction when
                bbox_norm_type = 'reg_denom'.

        Returns:
            tuple: iou-aware cls scores for each box, bbox predictions and
                refined bbox predictions of input feature maps.
        """
        cls_feat = x
        reg_feat = x

        for cls_layer in self.cls_convs:
            cls_feat = cls_layer(cls_feat)

        for reg_layer in self.reg_convs:
            reg_feat = reg_layer(reg_feat)

        # predict the bbox_pred of different level
        reg_feat_init = self.vfnet_reg_conv(reg_feat)
        if self.bbox_norm_type == 'reg_denom':
            bbox_pred = scale(
                self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom
        elif self.bbox_norm_type == 'stride':
            bbox_pred = scale(
                self.vfnet_reg(reg_feat_init)).float().exp() * stride
        else:
            raise NotImplementedError

        # compute star deformable convolution offsets
        # converting dcn_offset to reg_feat.dtype thus VFNet can be
        # trained with FP16
        dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul,
                                          stride).to(reg_feat.dtype)

        # refine the bbox_pred
        reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset))
        bbox_pred_refine = scale_refine(
            self.vfnet_reg_refine(reg_feat)).float().exp()
        bbox_pred_refine = bbox_pred_refine * bbox_pred.detach()

        # predict the iou-aware cls score
        cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset))
        cls_score = self.vfnet_cls(cls_feat)

        if self.training:
            return cls_score, bbox_pred, bbox_pred_refine
        else:
            return cls_score, bbox_pred_refine

    def star_dcn_offset(self, bbox_pred, gradient_mul, stride):
        """Compute the star deformable conv offsets.

        Args:
            bbox_pred (Tensor): Predicted bbox distance offsets (l, t, r, b).
            gradient_mul (float): Gradient multiplier.
            stride (int): The corresponding stride for feature maps,
                used to project the bbox onto the feature map.

        Returns:
            dcn_offsets (Tensor): The offsets for deformable convolution.
        """
        dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred)
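        # keep the full prediction in the forward pass, but let only a
        # `gradient_mul` fraction of the gradient from the refinement branch
        # flow back into the initial bbox branch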
        bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \
            gradient_mul * bbox_pred
        # map to the feature map scale
        bbox_pred_grad_mul = bbox_pred_grad_mul / stride
        N, C, H, W = bbox_pred.size()

        x1 = bbox_pred_grad_mul[:, 0, :, :]
        y1 = bbox_pred_grad_mul[:, 1, :, :]
        x2 = bbox_pred_grad_mul[:, 2, :, :]
        y2 = bbox_pred_grad_mul[:, 3, :, :]
        bbox_pred_grad_mul_offset = bbox_pred.new_zeros(
            N, 2 * self.num_dconv_points, H, W)
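        # One (y, x) offset pair per kernel point, laid out row-major as
        # (TL, T, TR, L, C, R, BL, B, BR). Entries left untouched stay zero;
        # subtracting dcn_base_offset below cancels the regular 3x3 grid, so
        # each sample lands at the point location plus its assigned offset:
        # the box corners, the edge midpoints and the center, i.e. the
        # star-shaped sampling pattern.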
        bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1  # -y1
        bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1  # -x1
        bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1  # -y1
        bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1  # -y1
        bbox_pred_grad_mul_offset[:, 5, :, :] = x2  # x2
        bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1  # -x1
        bbox_pred_grad_mul_offset[:, 11, :, :] = x2  # x2
        bbox_pred_grad_mul_offset[:, 12, :, :] = y2  # y2
        bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1  # -x1
        bbox_pred_grad_mul_offset[:, 14, :, :] = y2  # y2
        bbox_pred_grad_mul_offset[:, 16, :, :] = y2  # y2
        bbox_pred_grad_mul_offset[:, 17, :, :] = x2  # x2
        dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset

        return dcn_offset

    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine'))
    def loss(self,
             cls_scores,
             bbox_preds,
             bbox_preds_refine,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute loss of the head.

        Args:
            cls_scores (list[Tensor]): Box iou-aware scores for each scale
                level, each is a 4D-tensor, the channel number is
                num_points * num_classes.
            bbox_preds (list[Tensor]): Box offsets for each
                scale level, each is a 4D-tensor, the channel number is
                num_points * 4.
            bbox_preds_refine (list[Tensor]): Refined Box offsets for
                each scale level, each is a 4D-tensor, the channel
                number is num_points * 4.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
                Default: None.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        all_level_points = self.fcos_prior_generator.grid_priors(
            featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device)
        labels, label_weights, bbox_targets, bbox_weights = self.get_targets(
            cls_scores, all_level_points, gt_bboxes, gt_labels, img_metas,
            gt_bboxes_ignore)

        num_imgs = cls_scores[0].size(0)
        # flatten cls_scores, bbox_preds and bbox_preds_refine
        flatten_cls_scores = [
            cls_score.permute(0, 2, 3,
                              1).reshape(-1,
                                         self.cls_out_channels).contiguous()
            for cls_score in cls_scores
        ]
        flatten_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
            for bbox_pred in bbox_preds
        ]
        flatten_bbox_preds_refine = [
            bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
            for bbox_pred_refine in bbox_preds_refine
        ]
        flatten_cls_scores = torch.cat(flatten_cls_scores)
        flatten_bbox_preds = torch.cat(flatten_bbox_preds)
        flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine)
        flatten_labels = torch.cat(labels)
        flatten_bbox_targets = torch.cat(bbox_targets)
        # repeat points to align with bbox_preds
        flatten_points = torch.cat(
            [points.repeat(num_imgs, 1) for points in all_level_points])

        # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = torch.where(
            ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0]
        num_pos = len(pos_inds)

        pos_bbox_preds = flatten_bbox_preds[pos_inds]
        pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds]
        pos_labels = flatten_labels[pos_inds]

        # sync num_pos across all gpus
        if self.sync_num_pos:
            num_pos_avg_per_gpu = reduce_mean(
                pos_inds.new_tensor(num_pos).float()).item()
            num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0)
        else:
            num_pos_avg_per_gpu = num_pos

        pos_bbox_targets = flatten_bbox_targets[pos_inds]
        pos_points = flatten_points[pos_inds]

        pos_decoded_bbox_preds = self.bbox_coder.decode(
            pos_points, pos_bbox_preds)
        pos_decoded_target_preds = self.bbox_coder.decode(
            pos_points, pos_bbox_targets)
        iou_targets_ini = bbox_overlaps(
            pos_decoded_bbox_preds,
            pos_decoded_target_preds.detach(),
            is_aligned=True).clamp(min=1e-6)
        bbox_weights_ini = iou_targets_ini.clone().detach()
        bbox_avg_factor_ini = reduce_mean(
            bbox_weights_ini.sum()).clamp_(min=1).item()

        pos_decoded_bbox_preds_refine = \
            self.bbox_coder.decode(pos_points, pos_bbox_preds_refine)
        iou_targets_rf = bbox_overlaps(
            pos_decoded_bbox_preds_refine,
            pos_decoded_target_preds.detach(),
            is_aligned=True).clamp(min=1e-6)
        bbox_weights_rf = iou_targets_rf.clone().detach()
        bbox_avg_factor_rf = reduce_mean(
            bbox_weights_rf.sum()).clamp_(min=1).item()

        if num_pos > 0:
            loss_bbox = self.loss_bbox(
                pos_decoded_bbox_preds,
                pos_decoded_target_preds.detach(),
                weight=bbox_weights_ini,
                avg_factor=bbox_avg_factor_ini)

            loss_bbox_refine = self.loss_bbox_refine(
                pos_decoded_bbox_preds_refine,
                pos_decoded_target_preds.detach(),
                weight=bbox_weights_rf,
                avg_factor=bbox_avg_factor_rf)

            # build IoU-aware cls_score targets
            if self.use_vfl:
                pos_ious = iou_targets_rf.clone().detach()
                cls_iou_targets = torch.zeros_like(flatten_cls_scores)
                cls_iou_targets[pos_inds, pos_labels] = pos_ious
        else:
            loss_bbox = pos_bbox_preds.sum() * 0
            loss_bbox_refine = pos_bbox_preds_refine.sum() * 0
            if self.use_vfl:
                cls_iou_targets = torch.zeros_like(flatten_cls_scores)

        if self.use_vfl:
            loss_cls = self.loss_cls(
                flatten_cls_scores,
                cls_iou_targets,
                avg_factor=num_pos_avg_per_gpu)
        else:
            loss_cls = self.loss_cls(
                flatten_cls_scores,
                flatten_labels,
                weight=label_weights,
                avg_factor=num_pos_avg_per_gpu)

        return dict(
            loss_cls=loss_cls,
            loss_bbox=loss_bbox,
            loss_bbox_rf=loss_bbox_refine)

    def get_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels,
                    img_metas, gt_bboxes_ignore):
        """A wrapper for computing ATSS and FCOS targets for points in multiple
        images.

        Args:
            cls_scores (list[Tensor]): Box iou-aware scores for each scale
                level with shape (N, num_points * num_classes, H, W).
            mlvl_points (list[Tensor]): Points of each fpn level, each has
                shape (num_points, 2).
            gt_bboxes (list[Tensor]): Ground truth bboxes of each image,
                each has shape (num_gt, 4).
            gt_labels (list[Tensor]): Ground truth labels of each box,
                each has shape (num_gt,).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be
                ignored, shape (num_ignored_gts, 4).

        Returns:
            tuple:
                labels_list (list[Tensor]): Labels of each level.
                label_weights (Tensor/None): Label weights of all levels.
                bbox_targets_list (list[Tensor]): Regression targets of each
                    level, (l, t, r, b).
                bbox_weights (Tensor/None): Bbox weights of all levels.
        """
        if self.use_atss:
            return self.get_atss_targets(cls_scores, mlvl_points, gt_bboxes,
                                         gt_labels, img_metas,
                                         gt_bboxes_ignore)
        else:
            self.norm_on_bbox = False
            return self.get_fcos_targets(mlvl_points, gt_bboxes, gt_labels)

    def _get_target_single(self, *args, **kwargs):
        """Avoid ambiguity in multiple inheritance."""
        if self.use_atss:
            return ATSSHead._get_target_single(self, *args, **kwargs)
        else:
            return FCOSHead._get_target_single(self, *args, **kwargs)

    def get_fcos_targets(self, points, gt_bboxes_list, gt_labels_list):
        """Compute FCOS regression and classification targets for points in
        multiple images.

        Args:
            points (list[Tensor]): Points of each fpn level, each has shape
                (num_points, 2).
            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
                each has shape (num_gt, 4).
            gt_labels_list (list[Tensor]): Ground truth labels of each box,
                each has shape (num_gt,).

        Returns:
            tuple:
                labels (list[Tensor]): Labels of each level.
                label_weights: None, to be compatible with ATSS targets.
                bbox_targets (list[Tensor]): BBox targets of each level.
                bbox_weights: None, to be compatible with ATSS targets.
        """
        labels, bbox_targets = FCOSHead.get_targets(self, points,
                                                    gt_bboxes_list,
                                                    gt_labels_list)
        label_weights = None
        bbox_weights = None
        return labels, label_weights, bbox_targets, bbox_weights

    def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
        """Get anchors according to feature map sizes.

        Args:
            featmap_sizes (list[tuple]): Multi-level feature map sizes.
            img_metas (list[dict]): Image meta info.
            device (torch.device | str): Device for returned tensors

        Returns:
            tuple:
                anchor_list (list[Tensor]): Anchors of each image.
                valid_flag_list (list[Tensor]): Valid flags of each image.
        """
        num_imgs = len(img_metas)

        # since feature map sizes of all images are the same, we only compute
        # anchors for one time
        multi_level_anchors = self.atss_prior_generator.grid_priors(
            featmap_sizes, device=device)
        anchor_list = [multi_level_anchors for _ in range(num_imgs)]

        # for each image, we compute valid flags of multi level anchors
        valid_flag_list = []
        for img_id, img_meta in enumerate(img_metas):
            multi_level_flags = self.atss_prior_generator.valid_flags(
                featmap_sizes, img_meta['pad_shape'], device=device)
            valid_flag_list.append(multi_level_flags)

        return anchor_list, valid_flag_list

    def get_atss_targets(self,
                         cls_scores,
                         mlvl_points,
                         gt_bboxes,
                         gt_labels,
                         img_metas,
                         gt_bboxes_ignore=None):
        """A wrapper for computing ATSS targets for points in multiple images.

        Args:
            cls_scores (list[Tensor]): Box iou-aware scores for each scale
                level with shape (N, num_points * num_classes, H, W).
            mlvl_points (list[Tensor]): Points of each fpn level, each has
                shape (num_points, 2).
            gt_bboxes (list[Tensor]): Ground truth bboxes of each image,
                each has shape (num_gt, 4).
            gt_labels (list[Tensor]): Ground truth labels of each box,
                each has shape (num_gt,).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be
                ignored, shape (num_ignored_gts, 4). Default: None.

        Returns:
            tuple:
                labels_list (list[Tensor]): Labels of each level.
                label_weights (Tensor): Label weights of all levels.
                bbox_targets_list (list[Tensor]): Regression targets of each
                    level, (l, t, r, b).
                bbox_weights (Tensor): Bbox weights of all levels.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(
            featmap_sizes
        ) == self.atss_prior_generator.num_levels == \
            self.fcos_prior_generator.num_levels

        device = cls_scores[0].device

        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1

        cls_reg_targets = ATSSHead.get_targets(
            self,
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels,
            unmap_outputs=True)
        if cls_reg_targets is None:
            return None

        (anchor_list, labels_list, label_weights_list, bbox_targets_list,
         bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets

        bbox_targets_list = [
            bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list
        ]

        num_imgs = len(img_metas)
        # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format
        bbox_targets_list = self.transform_bbox_targets(
            bbox_targets_list, mlvl_points, num_imgs)

        labels_list = [labels.reshape(-1) for labels in labels_list]
        label_weights_list = [
            label_weights.reshape(-1) for label_weights in label_weights_list
        ]
        bbox_weights_list = [
            bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list
        ]
        label_weights = torch.cat(label_weights_list)
        bbox_weights = torch.cat(bbox_weights_list)
        return labels_list, label_weights, bbox_targets_list, bbox_weights

    def transform_bbox_targets(self, decoded_bboxes, mlvl_points, num_imgs):
        """Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format.

        Args:
            decoded_bboxes (list[Tensor]): Regression targets of each level,
                in the form of (x1, y1, x2, y2).
            mlvl_points (list[Tensor]): Points of each fpn level, each has
                shape (num_points, 2).
            num_imgs (int): the number of images in a batch.

        Returns:
            bbox_targets (list[Tensor]): Regression targets of each level in
                the form of (l, t, r, b).
        """
        # TODO: Re-implemented in Class PointCoder
        assert len(decoded_bboxes) == len(mlvl_points)
        num_levels = len(decoded_bboxes)
        mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points]
        bbox_targets = []
        for i in range(num_levels):
            bbox_target = self.bbox_coder.encode(mlvl_points[i],
                                                 decoded_bboxes[i])
            bbox_targets.append(bbox_target)

        return bbox_targets

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        """Override the method in the parent class to avoid changing para's
        name."""
        pass

    def _get_points_single(self,
                           featmap_size,
                           stride,
                           dtype,
                           device,
                           flatten=False):
        """Get points according to feature map size.

        This function will be deprecated soon.
        """
        warnings.warn(
            '`_get_points_single` in `VFNetHead` will be '
            'deprecated soon, we support a multi level point generator now, '
            'you can get points of a single level feature map '
            'with `self.fcos_prior_generator.single_level_grid_priors` ')

        h, w = featmap_size
        x_range = torch.arange(
            0, w * stride, stride, dtype=dtype, device=device)
        y_range = torch.arange(
            0, h * stride, stride, dtype=dtype, device=device)
        y, x = torch.meshgrid(y_range, x_range)
        # to be compatible with anchor points in ATSS
        if self.use_atss:
            points = torch.stack(
                (x.reshape(-1), y.reshape(-1)), dim=-1) + \
                stride * self.anchor_center_offset
        else:
            points = torch.stack(
                (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
        return points
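
A minimal usage sketch (not from this file): it assumes the mmdet 2.x registry API this head is written against, and the config below simply mirrors the class docstring example. The deformable convolutions come from mmcv-full, so a compiled mmcv op (and, on older mmcv versions, a CUDA device) is needed to actually run it.

import torch
from mmdet.models import build_head

# 11 classes, 7 input channels, as in the class docstring example
head = build_head(dict(type='VFNetHead', num_classes=11, in_channels=7))

# five fake FPN feature maps with 7 channels each, one per head stride
feats = [torch.rand(1, 7, s, s) for s in [64, 32, 16, 8, 4]]

# in training mode the head returns three per-level lists
cls_scores, bbox_preds, bbox_preds_refine = head(feats)
assert len(cls_scores) == len(head.scales)
assert bbox_preds[0].shape[1] == 4  # (l, t, r, b) distances per location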
