nasfcos_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy

import torch.nn as nn
from mmcv.cnn import ConvModule, Scale

from mmdet.models.dense_heads.fcos_head import FCOSHead
from ..builder import HEADS


@HEADS.register_module()
class NASFCOSHead(FCOSHead):
    """Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.

    It is quite similar with FCOS head, except for the searched structure of
    classification branch and bbox regression branch, where a structure of
    "dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead.
    """

    def __init__(self, *args, init_cfg=None, **kwargs):
        if init_cfg is None:
            init_cfg = [
                dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']),
                dict(
                    type='Normal',
                    std=0.01,
                    override=[
                        dict(name='conv_reg'),
                        dict(name='conv_centerness'),
                        dict(
                            name='conv_cls',
                            type='Normal',
                            std=0.01,
                            bias_prob=0.01)
                    ]),
            ]
        super(NASFCOSHead, self).__init__(*args, init_cfg=init_cfg, **kwargs)

    def _init_layers(self):
        """Initialize layers of the head."""
        dconv3x3_config = dict(
            type='DCNv2',
            kernel_size=3,
            use_bias=True,
            deform_groups=2,
            padding=1)
        conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
        conv1x1_config = dict(type='Conv', kernel_size=1)
        self.arch_config = [
            dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
        ]
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i, op_ in enumerate(self.arch_config):
            op = copy.deepcopy(op_)
            chn = self.in_channels if i == 0 else self.feat_channels
            assert isinstance(op, dict)
            use_bias = op.pop('use_bias', False)
            padding = op.pop('padding', 0)
            kernel_size = op.pop('kernel_size')
            module = ConvModule(
                chn,
                self.feat_channels,
                kernel_size,
                stride=1,
                padding=padding,
                norm_cfg=self.norm_cfg,
                bias=use_bias,
                conv_cfg=op)
            self.cls_convs.append(copy.deepcopy(module))
            self.reg_convs.append(copy.deepcopy(module))
        self.conv_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)
        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
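
The snippet below is an illustrative usage sketch, not part of the file above: it shows how this head could be instantiated directly and run on dummy FPN features. The class count, channel counts, strides, and feature sizes are assumptions chosen for the demo, and the DCNv2 layers in the searched branches require mmcv-full with the deformable-convolution ops available.

    import torch

    # Adjust the import to wherever this file lives in your project.
    from mmdet.models.dense_heads.nasfcos_head import NASFCOSHead

    head = NASFCOSHead(
        num_classes=80,      # assumed COCO-style class count
        in_channels=256,     # assumed FPN output channels
        feat_channels=256,
        strides=[8, 16, 32, 64, 128])

    # Five pyramid levels of dummy features, one per stride; spatial sizes are arbitrary.
    feats = [torch.rand(1, 256, s, s) for s in (64, 32, 16, 8, 4)]
    with torch.no_grad():
        cls_scores, bbox_preds, centernesses = head(feats)

    # Each output is a list with one tensor per FPN level, e.g. (1, 80, H, W) class maps.
    print([c.shape for c in cls_scores])

Note that every level shares the same searched branch layout ("dconv3x3, conv3x3, dconv3x3, conv1x1" built in `_init_layers`), while `self.scales` provides a per-level learnable scale for the regression outputs, as in the original FCOS head.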
