test_resnet.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv import assert_params_all_zeros
from mmcv.ops import DeformConv2dPack
from torch.nn.modules import AvgPool2d, GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm

from mmdet.models.backbones import ResNet, ResNetV1d
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock

from .utils import check_norm_state, is_block, is_norm


def test_resnet_basic_block():
    with pytest.raises(AssertionError):
        # Not implemented yet.
        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
        BasicBlock(64, 64, dcn=dcn)

    with pytest.raises(AssertionError):
        # Not implemented yet.
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3')
        ]
        BasicBlock(64, 64, plugins=plugins)

    with pytest.raises(AssertionError):
        # Not implemented yet
        plugins = [
            dict(
                cfg=dict(
                    type='GeneralizedAttention',
                    spatial_range=-1,
                    num_heads=8,
                    attention_type='0010',
                    kv_stride=2),
                position='after_conv2')
        ]
        BasicBlock(64, 64, plugins=plugins)

    # test BasicBlock structure and forward
    block = BasicBlock(64, 64)
    assert block.conv1.in_channels == 64
    assert block.conv1.out_channels == 64
    assert block.conv1.kernel_size == (3, 3)
    assert block.conv2.in_channels == 64
    assert block.conv2.out_channels == 64
    assert block.conv2.kernel_size == (3, 3)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])

    # Test BasicBlock with checkpoint forward
    block = BasicBlock(64, 64, with_cp=True)
    assert block.with_cp
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])


def test_resnet_bottleneck():
    with pytest.raises(AssertionError):
        # Style must be in ['pytorch', 'caffe']
        Bottleneck(64, 64, style='tensorflow')

    with pytest.raises(AssertionError):
        # Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv4')
        ]
        Bottleneck(64, 16, plugins=plugins)

    with pytest.raises(AssertionError):
        # Need to specify different postfix to avoid duplicate plugin name
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3'),
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3')
        ]
        Bottleneck(64, 16, plugins=plugins)

    with pytest.raises(KeyError):
        # Plugin type is not supported
        plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
        Bottleneck(64, 16, plugins=plugins)

    # Test Bottleneck with checkpoint forward
    block = Bottleneck(64, 16, with_cp=True)
    assert block.with_cp
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])

    # Test Bottleneck style
    block = Bottleneck(64, 64, stride=2, style='pytorch')
    assert block.conv1.stride == (1, 1)
    assert block.conv2.stride == (2, 2)
    block = Bottleneck(64, 64, stride=2, style='caffe')
    assert block.conv1.stride == (2, 2)
    assert block.conv2.stride == (1, 1)

    # Test Bottleneck DCN
    dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
    with pytest.raises(AssertionError):
        Bottleneck(64, 64, dcn=dcn, conv_cfg=dict(type='Conv'))
    block = Bottleneck(64, 64, dcn=dcn)
    assert isinstance(block.conv2, DeformConv2dPack)

    # Test Bottleneck forward
    block = Bottleneck(64, 16)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
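
    # Bottleneck uses expansion=4: with inplanes=64 and planes=16, conv1 and
    # conv2 work on 16 channels while conv3 restores 16 * 4 = 64 output
    # channels. Plugins placed after conv1/conv2 therefore see 16 channels
    # and plugins placed after conv3 see 64 channels in the asserts below.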
    # Test Bottleneck with 1 ContextBlock after conv3
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            position='after_conv3')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    assert block.context_block.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])

    # Test Bottleneck with 1 GeneralizedAttention after conv2
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            position='after_conv2')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    assert block.gen_attention_block.in_channels == 16
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])

    # Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D
    # after conv2, 1 ContextBlock after conv3
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            position='after_conv2'),
        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            position='after_conv3')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    assert block.gen_attention_block.in_channels == 16
    assert block.nonlocal_block.in_channels == 16
    assert block.context_block.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])

    # Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after
    # conv3
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
            position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
            position='after_conv3'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3),
            position='after_conv3')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    assert block.context_block1.in_channels == 16
    assert block.context_block2.in_channels == 64
    assert block.context_block3.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])


def test_simplified_basic_block():
    with pytest.raises(AssertionError):
        # Not implemented yet.
        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
        SimplifiedBasicBlock(64, 64, dcn=dcn)

    with pytest.raises(AssertionError):
        # Not implemented yet.
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3')
        ]
        SimplifiedBasicBlock(64, 64, plugins=plugins)

    with pytest.raises(AssertionError):
        # Not implemented yet
        plugins = [
            dict(
                cfg=dict(
                    type='GeneralizedAttention',
                    spatial_range=-1,
                    num_heads=8,
                    attention_type='0010',
                    kv_stride=2),
                position='after_conv2')
        ]
        SimplifiedBasicBlock(64, 64, plugins=plugins)

    with pytest.raises(AssertionError):
        # Not implemented yet
        SimplifiedBasicBlock(64, 64, with_cp=True)

    # test SimplifiedBasicBlock structure and forward
    block = SimplifiedBasicBlock(64, 64)
    assert block.conv1.in_channels == 64
    assert block.conv1.out_channels == 64
    assert block.conv1.kernel_size == (3, 3)
    assert block.conv2.in_channels == 64
    assert block.conv2.out_channels == 64
    assert block.conv2.kernel_size == (3, 3)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])

    # test SimplifiedBasicBlock without norm
    block = SimplifiedBasicBlock(64, 64, norm_cfg=None)
    assert block.norm1 is None
    assert block.norm2 is None
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])


def test_resnet_res_layer():
    # Test ResLayer of 3 Bottleneck w/o downsample
    layer = ResLayer(Bottleneck, 64, 16, 3)
    assert len(layer) == 3
    assert layer[0].conv1.in_channels == 64
    assert layer[0].conv1.out_channels == 16
    for i in range(1, len(layer)):
        assert layer[i].conv1.in_channels == 64
        assert layer[i].conv1.out_channels == 16
    for i in range(len(layer)):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
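
    # ResLayer adds a downsample branch to the first block whenever
    # stride != 1 or inplanes != planes * block.expansion; the cases below
    # therefore get a downsample conv (or AvgPool2d + conv when
    # avg_down=True) on the first block, while the remaining blocks do not.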
    # Test ResLayer of 3 Bottleneck with downsample
    layer = ResLayer(Bottleneck, 64, 64, 3)
    assert layer[0].downsample[0].out_channels == 256
    for i in range(1, len(layer)):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 256, 56, 56])

    # Test ResLayer of 3 Bottleneck with stride=2
    layer = ResLayer(Bottleneck, 64, 64, 3, stride=2)
    assert layer[0].downsample[0].out_channels == 256
    assert layer[0].downsample[0].stride == (2, 2)
    for i in range(1, len(layer)):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 256, 28, 28])

    # Test ResLayer of 3 Bottleneck with stride=2 and average downsample
    layer = ResLayer(Bottleneck, 64, 64, 3, stride=2, avg_down=True)
    assert isinstance(layer[0].downsample[0], AvgPool2d)
    assert layer[0].downsample[1].out_channels == 256
    assert layer[0].downsample[1].stride == (1, 1)
    for i in range(1, len(layer)):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 256, 28, 28])

    # Test ResLayer of 3 BasicBlock with stride=2 and downsample_first=False
    layer = ResLayer(BasicBlock, 64, 64, 3, stride=2, downsample_first=False)
    assert layer[2].downsample[0].out_channels == 64
    assert layer[2].downsample[0].stride == (2, 2)
    for i in range(len(layer) - 1):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 64, 28, 28])


def test_resnet_stem():
    # Test default stem_channels
    model = ResNet(50)
    assert model.stem_channels == 64
    assert model.conv1.out_channels == 64
    assert model.norm1.num_features == 64

    # Test default stem_channels, with base_channels=3
    model = ResNet(50, base_channels=3)
    assert model.stem_channels == 3
    assert model.conv1.out_channels == 3
    assert model.norm1.num_features == 3
    assert model.layer1[0].conv1.in_channels == 3

    # Test stem_channels=3
    model = ResNet(50, stem_channels=3)
    assert model.stem_channels == 3
    assert model.conv1.out_channels == 3
    assert model.norm1.num_features == 3
    assert model.layer1[0].conv1.in_channels == 3

    # Test stem_channels=3, with base_channels=2
    model = ResNet(50, stem_channels=3, base_channels=2)
    assert model.stem_channels == 3
    assert model.conv1.out_channels == 3
    assert model.norm1.num_features == 3
    assert model.layer1[0].conv1.in_channels == 3
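
    # ResNetV1d replaces the single 7x7 stem conv with a deep stem of three
    # 3x3 conv-BN-ReLU units; the first two convs use stem_channels // 2
    # channels and the last one uses stem_channels, which is what the
    # index-based asserts below check for stem_channels=6.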
    # Test V1d stem_channels
    model = ResNetV1d(depth=50, stem_channels=6)
    model.train()
    assert model.stem[0].out_channels == 3
    assert model.stem[1].num_features == 3
    assert model.stem[3].out_channels == 3
    assert model.stem[4].num_features == 3
    assert model.stem[6].out_channels == 6
    assert model.stem[7].num_features == 6
    assert model.layer1[0].conv1.in_channels == 6


def test_resnet_backbone():
    """Test resnet backbone."""
    with pytest.raises(KeyError):
        # ResNet depth should be in [18, 34, 50, 101, 152]
        ResNet(20)

    with pytest.raises(AssertionError):
        # In ResNet: 1 <= num_stages <= 4
        ResNet(50, num_stages=0)

    with pytest.raises(AssertionError):
        # len(stage_with_dcn) == num_stages
        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
        ResNet(50, dcn=dcn, stage_with_dcn=(True, ))

    with pytest.raises(AssertionError):
        # len(stage_with_plugin) == num_stages
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                stages=(False, True, True),
                position='after_conv3')
        ]
        ResNet(50, plugins=plugins)

    with pytest.raises(AssertionError):
        # In ResNet: 1 <= num_stages <= 4
        ResNet(50, num_stages=5)

    with pytest.raises(AssertionError):
        # len(strides) == len(dilations) == num_stages
        ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3)

    with pytest.raises(TypeError):
        # pretrained must be a string path
        model = ResNet(50, pretrained=0)

    with pytest.raises(AssertionError):
        # Style must be in ['pytorch', 'caffe']
        ResNet(50, style='tensorflow')
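
    # norm_eval=True keeps every BatchNorm layer in eval mode even after
    # model.train() is called, so check_norm_state(..., False) is expected
    # to report that no norm layer is in training mode.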
    # Test ResNet50 norm_eval=True
    model = ResNet(50, norm_eval=True, base_channels=1)
    model.train()
    assert check_norm_state(model.modules(), False)

    # Test ResNet50 with torchvision pretrained weight
    model = ResNet(
        depth=50, norm_eval=True, pretrained='torchvision://resnet50')
    model.train()
    assert check_norm_state(model.modules(), False)
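
    # frozen_stages=1 freezes the stem (conv1/norm1, or the deep stem for
    # ResNetV1d) together with layer1: their parameters get
    # requires_grad=False and their BN layers are switched to eval mode.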
    # Test ResNet50 with first stage frozen
    frozen_stages = 1
    model = ResNet(50, frozen_stages=frozen_stages, base_channels=1)
    model.train()
    assert model.norm1.training is False
    for layer in [model.conv1, model.norm1]:
        for param in layer.parameters():
            assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    # Test ResNet50V1d with first stage frozen
    model = ResNetV1d(depth=50, frozen_stages=frozen_stages, base_channels=2)
    assert len(model.stem) == 9
    model.train()
    assert check_norm_state(model.stem, False)
    for param in model.stem.parameters():
        assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False
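
    # The stem downsamples the input by 4 (stride-2 7x7 conv followed by a
    # stride-2 max pool) and each later stage downsamples by 2, so a 32x32
    # input yields stage outputs of 8x8, 4x4, 2x2 and 1x1.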
    # Test ResNet18 forward
    model = ResNet(18)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 64, 8, 8])
    assert feat[1].shape == torch.Size([1, 128, 4, 4])
    assert feat[2].shape == torch.Size([1, 256, 2, 2])
    assert feat[3].shape == torch.Size([1, 512, 1, 1])

    # Test ResNet18 with checkpoint forward
    model = ResNet(18, with_cp=True)
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
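
    # Stage widths scale with base_channels: stage i uses
    # base_channels * 2**i planes, and Bottleneck multiplies its output by
    # an expansion of 4. With base_channels=1, ResNet50 therefore produces
    # feature maps with 4, 8, 16 and 32 channels.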
    # Test ResNet50 with BatchNorm forward
    model = ResNet(50, base_channels=1)
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, _BatchNorm)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 4, 8, 8])
    assert feat[1].shape == torch.Size([1, 8, 4, 4])
    assert feat[2].shape == torch.Size([1, 16, 2, 2])
    assert feat[3].shape == torch.Size([1, 32, 1, 1])

    # Test ResNet50 with layers 1, 2, 3 out forward
    model = ResNet(50, out_indices=(0, 1, 2), base_channels=1)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 3
    assert feat[0].shape == torch.Size([1, 4, 8, 8])
    assert feat[1].shape == torch.Size([1, 8, 4, 4])
    assert feat[2].shape == torch.Size([1, 16, 2, 2])

    # Test ResNet50 with checkpoint forward
    model = ResNet(50, with_cp=True, base_channels=1)
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 4, 8, 8])
    assert feat[1].shape == torch.Size([1, 8, 4, 4])
    assert feat[2].shape == torch.Size([1, 16, 2, 2])
    assert feat[3].shape == torch.Size([1, 32, 1, 1])

    # Test ResNet50 with GroupNorm forward
    model = ResNet(
        50,
        base_channels=4,
        norm_cfg=dict(type='GN', num_groups=2, requires_grad=True))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 16, 8, 8])
    assert feat[1].shape == torch.Size([1, 32, 4, 4])
    assert feat[2].shape == torch.Size([1, 64, 2, 2])
    assert feat[3].shape == torch.Size([1, 128, 1, 1])
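
    # The 'stages' tuple in a plugin config selects which of the four stages
    # get that plugin; a plugin without 'stages' is applied to every stage.
    # With base_channels=8 the stage planes are 8/16/32/64, so plugins after
    # conv2 see those widths and plugins after conv3 see the 4x expanded
    # widths (32/64/128/256), as asserted per layer below.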
    # Test ResNet50 with 1 GeneralizedAttention after conv2, 1 NonLocal2D
    # after conv2, 1 ContextBlock after conv3 in layers 2, 3, 4
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            stages=(False, True, True, True),
            position='after_conv2'),
        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            stages=(False, True, True, False),
            position='after_conv3')
    ]
    model = ResNet(50, plugins=plugins, base_channels=8)
    for m in model.layer1.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'gen_attention_block')
            assert m.nonlocal_block.in_channels == 8
    for m in model.layer2.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 16
            assert m.gen_attention_block.in_channels == 16
            assert m.context_block.in_channels == 64
    for m in model.layer3.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 32
            assert m.gen_attention_block.in_channels == 32
            assert m.context_block.in_channels == 128
    for m in model.layer4.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 64
            assert m.gen_attention_block.in_channels == 64
            assert not hasattr(m, 'context_block')
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 32, 8, 8])
    assert feat[1].shape == torch.Size([1, 64, 4, 4])
    assert feat[2].shape == torch.Size([1, 128, 2, 2])
    assert feat[3].shape == torch.Size([1, 256, 1, 1])

    # Test ResNet50 with 2 ContextBlocks after conv3 in layers 2, 3
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
            stages=(False, True, True, False),
            position='after_conv3'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
            stages=(False, True, True, False),
            position='after_conv3')
    ]
    model = ResNet(50, plugins=plugins, base_channels=8)
    for m in model.layer1.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'context_block1')
            assert not hasattr(m, 'context_block2')
    for m in model.layer2.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert m.context_block1.in_channels == 64
            assert m.context_block2.in_channels == 64
    for m in model.layer3.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert m.context_block1.in_channels == 128
            assert m.context_block2.in_channels == 128
    for m in model.layer4.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'context_block1')
            assert not hasattr(m, 'context_block2')
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 32, 8, 8])
    assert feat[1].shape == torch.Size([1, 64, 4, 4])
    assert feat[2].shape == torch.Size([1, 128, 2, 2])
    assert feat[3].shape == torch.Size([1, 256, 1, 1])
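
    # With zero_init_residual=True, init_weights() zero-initializes the last
    # norm layer of each residual block (norm3 in Bottleneck, norm2 in
    # BasicBlock), so every block initially behaves like an identity
    # mapping; assert_params_all_zeros verifies exactly those norm layers.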
    # Test ResNet50 zero initialization of residual
    model = ResNet(50, zero_init_residual=True, base_channels=1)
    model.init_weights()
    for m in model.modules():
        if isinstance(m, Bottleneck):
            assert assert_params_all_zeros(m.norm3)
        elif isinstance(m, BasicBlock):
            assert assert_params_all_zeros(m.norm2)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 4, 8, 8])
    assert feat[1].shape == torch.Size([1, 8, 4, 4])
    assert feat[2].shape == torch.Size([1, 16, 2, 2])
    assert feat[3].shape == torch.Size([1, 32, 1, 1])

    # Test ResNetV1d forward
    model = ResNetV1d(depth=50, base_channels=2)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 8, 8, 8])
    assert feat[1].shape == torch.Size([1, 16, 4, 4])
    assert feat[2].shape == torch.Size([1, 32, 2, 2])
    assert feat[3].shape == torch.Size([1, 64, 1, 1])
