
AD_dsxw_test11.py

# _base_ = '../cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py'
_base_ = [
    '../_base_/models/cascade_rcnn_r50_fpn.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
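# Model overrides on the Cascade R-CNN R-50 base: a DetectoRS backbone
# (weight-standardized ConvAWS convolutions, switchable atrous convolution
# in stages 2-4) feeding a Recursive Feature Pyramid (RFP) neck.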
model = dict(
    backbone=dict(
        type='DetectoRS_ResNet',
        conv_cfg=dict(type='ConvAWS'),
        sac=dict(type='SAC', use_deform=True),
        stage_with_sac=(False, True, True, True),
        output_img=True),
    neck=dict(
        type='RFP',
        rfp_steps=2,
        aspp_out_channels=64,
        aspp_dilations=(1, 3, 6, 1),
        rfp_backbone=dict(
            rfp_inplanes=256,
            type='DetectoRS_ResNet',
            depth=50,
            num_stages=4,
            out_indices=(0, 1, 2, 3),
            frozen_stages=1,
            norm_cfg=dict(type='BN', requires_grad=True),
            norm_eval=True,
            conv_cfg=dict(type='ConvAWS'),
            pretrained='torchvision://resnet50',
            style='pytorch')),
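    # Three cascade stages share the same head architecture; only the
    # regression target stds tighten per stage (0.1 -> 0.05 -> 0.033).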
    roi_head=dict(
        bbox_head=[
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=3,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=3,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=3,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
        ]))
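# Custom COCO-format dataset with three defect classes.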
dataset_type = 'CocoDataset'
classes = ('False_welding', 'Missing_parts', 'Displacement')
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
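# Training pipeline: multi-scale resize, random flips in three directions,
# brightness/contrast jitter, random shift, and IoU-constrained cropping.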
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(400, 300), (500, 400)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(
        type='RandomFlip',
        flip_ratio=[0.2, 0.2, 0.2],
        direction=['horizontal', 'vertical', 'diagonal']),
    dict(type='BrightnessTransform', level=5, prob=0.5),
    dict(type='ContrastTransform', level=5, prob=0.5),
    dict(type='RandomShift', shift_ratio=0.5),
    dict(type='MinIoURandomCrop', min_ious=(0.5, 0.7, 0.9), min_crop_size=0.8),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
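# Test pipeline: multi-scale + flip test-time augmentation over the same
# two scales used for training.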
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=[(400, 300), (500, 400)],
        flip=True,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
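# Data loaders: 16 images and 8 workers per GPU; val and test point to the
# same images and annotation file.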
data = dict(
    samples_per_gpu=16,
    workers_per_gpu=8,
    train=dict(
        type=dataset_type,
        img_prefix='/home/shanwei-luo/userdata/datasets/dsxw_train/images/',
        classes=classes,
        ann_file='/home/shanwei-luo/userdata/datasets/dsxw_train/annotations/train.json',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        img_prefix='/home/shanwei-luo/userdata/datasets/dsxw_test/images/',
        classes=classes,
        ann_file='/home/shanwei-luo/userdata/datasets/dsxw_test/annotations/test.json',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        img_prefix='/home/shanwei-luo/userdata/datasets/dsxw_test/images/',
        classes=classes,
        ann_file='/home/shanwei-luo/userdata/datasets/dsxw_test/annotations/test.json',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    # _delete_=True stops the step-LR keys inherited from schedule_1x.py
    # from being merged into this CosineAnnealing config.
    _delete_=True,
    policy='CosineAnnealing',
    warmup='linear',
    warmup_iters=2000,
    warmup_ratio=1.0 / 10,
    min_lr_ratio=1e-5)
runner = dict(type='EpochBasedRunner', max_epochs=60)
evaluation = dict(interval=5, metric='bbox')
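# Typical launch (assuming the standard MMDetection 2.x tools/ layout and
# that this file is placed under configs/, which is an assumption here):
#   python tools/train.py configs/detectors/AD_dsxw_test11.py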
