
AD_dsxw_test13.py

_base_ = [
    '../_base_/models/cascade_rcnn_r50_fpn.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth'

# Cascade R-CNN with a Swin-Small backbone (ImageNet-pretrained) and an FPN neck.
model = dict(
    backbone=dict(
        _delete_=True,
        type='SwinTransformer',
        embed_dims=96,
        depths=[2, 2, 18, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.2,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        with_cp=False,
        convert_weights=True,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    neck=dict(
        type='FPN',  # alternative: 'PAFPN'
        in_channels=[96, 192, 384, 768],
        out_channels=256,
        num_outs=5),
    roi_head=dict(
        # Three cascade stages; box-regression stds tighten stage by stage.
        bbox_head=[
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=3,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=3,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=3,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
        ]))

# Dataset: COCO-format annotations with three defect classes.
dataset_type = 'CocoDataset'
classes = ('False_welding', 'Missing_parts', 'Displacement')
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(400, 300), (500, 400)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(
        type='RandomFlip',
        flip_ratio=[0.2, 0.2, 0.2],
        direction=['horizontal', 'vertical', 'diagonal']),
    dict(type='BrightnessTransform', level=5, prob=0.5),
    dict(type='ContrastTransform', level=5, prob=0.5),
    dict(type='RandomShift', shift_ratio=0.5),
    dict(type='MinIoURandomCrop', min_ious=(0.5, 0.7, 0.9), min_crop_size=0.8),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=[(400, 300), (500, 400)],
        flip=True,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=16,
    workers_per_gpu=8,
    train=dict(
        type=dataset_type,
        img_prefix='/home/shanwei-luo/userdata/datasets/dsxw_train/images/',
        classes=classes,
        ann_file='/home/shanwei-luo/userdata/datasets/dsxw_train/annotations/train.json',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        img_prefix='/home/shanwei-luo/userdata/datasets/dsxw_test/images/',
        classes=classes,
        ann_file='/home/shanwei-luo/userdata/datasets/dsxw_test/annotations/test.json',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        img_prefix='/home/shanwei-luo/userdata/datasets/dsxw_test/images/',
        classes=classes,
        ann_file='/home/shanwei-luo/userdata/datasets/dsxw_test/annotations/test.json',
        pipeline=test_pipeline))

# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy: cosine annealing with a linear warmup
lr_config = dict(
    policy='CosineAnnealing',
    warmup='linear',
    warmup_iters=2000,
    warmup_ratio=1.0 / 10,
    min_lr_ratio=1e-5)
runner = dict(type='EpochBasedRunner', max_epochs=60)
evaluation = dict(interval=5, metric='bbox')  # COCO bbox mAP every 5 epochs
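
As a quick sanity check, the config can be loaded with mmcv's Config before training. The sketch below is an assumption about where the file lives: it presumes an MMDetection 2.x checkout with this file saved somewhere under configs/ (the 'configs/swin/' path is hypothetical) so that the relative _base_ imports resolve. Training itself would then go through MMDetection's standard tools/train.py entry point with this config as the argument.

    from mmcv import Config

    # Hypothetical location; adjust to wherever AD_dsxw_test13.py is placed under configs/.
    cfg = Config.fromfile('configs/swin/AD_dsxw_test13.py')

    # Inspect a few merged values to confirm the _base_ files resolved as expected.
    print(cfg.model.backbone.type)                       # 'SwinTransformer'
    print(cfg.model.roi_head.bbox_head[0].num_classes)   # 3
    print(cfg.data.train.ann_file)                       # training annotation path
    print(len(cfg.data.train.pipeline))                  # number of train-time transforms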
