
config.py 5.2 kB

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Network config settings, used in train.py and eval.py.
"""
from easydict import EasyDict as ed
config = ed({
    "img_width": 1280,
    "img_height": 768,
    "keep_ratio": False,
    "flip_ratio": 0.5,
    "photo_ratio": 0.5,
    "expand_ratio": 1.0,
    # anchor
    "feature_shapes": [(192, 320), (96, 160), (48, 80), (24, 40), (12, 20)],
  28. "anchor_scales": [8],
  29. "anchor_ratios": [0.5, 1.0, 2.0],
  30. "anchor_strides": [4, 8, 16, 32, 64],
  31. "num_anchors": 3,
  32. # resnet
  33. "resnet_block": [3, 4, 6, 3],
  34. "resnet_in_channels": [64, 256, 512, 1024],
  35. "resnet_out_channels": [256, 512, 1024, 2048],
  36. # fpn
  37. "fpn_in_channels": [256, 512, 1024, 2048],
  38. "fpn_out_channels": 256,
  39. "fpn_num_outs": 5,
  40. # rpn
  41. "rpn_in_channels": 256,
  42. "rpn_feat_channels": 256,
  43. "rpn_loss_cls_weight": 1.0,
  44. "rpn_loss_reg_weight": 1.0,
  45. "rpn_cls_out_channels": 1,
  46. "rpn_target_means": [0., 0., 0., 0.],
  47. "rpn_target_stds": [1.0, 1.0, 1.0, 1.0],
  48. # bbox_assign_sampler
  49. "neg_iou_thr": 0.3,
  50. "pos_iou_thr": 0.7,
  51. "min_pos_iou": 0.3,
  52. "num_bboxes": 245520,
  53. "num_gts": 128,
  54. "num_expected_neg": 256,
  55. "num_expected_pos": 128,
  56. # proposal
  57. "activate_num_classes": 2,
  58. "use_sigmoid_cls": True,
  59. # roi_align
  60. "roi_layer": dict(type='RoIAlign', out_size=7, sample_num=2),
  61. "roi_align_out_channels": 256,
  62. "roi_align_featmap_strides": [4, 8, 16, 32],
  63. "roi_align_finest_scale": 56,
  64. "roi_sample_num": 640,
  65. # bbox_assign_sampler_stage2
  66. "neg_iou_thr_stage2": 0.5,
  67. "pos_iou_thr_stage2": 0.5,
  68. "min_pos_iou_stage2": 0.5,
  69. "num_bboxes_stage2": 2000,
  70. "num_expected_pos_stage2": 128,
  71. "num_expected_neg_stage2": 512,
  72. "num_expected_total_stage2": 512,
  73. # rcnn
  74. "rcnn_num_layers": 2,
  75. "rcnn_in_channels": 256,
  76. "rcnn_fc_out_channels": 1024,
  77. "rcnn_loss_cls_weight": 1,
  78. "rcnn_loss_reg_weight": 1,
  79. "rcnn_target_means": [0., 0., 0., 0.],
  80. "rcnn_target_stds": [0.1, 0.1, 0.2, 0.2],
  81. # train proposal
  82. "rpn_proposal_nms_across_levels": False,
  83. "rpn_proposal_nms_pre": 2000,
  84. "rpn_proposal_nms_post": 2000,
  85. "rpn_proposal_max_num": 2000,
  86. "rpn_proposal_nms_thr": 0.7,
  87. "rpn_proposal_min_bbox_size": 0,
  88. # test proposal
  89. "rpn_nms_across_levels": False,
  90. "rpn_nms_pre": 1000,
  91. "rpn_nms_post": 1000,
  92. "rpn_max_num": 1000,
  93. "rpn_nms_thr": 0.7,
  94. "rpn_min_bbox_min_size": 0,
  95. "test_score_thr": 0.05,
  96. "test_iou_thr": 0.5,
  97. "test_max_per_img": 100,
  98. "test_batch_size": 2,
  99. "rpn_head_loss_type": "CrossEntropyLoss",
  100. "rpn_head_use_sigmoid": True,
  101. "rpn_head_weight": 1.0,
  102. # LR
  103. "base_lr": 0.02,
  104. "base_step": 58633,
  105. "total_epoch": 13,
  106. "warmup_step": 500,
  107. "warmup_mode": "linear",
  108. "warmup_ratio": 1/3.0,
  109. "sgd_step": [8, 11],
  110. "sgd_momentum": 0.9,
  111. # train
  112. "batch_size": 2,
  113. "loss_scale": 1,
  114. "momentum": 0.91,
  115. "weight_decay": 1e-4,
  116. "epoch_size": 12,
  117. "save_checkpoint": True,
  118. "save_checkpoint_epochs": 1,
  119. "keep_checkpoint_max": 10,
  120. "save_checkpoint_path": "./checkpoint",
  121. "mindrecord_dir": "../MindRecoid_COCO_TRAIN",
  122. "coco_root": "./cocodataset/",
  123. "train_data_type": "train2017",
  124. "val_data_type": "val2017",
  125. "instance_set": "annotations/instances_{}.json",
  126. "coco_classes": ('background', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
  127. 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
  128. 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
  129. 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',
  130. 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
  131. 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
  132. 'kite', 'baseball bat', 'baseball glove', 'skateboard',
  133. 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
  134. 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
  135. 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
  136. 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
  137. 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
  138. 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
  139. 'refrigerator', 'book', 'clock', 'vase', 'scissors',
  140. 'teddy bear', 'hair drier', 'toothbrush'),
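    # 80 COCO object categories plus 'background' above give num_classes = 81.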
  141. "num_classes": 81
  142. })
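
As the module docstring notes, this config is imported by train.py and eval.py. A minimal usage sketch (assuming the file is importable as src.config, as in the usual MindSpore model-zoo layout; the actual entry scripts may wire it up differently):

    # Hypothetical usage example; the import path is an assumption.
    from src.config import config

    # EasyDict exposes keys as attributes.
    print(config.img_width, config.img_height)        # 1280 768
    warmup_lr = config.base_lr * config.warmup_ratio  # starting LR for linear warmup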