# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
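# Training pipeline. OpenImages stores box coordinates normalized to [0, 1],
# so `denorm_bbox=True` converts them to absolute pixel coordinates on load.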
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, denorm_bbox=True),
    dict(type='Resize', img_scale=(1024, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
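# Test pipeline. MultiScaleFlipAug is configured with a single scale and
# flip=False, i.e. plain single-scale testing.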
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1024, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ],
    ),
]
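# Data settings for OpenImages V6. The class hierarchy and the image-level
# human-verified labels are used during evaluation, following the official
# OpenImages protocol; the pre-computed image metas
# (`validation-image-metas.pkl`) supply image shapes so evaluation does not
# have to re-read every validation image.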
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=0,  # workers_per_gpu > 0 may cause out-of-memory errors
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/oidv6-train-annotations-bbox.csv',
        img_prefix=data_root + 'OpenImages/train/',
        label_file=data_root + 'annotations/class-descriptions-boxable.csv',
        hierarchy_file=data_root +
        'annotations/bbox_labels_600_hierarchy.json',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/validation-annotations-bbox.csv',
        img_prefix=data_root + 'OpenImages/validation/',
        label_file=data_root + 'annotations/class-descriptions-boxable.csv',
        hierarchy_file=data_root +
        'annotations/bbox_labels_600_hierarchy.json',
        meta_file=data_root + 'annotations/validation-image-metas.pkl',
        image_level_ann_file=data_root +
        'annotations/validation-annotations-human-imagelabels-boxable.csv',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/validation-annotations-bbox.csv',
        img_prefix=data_root + 'OpenImages/validation/',
        label_file=data_root + 'annotations/class-descriptions-boxable.csv',
        hierarchy_file=data_root +
        'annotations/bbox_labels_600_hierarchy.json',
        meta_file=data_root + 'annotations/validation-image-metas.pkl',
        image_level_ann_file=data_root +
        'annotations/validation-annotations-human-imagelabels-boxable.csv',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='mAP')
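# --- Usage sketch (not part of the original config) ---
# A minimal, commented-out example of how a base dataset config like this is
# typically consumed with MMDetection 2.x. It is kept in comments so the
# config file itself stays loadable; the config path below is an assumption
# about where this file lives in the repo.
#
#     from mmcv import Config
#     from mmdet.datasets import build_dataset
#
#     cfg = Config.fromfile('configs/_base_/datasets/openimages_detection.py')
#     train_dataset = build_dataset(cfg.data.train)
#     print(len(train_dataset))  # number of training images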