
Commit a3ee332: manual merge
1 parent 2c8981d

138 files changed: +21916 additions, 0 deletions


.gitignore

+1

*/__pycache__

LICENSE

+674 (large diff not rendered)

README.md

+3

Run facedet:

python .\yolo2deepsort.py --weights ./weights/yolov5n-face.pt --source 0 --view-img
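Here --weights points at the face-detection checkpoint, --source 0 selects the first attached camera, and --view-img displays results live. As a rough sketch of what --source 0 implies on the capture side (this assumes the script reads frames via OpenCV, as YOLOv5-derived code usually does; the actual loader may differ):

import cv2

# --source 0 maps to the first attached camera device
cap = cv2.VideoCapture(0)
while cap.isOpened():
    ok, frame = cap.read()  # BGR frame, shape (H, W, 3)
    if not ok:
        break
    # detection + tracking would run on `frame` here
    cv2.imshow('view-img', frame)  # roughly what --view-img shows
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()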

data/argoverse_hd.yaml

+21

# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/
# Train command: python train.py --data argoverse_hd.yaml
# Default dataset location is next to /yolov5:
#   /parent_folder
#     /argoverse
#     /yolov5


# download command/URL (optional)
download: bash data/scripts/get_argoverse_hd.sh

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ../argoverse/Argoverse-1.1/images/train/  # 39384 images
val: ../argoverse/Argoverse-1.1/images/val/  # 15062 images
test: ../argoverse/Argoverse-1.1/images/test/  # Submit to: https://eval.ai/web/challenges/challenge-page/800/overview

# number of classes
nc: 8

# class names
names: [ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign' ]
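These dataset YAMLs are plain key/value files, so a quick self-consistency check is one load away (a minimal sketch; assumes PyYAML is installed):

import yaml

# sanity-check a dataset config: the class list must match nc
with open('data/argoverse_hd.yaml') as f:
    d = yaml.safe_load(f)
assert len(d['names']) == d['nc'], "len(names) must equal nc"
print(d['nc'], 'classes:', d['names'])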

data/coco.yaml

+35

# COCO 2017 dataset http://cocodataset.org
# Train command: python train.py --data coco.yaml
# Default dataset location is next to /yolov5:
#   /parent_folder
#     /coco
#     /yolov5


# download command/URL (optional)
download: bash data/scripts/get_coco.sh

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ../coco/train2017.txt  # 118287 images
val: ../coco/val2017.txt  # 5000 images
test: ../coco/test-dev2017.txt  # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794

# number of classes
nc: 80

# class names
names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
         'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
         'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
         'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
         'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
         'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
         'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
         'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
         'hair drier', 'toothbrush' ]

# Print classes
# with open('data/coco.yaml') as f:
#     d = yaml.load(f, Loader=yaml.FullLoader)  # dict
#     for i, x in enumerate(d['names']):
#         print(i, x)
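The commented "Print classes" block at the bottom of the file is runnable as-is once uncommented (needs only PyYAML):

import yaml

with open('data/coco.yaml') as f:
    d = yaml.load(f, Loader=yaml.FullLoader)  # dict
for i, x in enumerate(d['names']):
    print(i, x)  # 0 person, 1 bicycle, ... 79 toothbrush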

data/coco128.yaml

+28

# COCO 2017 dataset http://cocodataset.org - first 128 training images
# Train command: python train.py --data coco128.yaml
# Default dataset location is next to /yolov5:
#   /parent_folder
#     /coco128
#     /yolov5


# download command/URL (optional)
download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ../coco128/images/train2017/  # 128 images
val: ../coco128/images/train2017/  # 128 images

# number of classes
nc: 80

# class names
names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
         'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
         'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
         'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
         'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
         'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
         'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
         'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
         'hair drier', 'toothbrush' ]

data/hyp.finetune.yaml

+38

# Hyperparameters for VOC finetuning
# python train.py --batch 64 --weights yolov5m.pt --data voc.yaml --img 512 --epochs 50
# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials


# Hyperparameter Evolution Results
# Generations: 306
#                   P         R     mAP.5 mAP.5:.95       box       obj       cls
# Metrics:        0.6     0.936     0.896     0.684    0.0115   0.00805   0.00146

lr0: 0.0032
lrf: 0.12
momentum: 0.843
weight_decay: 0.00036
warmup_epochs: 2.0
warmup_momentum: 0.5
warmup_bias_lr: 0.05
box: 0.0296
cls: 0.243
cls_pw: 0.631
obj: 0.301
obj_pw: 0.911
iou_t: 0.2
anchor_t: 2.91
# anchors: 3.63
fl_gamma: 0.0
hsv_h: 0.0138
hsv_s: 0.664
hsv_v: 0.464
degrees: 0.373
translate: 0.245
scale: 0.898
shear: 0.602
perspective: 0.0
flipud: 0.00856
fliplr: 0.5
mosaic: 1.0
mixup: 0.243

data/hyp.scratch.yaml

+34

# Hyperparameters for COCO training from scratch
# python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300
# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials


lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
lrf: 0.2  # final OneCycleLR learning rate (lr0 * lrf)
momentum: 0.937  # SGD momentum/Adam beta1
weight_decay: 0.0005  # optimizer weight decay 5e-4
warmup_epochs: 3.0  # warmup epochs (fractions ok)
warmup_momentum: 0.8  # warmup initial momentum
warmup_bias_lr: 0.1  # warmup initial bias lr
box: 0.05  # box loss gain
cls: 0.5  # cls loss gain
landmark: 0.005  # landmark loss gain
cls_pw: 1.0  # cls BCELoss positive_weight
obj: 1.0  # obj loss gain (scale with pixels)
obj_pw: 1.0  # obj BCELoss positive_weight
iou_t: 0.20  # IoU training threshold
anchor_t: 4.0  # anchor-multiple threshold
# anchors: 3  # anchors per output layer (0 to ignore)
fl_gamma: 0.0  # focal loss gamma (efficientDet default gamma=1.5)
hsv_h: 0.015  # image HSV-Hue augmentation (fraction)
hsv_s: 0.7  # image HSV-Saturation augmentation (fraction)
hsv_v: 0.4  # image HSV-Value augmentation (fraction)
degrees: 0.0  # image rotation (+/- deg)
translate: 0.1  # image translation (+/- fraction)
scale: 0.5  # image scale (+/- gain)
shear: 0.5  # image shear (+/- deg)
perspective: 0.0  # image perspective (+/- fraction), range 0-0.001
flipud: 0.0  # image flip up-down (probability)
fliplr: 0.5  # image flip left-right (probability)
mosaic: 0.5  # image mosaic (probability)
mixup: 0.0  # image mixup (probability)
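Both hyp files load into a flat dict of floats; a minimal sketch of how a training script can consume them (assumes PyYAML; train.py's actual loading code may differ slightly):

import yaml

with open('data/hyp.scratch.yaml') as f:
    hyp = yaml.safe_load(f)  # flat dict: name -> float
# values feed optimizer, loss gains, and augmentation directly
print(hyp['lr0'], hyp['momentum'], hyp['landmark'])  # 0.01 0.937 0.005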

data/images/1.jpg (247 KB)
data/images/FDDB.png (83.7 KB)
data/images/Widerface.jpg (221 KB)
data/images/Yolo5face.png (830 KB)
data/images/bus.jpg (476 KB)
data/images/landmark.png (1.89 MB)
data/images/pr-curves2.png (525 KB)
data/images/result.jpg (1.21 MB)
data/images/stem.png (11.7 KB)
data/images/test.jpg (416 KB)
data/images/yolo5-top.png (184 KB)
data/images/yolo5.png (119 KB)
data/images/zidane.jpg (165 KB)

data/retinaface2yolo.py

+150

import os
import os.path
import sys
import torch
import torch.utils.data as data
import cv2
import numpy as np

class WiderFaceDetection(data.Dataset):
    def __init__(self, txt_path, preproc=None):
        self.preproc = preproc
        self.imgs_path = []
        self.words = []
        with open(txt_path, 'r') as f:
            lines = f.readlines()
        isFirst = True
        labels = []
        for line in lines:
            line = line.rstrip()
            if line.startswith('#'):
                if isFirst is True:
                    isFirst = False
                else:
                    labels_copy = labels.copy()
                    self.words.append(labels_copy)
                    labels.clear()
                path = line[2:]
                path = txt_path.replace('label.txt', 'images/') + path
                self.imgs_path.append(path)
            else:
                line = line.split(' ')
                label = [float(x) for x in line]
                labels.append(label)

        self.words.append(labels)

    def __len__(self):
        return len(self.imgs_path)

    def __getitem__(self, index):
        img = cv2.imread(self.imgs_path[index])
        height, width, _ = img.shape

        labels = self.words[index]
        annotations = np.zeros((0, 15))
        if len(labels) == 0:
            return annotations
        for idx, label in enumerate(labels):
            annotation = np.zeros((1, 15))
            # bbox
            annotation[0, 0] = label[0]  # x1
            annotation[0, 1] = label[1]  # y1
            annotation[0, 2] = label[0] + label[2]  # x2
            annotation[0, 3] = label[1] + label[3]  # y2

            # landmarks
            annotation[0, 4] = label[4]    # l0_x
            annotation[0, 5] = label[5]    # l0_y
            annotation[0, 6] = label[7]    # l1_x
            annotation[0, 7] = label[8]    # l1_y
            annotation[0, 8] = label[10]   # l2_x
            annotation[0, 9] = label[11]   # l2_y
            annotation[0, 10] = label[13]  # l3_x
            annotation[0, 11] = label[14]  # l3_y
            annotation[0, 12] = label[16]  # l4_x
            annotation[0, 13] = label[17]  # l4_y
            if annotation[0, 4] < 0:
                annotation[0, 14] = -1  # no landmark annotation
            else:
                annotation[0, 14] = 1

            annotations = np.append(annotations, annotation, axis=0)
        target = np.array(annotations)
        if self.preproc is not None:
            img, target = self.preproc(img, target)

        return torch.from_numpy(img), target

def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for _, sample in enumerate(batch):
        for _, tup in enumerate(sample):
            if torch.is_tensor(tup):
                imgs.append(tup)
            elif isinstance(tup, type(np.empty(0))):
                annos = torch.from_numpy(tup).float()
                targets.append(annos)

    return (torch.stack(imgs, 0), targets)

save_path = '/ssd_1t/derron/yolov5-face/data/widerface/train'
aa = WiderFaceDetection("/ssd_1t/derron/yolov5-face/data/widerface/widerface/train/label.txt")
for i in range(len(aa.imgs_path)):
    print(i, aa.imgs_path[i])
    img = cv2.imread(aa.imgs_path[i])
    base_img = os.path.basename(aa.imgs_path[i])
    base_txt = os.path.basename(aa.imgs_path[i])[:-4] + ".txt"
    save_img_path = os.path.join(save_path, base_img)
    save_txt_path = os.path.join(save_path, base_txt)
    with open(save_txt_path, "w") as f:
        height, width, _ = img.shape
        labels = aa.words[i]
        annotations = np.zeros((0, 14))
        if len(labels) == 0:
            continue
        for idx, label in enumerate(labels):
            annotation = np.zeros((1, 14))
            # bbox: clamp to image, then convert x1,y1,w,h -> normalized cx,cy,w,h
            label[0] = max(0, label[0])
            label[1] = max(0, label[1])
            label[2] = min(width - 1, label[2])
            label[3] = min(height - 1, label[3])
            annotation[0, 0] = (label[0] + label[2] / 2) / width   # cx
            annotation[0, 1] = (label[1] + label[3] / 2) / height  # cy
            annotation[0, 2] = label[2] / width   # w
            annotation[0, 3] = label[3] / height  # h
            #if (label[2] - label[0]) < 8 or (label[3] - label[1]) < 8:
            #    img[int(label[1]):int(label[3]), int(label[0]):int(label[2])] = 127
            #    continue
            # landmarks, normalized to image size
            annotation[0, 4] = label[4] / width    # l0_x
            annotation[0, 5] = label[5] / height   # l0_y
            annotation[0, 6] = label[7] / width    # l1_x
            annotation[0, 7] = label[8] / height   # l1_y
            annotation[0, 8] = label[10] / width   # l2_x
            annotation[0, 9] = label[11] / height  # l2_y
            annotation[0, 10] = label[13] / width   # l3_x
            annotation[0, 11] = label[14] / height  # l3_y
            annotation[0, 12] = label[16] / width   # l4_x
            annotation[0, 13] = label[17] / height  # l4_y
            str_label = "0 "
            # loop variable renamed from `i` to `j` to avoid shadowing the outer image index
            for j in range(len(annotation[0])):
                str_label = str_label + " " + str(annotation[0][j])
            str_label = str_label.replace('[', '').replace(']', '')
            str_label = str_label.replace(',', '') + '\n'
            f.write(str_label)
    cv2.imwrite(save_img_path, img)
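Each label line the script writes is "0  cx cy w h l0_x l0_y ... l4_x l4_y", all values normalized by image size. A round-trip check that draws the boxes back onto a saved image (a sketch; the file names are hypothetical examples, substitute a real converted pair from save_path):

import cv2

img = cv2.imread('data/widerface/train/0_Parade_marchingband_1_5.jpg')
h, w, _ = img.shape
with open('data/widerface/train/0_Parade_marchingband_1_5.txt') as f:
    for line in f:
        vals = [float(x) for x in line.split()[1:]]  # drop leading class id
        cx, cy, bw, bh = vals[:4]
        x1, y1 = int((cx - bw / 2) * w), int((cy - bh / 2) * h)
        x2, y2 = int((cx + bw / 2) * w), int((cy + bh / 2) * h)
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.imwrite('check.jpg', img)  # inspect boxes visually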

data/scripts/get_argoverse_hd.sh

+62

#!/bin/bash
# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/
# Download command: bash data/scripts/get_argoverse_hd.sh
# Train command: python train.py --data argoverse_hd.yaml
# Default dataset location is next to /yolov5:
#   /parent_folder
#     /argoverse
#     /yolov5

# Download/unzip images
d='../argoverse/' # unzip directory
mkdir $d
url=https://argoverse-hd.s3.us-east-2.amazonaws.com/
f=Argoverse-HD-Full.zip
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
wait # finish background tasks

cd ../argoverse/Argoverse-1.1/
ln -s tracking images

cd ../Argoverse-HD/annotations/

python3 - "$@" <<END
import json
from pathlib import Path

annotation_files = ["train.json", "val.json"]
print("Converting annotations to YOLOv5 format...")

for val in annotation_files:
    a = json.load(open(val, "rb"))

    label_dict = {}
    for annot in a['annotations']:
        img_id = annot['image_id']
        img_name = a['images'][img_id]['name']
        img_label_name = img_name[:-3] + "txt"

        obj_class = annot['category_id']
        x_center, y_center, width, height = annot['bbox']
        x_center = (x_center + width / 2) / 1920.  # offset and scale
        y_center = (y_center + height / 2) / 1200.  # offset and scale
        width /= 1920.  # scale
        height /= 1200.  # scale

        img_dir = "./labels/" + a['seq_dirs'][a['images'][annot['image_id']]['sid']]

        Path(img_dir).mkdir(parents=True, exist_ok=True)

        if img_dir + "/" + img_label_name not in label_dict:
            label_dict[img_dir + "/" + img_label_name] = []

        label_dict[img_dir + "/" + img_label_name].append(f"{obj_class} {x_center} {y_center} {width} {height}\n")

    for filename in label_dict:
        with open(filename, "w") as file:
            for string in label_dict[filename]:
                file.write(string)

END

mv ./labels ../../Argoverse-1.1/
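After the heredoc finishes, every file under ./labels should hold one "class cx cy w h" line per annotation, with the four coordinates normalized to [0, 1] by the 1920x1200 frame size. A quick post-hoc check (a sketch; run from Argoverse-1.1 after the final mv):

from pathlib import Path

for txt in Path('labels').rglob('*.txt'):
    for line in txt.read_text().splitlines():
        cls, cx, cy, w, h = line.split()
        # all four coordinates were divided by 1920/1200 above, so must be in [0, 1]
        assert all(0.0 <= float(v) <= 1.0 for v in (cx, cy, w, h)), txt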
