
Commit 621b2e2

Add documentation

1 parent a175938 commit 621b2e2

File tree

4 files changed: +162 -13 lines changed

- DOWNLOAD_WEIGHTS.md
- README.md
- motrackers/track.py
- motrackers/tracker_img.py

DOWNLOAD_WEIGHTS.md (+2 -2)

````diff
@@ -1,9 +1,9 @@
-## Instructions to download pretrained neural-network weights.
-
+## Download pretrained neural-network weights.
 [[Webpage](https://adipandas.github.io/multi-object-tracker/)]
 [[GitHub](https://github.com/adipandas/multi-object-tracker)]
 
 ##### YOLOv3
+
 ```
 cd multi-object-tracker
 cd ./examples/pretrained_models/yolo_weights
````
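The hunk above is truncated here; in the repository the weights themselves are fetched by shell scripts under `examples/pretrained_models/`. As a rough sketch only, the equivalent download step in Python, assuming the standard Darknet YOLOv3 release URL (the URL is an assumption, not taken from this commit):

```python
# Hedged sketch: download YOLOv3 weights into the folder the diff cd's into.
# The URL below is an assumption (the canonical Darknet release), not a path
# taken from this commit.
import urllib.request
from pathlib import Path

weights_dir = Path("examples/pretrained_models/yolo_weights")
weights_dir.mkdir(parents=True, exist_ok=True)

url = "https://pjreddie.com/media/files/yolov3.weights"  # assumed URL
target = weights_dir / "yolov3.weights"
if not target.exists():
    print(f"Downloading {url} ...")
    urllib.request.urlretrieve(url, str(target))
    print(f"Saved to {target}")
```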

README.md (+2 -10)

````diff
@@ -12,7 +12,6 @@ Easy to use implementation of various multi-object tracking algorithms.
 ![Cars with YOLO][cars-yolo-output] | ![Cows with tf-SSD][cows-tf-ssd-output]
 Video source: [link](https://flic.kr/p/L6qyxj) | Video source: [link](https://flic.kr/p/26WeEWy)
 
-
 ## Available Multi Object Trackers
 
 ```
@@ -53,24 +52,19 @@ The interface for each tracker is simple and similar. Please refer the example t
 
 ```
 from motrackers import CentroidTracker # or IOUTracker, CentroidKF_Tracker, SORT
-
 input_data = ...
 detector = ...
 tracker = CentroidTracker(...) # or IOUTracker(...), CentroidKF_Tracker(...), SORT(...)
-
 while True:
     done, image = <read(input_data)>
     if done:
         break
-
     detection_bboxes, detection_confidences, detection_class_ids = detector.detect(image)
     # NOTE:
     # * `detection_bboxes` are numpy.ndarray of shape (n, 4) with each row containing (bb_left, bb_top, bb_width, bb_height)
     # * `detection_confidences` are numpy.ndarray of shape (n,);
     # * `detection_class_ids` are numpy.ndarray of shape (n,).
-
     output_tracks = tracker.track(detection_bboxes, detection_confidences, detection_class_ids)
-
     # `output_tracks` is a list with each element containing tuple of
     # (<frame>, <id>, <bb_left>, <bb_top>, <bb_width>, <bb_height>, <conf>, <x>, <y>, <z>)
     for track in output_tracks:
@@ -79,21 +73,19 @@ while True:
         print(track)
 ```
 
-Please refer [examples](https://github.com/adipandas/multi-object-tracker/tree/master/examples) folder of this repository for more details.
-You can clone and run the examples as shown [here](examples/readme.md).
+Please refer to the [examples](https://github.com/adipandas/multi-object-tracker/tree/master/examples) folder of this repository for more details. You can clone and run the examples.
 
 ## Pretrained object detection models
 
 You will have to download the pretrained weights for the neural-network models.
 The shell scripts for downloading these are provided [here](https://github.com/adipandas/multi-object-tracker/tree/master/examples/pretrained_models) below respective folders.
-Please refer [DOWNLOAD_WEIGHTS.md](DOWNLOAD_WEIGHTS.md) for more details.
+Please refer to [DOWNLOAD_WEIGHTS.md](https://github.com/adipandas/multi-object-tracker/blob/master/DOWNLOAD_WEIGHTS.md) for more details.
 
 ### Notes
 * There are some variations in implementations as compared to what appeared in papers of `SORT` and `IoU Tracker`.
 * In case you find any bugs in the algorithm, I will be happy to accept your pull request or you can create an issue to point it out.
 
 ## References, Credits and Contributions
-
 Please see [REFERENCES.md](docs/readme/REFERENCES.md) and [CONTRIBUTING.md](docs/readme/CONTRIBUTING.md).
 
 ## Citation
````
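The usage pattern in the README diff above is pseudocode (`<read(input_data)>` is a placeholder). A minimal runnable sketch of the same loop, assuming OpenCV for frame reading and a stub detector standing in for a real one; the `max_lost` keyword is an assumption about `CentroidTracker`'s signature:

```python
# Hedged sketch of the README loop: OpenCV supplies frames, the detector is a
# stand-in stub, and CentroidTracker consumes (bboxes, confidences, class_ids).
import numpy as np
import cv2
from motrackers import CentroidTracker

class StubDetector:
    """Hypothetical detector: returns one fixed box per frame for illustration."""
    def detect(self, image):
        bboxes = np.array([[50, 60, 120, 80]])  # (bb_left, bb_top, bb_width, bb_height)
        confidences = np.array([0.9])
        class_ids = np.array([0])
        return bboxes, confidences, class_ids

cap = cv2.VideoCapture("examples/video_data/cars.mp4")  # assumed path from this commit
detector = StubDetector()
tracker = CentroidTracker(max_lost=5)  # max_lost is an assumed keyword

while True:
    ok, image = cap.read()
    if not ok:
        break
    bboxes, confidences, class_ids = detector.detect(image)
    output_tracks = tracker.track(bboxes, confidences, class_ids)
    for track in output_tracks:
        # (<frame>, <id>, <bb_left>, <bb_top>, <bb_width>, <bb_height>, <conf>, <x>, <y>, <z>)
        print(track)

cap.release()
```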

motrackers/track.py (+95 -1)

```diff
@@ -4,7 +4,7 @@
 
 class Track:
     """
-    Track
+    Track containing attributes to track various objects.
 
     Parameters
     ----------
@@ -22,6 +22,8 @@ class Track:
         Number of times the object or track was not tracked by tracker in consecutive frames.
     iou_score : float
         Intersection over union score.
+    data_output_format: str
+        Output format for data in tracker. Options ``['mot_challenge', 'visdrone_challenge']``. Default is ``mot_challenge``.
     kwargs : dict
         Additional key word arguments.
```
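The new `data_output_format` parameter documented above selects the tuple layout a track reports. A hedged sketch of constructing the base `Track` in each format; the constructor keywords follow the signatures shown in this diff, and `get_mot_challenge_format` is assumed as the counterpart of the `get_vis_drone_format` method visible in the next hunk:

```python
# Hedged sketch: construct the base Track in each supported output format.
# Constructor keywords follow this diff; get_mot_challenge_format is an
# assumed sibling of the get_vis_drone_format method shown below.
import numpy as np
from motrackers.track import Track

bbox = np.array([50, 60, 120, 80])  # (xmin, ymin, width, height)

t_mot = Track(track_id=1, frame_id=0, bbox=bbox, detection_confidence=0.9,
              class_id=2, data_output_format='mot_challenge')
t_vis = Track(track_id=1, frame_id=0, bbox=bbox, detection_confidence=0.9,
              class_id=2, data_output_format='visdrone_challenge')

# MOT-challenge rows have the layout documented in the README:
# (<frame>, <id>, <bb_left>, <bb_top>, <bb_width>, <bb_height>, <conf>, <x>, <y>, <z>)
print(t_vis.get_vis_drone_format())      # method shown in this diff
print(t_mot.get_mot_challenge_format())  # assumed counterpart
```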
```diff
@@ -163,6 +165,9 @@ def get_vis_drone_format(self):
         return mot_tuple
 
     def predict(self):
+        """
+        Implement this method to predict the next estimate of the track.
+        """
         raise NotImplemented
 
     @staticmethod
```
```diff
@@ -173,6 +178,31 @@ def print_all_track_output_formats():
 class KFTrackSORT(Track):
     """
     Track based on Kalman filter tracker used for SORT MOT-Algorithm.
+
+    Parameters
+    ----------
+    track_id : int
+        Track Id
+    frame_id : int
+        Camera frame id.
+    bbox : numpy.ndarray
+        Bounding box pixel coordinates as (xmin, ymin, width, height) of the track.
+    detection_confidence : float
+        Detection confidence of the object (probability).
+    class_id : Object
+        Class label id.
+    lost : int
+        Number of times the object or track was not tracked by tracker in consecutive frames.
+    iou_score : float
+        Intersection over union score.
+    data_output_format: str
+        Output format for data in tracker. Options ``['mot_challenge', 'visdrone_challenge']``. Default is ``mot_challenge``.
+    process_noise_scale: float
+        Process noise covariance scale or covariance magnitude as scalar value.
+    measurement_noise_scale: float
+        Measurement noise covariance scale or covariance magnitude as scalar value.
+    kwargs : dict
+        Additional key word arguments.
     """
     def __init__(self, track_id, frame_id, bbox, detection_confidence, class_id=None, lost=0, iou_score=0.,
                  data_output_format='mot_challenge', process_noise_scale=1.0, measurement_noise_scale=1.0, **kwargs):
```
```diff
@@ -183,6 +213,13 @@ def __init__(self, track_id, frame_id, bbox, detection_confidence, class_id=None
                          iou_score=iou_score, data_output_format=data_output_format, **kwargs)
 
     def predict(self):
+        """
+        Predicts the next estimate of the bounding box of the track.
+
+        Returns:
+            numpy.ndarray: Bounding box pixel coordinates as (xmin, ymin, width, height) of the track.
+
+        """
         if (self.kf.x[6] + self.kf.x[2]) <= 0:
             self.kf.x[6] *= 0.0
```
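For orientation, the guard at the top of `predict()` above reads naturally under the usual SORT state convention, x = [u, v, s, r, du, dv, ds], with s the box scale (area) and ds its velocity; this convention is an assumption about surrounding code this diff does not show. A toy illustration:

```python
# Toy illustration of the guard: if the predicted scale s + ds would be
# non-positive, zero the scale velocity so the Kalman prediction cannot
# yield a degenerate (negative-area) box. State layout is assumed SORT-style.
import numpy as np

x = np.array([320.0, 240.0, 4.0, 1.5, 0.0, 0.0, -9.0])  # [u, v, s, r, du, dv, ds]
if (x[6] + x[2]) <= 0:
    x[6] *= 0.0  # same clamp as in KFTrackSORT.predict above
print(x)  # ds has been zeroed; s stays positive after prediction
```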

```diff
@@ -206,6 +243,31 @@ def update(self, frame_id, bbox, detection_confidence, class_id=None, lost=0, io
 class KFTrack4DSORT(Track):
     """
     Track based on Kalman filter tracker used for SORT MOT-Algorithm.
+
+    Parameters
+    ----------
+    track_id : int
+        Track Id
+    frame_id : int
+        Camera frame id.
+    bbox : numpy.ndarray
+        Bounding box pixel coordinates as (xmin, ymin, width, height) of the track.
+    detection_confidence : float
+        Detection confidence of the object (probability).
+    class_id : Object
+        Class label id.
+    lost : int
+        Number of times the object or track was not tracked by tracker in consecutive frames.
+    iou_score : float
+        Intersection over union score.
+    data_output_format: str
+        Output format for data in tracker. Options ``['mot_challenge', 'visdrone_challenge']``. Default is ``mot_challenge``.
+    process_noise_scale: float
+        Process noise covariance scale or covariance magnitude as scalar value.
+    measurement_noise_scale: float
+        Measurement noise covariance scale or covariance magnitude as scalar value.
+    kwargs : dict
+        Additional key word arguments.
     """
     def __init__(self, track_id, frame_id, bbox, detection_confidence, class_id=None, lost=0, iou_score=0.,
                  data_output_format='mot_challenge', process_noise_scale=1.0, measurement_noise_scale=1.0,
```
```diff
@@ -230,6 +292,31 @@ def update(self, frame_id, bbox, detection_confidence, class_id=None, lost=0, io
 class KFTrackCentroid(Track):
     """
     Track based on Kalman filter used for Centroid Tracking of bounding box in MOT.
+
+    Parameters
+    ----------
+    track_id : int
+        Track Id
+    frame_id : int
+        Camera frame id.
+    bbox : numpy.ndarray
+        Bounding box pixel coordinates as (xmin, ymin, width, height) of the track.
+    detection_confidence : float
+        Detection confidence of the object (probability).
+    class_id : Object
+        Class label id.
+    lost : int
+        Number of times the object or track was not tracked by tracker in consecutive frames.
+    iou_score : float
+        Intersection over union score.
+    data_output_format: str
+        Output format for data in tracker. Options ``['mot_challenge', 'visdrone_challenge']``. Default is ``mot_challenge``.
+    process_noise_scale: float
+        Process noise covariance scale or covariance magnitude as scalar value.
+    measurement_noise_scale: float
+        Measurement noise covariance scale or covariance magnitude as scalar value.
+    kwargs : dict
+        Additional key word arguments.
     """
     def __init__(self, track_id, frame_id, bbox, detection_confidence, class_id=None, lost=0, iou_score=0.,
                  data_output_format='mot_challenge', process_noise_scale=1.0, measurement_noise_scale=1.0, **kwargs):
```
```diff
@@ -239,6 +326,13 @@ def __init__(self, track_id, frame_id, bbox, detection_confidence, class_id=None
                          iou_score=iou_score, data_output_format=data_output_format, **kwargs)
 
     def predict(self):
+        """
+        Predicts the next estimate of the bounding box of the track.
+
+        Returns:
+            numpy.ndarray: Bounding box pixel coordinates as (xmin, ymin, width, height) of the track.
+
+        """
         s = self.kf.predict()
         xmid, ymid = s[0], s[3]
         w, h = self.bbox[2], self.bbox[3]
```
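A short usage sketch of `KFTrackCentroid`, relying only on the constructor signature and the `predict()` contract documented in this diff (the noise scales are illustrative values):

```python
# Hedged sketch: create a centroid Kalman track and step its prediction.
# Only the constructor signature and predict() contract from this diff are used.
import numpy as np
from motrackers.track import KFTrackCentroid

track = KFTrackCentroid(
    track_id=7,
    frame_id=0,
    bbox=np.array([50, 60, 120, 80]),  # (xmin, ymin, width, height)
    detection_confidence=0.9,
    class_id=1,
    process_noise_scale=1.0,           # illustrative covariance scales
    measurement_noise_scale=1.0,
)

predicted_bbox = track.predict()  # numpy.ndarray: (xmin, ymin, width, height)
print(predicted_bbox)
```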

motrackers/tracker_img.py (+63, new file)

```python
import argparse
import time
import cv2


ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", type=str, default='../examples/video_data/cars.mp4', help="path to input video file")
ap.add_argument("-t", "--tracker", type=str, default="kcf", help="OpenCV object tracker type")
args = vars(ap.parse_args())

OPENCV_OBJECT_TRACKERS = {
    "csrt": cv2.TrackerCSRT_create,
    "kcf": cv2.TrackerKCF_create,
    "boosting": cv2.TrackerBoosting_create,
    "mil": cv2.TrackerMIL_create,
    "tld": cv2.TrackerTLD_create,
    "medianflow": cv2.TrackerMedianFlow_create,
    "mosse": cv2.TrackerMOSSE_create
}

trackers = cv2.MultiTracker_create()

vs = cv2.VideoCapture(args["video"])

while True:
    ok, frame = vs.read()
    if not ok:
        break

    # resize the frame (so we can process it faster)
    frame = cv2.resize(frame, (600, 400))

    (success, boxes) = trackers.update(frame)
    print(success)

    for box in boxes:
        (x, y, w, h) = [int(v) for v in box]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the 's' key is selected, we are going to "select" a bounding
    # box to track
    if key == ord("s"):
        # select the bounding box of the object we want to track (make
        # sure you press ENTER or SPACE after selecting the ROI)
        box = cv2.selectROI("Frame", frame, fromCenter=False, showCrosshair=True)

        # create a new object tracker for the bounding box and add it to our multi-object tracker
        tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
        trackers.add(tracker, frame, box)

    elif key == ord("q"):  # if the `q` key was pressed, break from the loop
        break

    time.sleep(0.1)

# if we are using a webcam, release the pointer
vs.release()

# close all windows
cv2.destroyAllWindows()
```
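The script can be run as, for example, `python motrackers/tracker_img.py --video ../examples/video_data/cars.mp4 --tracker csrt`. Note the constructors in `OPENCV_OBJECT_TRACKERS` match opencv-contrib-python builds before 4.5; later releases moved them (and `MultiTracker`) into the `cv2.legacy` namespace. A hedged compatibility shim, an environment note rather than part of this commit:

```python
# Hedged shim: resolve tracker constructors on both old and new OpenCV builds.
# In opencv-contrib-python >= 4.5 the single-object trackers and MultiTracker
# live in the cv2.legacy namespace; earlier builds expose them on cv2 directly.
import cv2

_ns = getattr(cv2, "legacy", cv2)

def create_tracker(name):
    factories = {
        "csrt": getattr(_ns, "TrackerCSRT_create", None),
        "kcf": getattr(_ns, "TrackerKCF_create", None),
        "mil": getattr(_ns, "TrackerMIL_create", None),
        "medianflow": getattr(_ns, "TrackerMedianFlow_create", None),
        "mosse": getattr(_ns, "TrackerMOSSE_create", None),
    }
    factory = factories.get(name)
    if factory is None:
        raise ValueError(f"tracker '{name}' is unavailable in this OpenCV build")
    return factory()

multi = _ns.MultiTracker_create()  # same namespace rule applies to MultiTracker
```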
