Skip to content

Commit 2eb98c8

Browse files
authored
camshift and meanshift and webcam drivers scripts added
1 parent a282abe commit 2eb98c8

File tree

4 files changed

+401
-2
lines changed

4 files changed

+401
-2
lines changed

CAMShiftTrack.py

+148
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,148 @@
1+
'''
Created on Sep 9, 2017

@author: inayat

Webcam driver script: detect a face once with dlib's frontal face
detector, then track it live with the CAMShift tracker, showing both
the back-projection image and the tracked window (axis-aligned and
rotated). Press ESC to quit.
'''

# import the required packages
from imutils.video import WebcamVideoStream
#from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import cv2

from utils.fps2 import FPS2

import dlib

from trackers.camshifttracker import CAMShiftTracker


if __name__ == '__main__':

    print("[info] starting to read a webcam ...")
    capWebCam = WebcamVideoStream(0).start()
    time.sleep(1.0)  # give the camera sensor time to warm up

    # initialize dlib face detector
    frontFaceDetector = dlib.get_frontal_face_detector()

    # CAMShift tracker and its current search window; both are created
    # only after the first successful face detection
    camShifTracker = None
    curWindow = None

    # frame-per-second (FPS) counter; started once tracking begins so
    # the timing excludes the initial detection phase. Kept as None
    # until then so the shutdown code below can tell whether tracking
    # ever started (previously this raised NameError when the user
    # quit before a face was found).
    fps = None

    # detect the face only once, in the first frame that contains one
    boolDetectFaceinfirsFrameOnly = True

    # loop over the frames obtained from the webcam
    while True:
        # grab each frame from the threaded stream and mirror it so
        # the display behaves like a mirror
        frame1 = capWebCam.read()
        if frame1 is None:
            # the threaded stream can briefly return no frame;
            # skip it instead of crashing in cv2.flip
            continue
        frame = cv2.flip(frame1, 1)

        if boolDetectFaceinfirsFrameOnly:
            # run the detector; the second argument 0 means no
            # image upsampling
            faceRect = frontFaceDetector(frame, 0)
            if(not len(faceRect)):
                print("[info] Face not found")
                continue

            # start the frame per second (FPS) counter
            fps = FPS2().start()

            bbox = faceRect[0]

            # convert dlib rect to an OpenCV (x, y, w, h) window
            curWindow = (int(bbox.left()), int(bbox.top()),
                         int(bbox.right() - bbox.left()),
                         int(bbox.bottom() - bbox.top()))

            # initialize the CAMShift tracker from the detected window
            camShifTracker = CAMShiftTracker(curWindow, frame)

            boolDetectFaceinfirsFrameOnly = False
            continue

        # update the tracker with the new frame
        camShifTracker.computeNewWindow(frame)

        x, y, w, h = camShifTracker.getCurWindow()

        bkprojectImage = camShifTracker.getBackProjectedImage(frame)
        cv2.imshow("CAMShift Face in Back Project Image", bkprojectImage)

        # display the current (axis-aligned) window
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2,
                      cv2.LINE_AA)

        # display the rotated window estimated by CAMShift
        rotatedWindow = camShifTracker.getRotatedWindow()
        cv2.polylines(frame, [rotatedWindow], True, (0, 255, 0), 2,
                      cv2.LINE_AA)

        fps.update()
        cv2.putText(frame, "FPS: {:.2f}".format(fps.fps()),
                    (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

        # show the frame and update the FPS counter
        cv2.imshow("CAMShift Face Tracking", frame)

        # ESC quits
        k = cv2.waitKey(10) & 0xff
        if k == 27:
            break

    # stop the timer and display FPS information; fps is None when the
    # user quit before any face was detected
    if fps is not None:
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    cv2.destroyAllWindows()
    capWebCam.stop()

Readme.md

+35-2
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11

22

3-
# Tracking Using OpenCV and Python-3.5
3+
# Tracking Using OpenCV >= 3.2 and Python-3.5
44

55

66
In this repository I will give some implementation of tracking algorithms.
@@ -69,13 +69,46 @@ This example, which is a single person tracker, shows the use of Kalman Filter
6969
---
7070
---
7171

72-
## Meanshift and Camshift
72+
## Meanshift and CAMshift
7373

74+
The MeanShift algorithm looks to object tracking as mode-seeking problem. Mean-shift was first presented by [Fukunaga et al in 1975](http://ieeexplore.ieee.org/document/1055330/). It is a non-parametric approach for finding the maxima of a density function. The process is an iterative approach that involves calculating and shifting the mean of a set of data points, which fall in a circle, in the direction of the mean shift vector and thus it is called Meanshift. The radius of the circle is also called window size.
75+
76+
The value of the radius does matter. A very small value will generate local maxima, while a very large value of the radius will allow the algorithm to find the true maxima. If there is more than one mode, they will be merged. To handle this problem, the radius of the circle needs to be changed adaptively. This is done by the CAMshift algorithm (Continuously Adaptive Meanshift).
77+
78+
For the window located at \\(x\\), the center of mass \\(m(x)\\) of the neighboring points \\(x_i\\) is calculated as
79+
80+
$$m(x) = \frac{\sum_i{K(x - x_i)}x_i}{\sum_i{K(x - x_i)}}$$
81+
where \\(K\\) is the kernel used to decide the window size and how the weights of different points will be accumulated
82+
83+
84+
### Tracking using MeanShift in OpenCV
85+
We will use the following steps for live tracking of face using web cam:
86+
87+
1. Detect the face, which we want to track, using the dlib face detector. Compute the histogram of the face region using the Hue channel of the HSV color space. However, both H and S can be used. It is worth noting that color information is very sensitive to lighting variations.
88+
89+
use calcHist() function of OpenCV for computing the histogram and normalize the values in the range [0,255]
90+
91+
92+
2. Find the back-projected image for every new frame using the calcBackProject() function
93+
94+
3. Use the meanShift() function to find the maxima in the back-projected image in the neighborhood of the old position. This algorithm finds the mode of the back-projected image, which is a confidence map of the similarity between the color distribution of the face and the new image.
95+
96+
#### Remarks
97+
98+
The MeanShift tracker sometimes fails when the scale of the object of interest changes, because the tracker is initialized to the scale of the object in the first frame. Later, when the scale of the object changes, the tracker window size no longer matches the actual size of the object. This problem is handled by the CAMshift tracker.
99+
100+
#### usage:
101+
102+
$ python meanShiftTrack.py
103+
104+
### Tracking using CAMshift in OpenCV
105+
CAMshift tries to tackle the scale problem by using varying window size for applying meanshift. CamShift was developed by [Gary Bradski in 1998](http://dl.acm.org/citation.cfm?id=836819)
74106
___
75107
___
76108

77109

78110
## *References*
111+
79112
1. [Pyimagesearch Adrian Rosebrock](http://www.pyimagesearch.com/)
80113

81114
2. [Learn OpenCV, Satya Mallick](http://www.learnopencv.com)

fastframeratewebcam.py

+77
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,77 @@
1+
'''
Created on Sep 9, 2017

@author: inayat

Webcam driver script: read frames from a threaded webcam stream
(imutils WebcamVideoStream) and display them mirrored with a live
FPS overlay. Press ESC to quit.
'''

# import the required packages
from imutils.video import WebcamVideoStream
#from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import cv2

from utils.fps2 import FPS2


if __name__ == '__main__':

    print("[info] starting to read a webcam ...")
    capWebCam = WebcamVideoStream(0).start()
    time.sleep(1.0)  # give the camera sensor time to warm up

    # start the frame per second (FPS) counter
    fps = FPS2().start()

    # loop over the frames obtained from the webcam
    while True:
        # grab each frame from the threaded stream and mirror it so
        # the display behaves like a mirror
        frame1 = capWebCam.read()
        if frame1 is None:
            # the threaded stream can briefly return no frame;
            # skip it instead of crashing in cv2.flip
            continue
        frame = cv2.flip(frame1, 1)

        fps.update()
        cv2.putText(frame, "FPS: {:.2f}".format(fps.fps()),
                    (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

        # show the frame and update the FPS counter
        cv2.imshow("Face Tracking", frame)

        # ESC quits
        k = cv2.waitKey(10) & 0xff
        if k == 27:
            break

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    cv2.destroyAllWindows()
    capWebCam.stop()

0 commit comments

Comments
 (0)