# Import OpenCV and numpy
import cv2
import numpy as np
+ from math import tan, pi

"""
In this section, we will set up the functions that will translate the camera
@@ -112,17 +113,35 @@ def callback(frame):
pipe.start(cfg, callback)

try:
+     # Set up an OpenCV window to visualize the results
+     WINDOW_TITLE = 'Realsense'
+     cv2.namedWindow(WINDOW_TITLE, cv2.WINDOW_NORMAL)
+
+     # Configure the OpenCV stereo algorithm. See
+     # https://docs.opencv.org/3.4/d2/d85/classcv_1_1StereoSGBM.html for a
+     # description of the parameters
+     window_size = 5
+     min_disp = 0
+     # must be divisible by 16
+     num_disp = 112 - min_disp
+     max_disp = min_disp + num_disp
+     stereo = cv2.StereoSGBM_create(minDisparity = min_disp,
+                                    numDisparities = num_disp,
+                                    blockSize = 16,
+                                    P1 = 8*3*window_size**2,
+                                    P2 = 32*3*window_size**2,
+                                    disp12MaxDiff = 1,
+                                    uniquenessRatio = 10,
+                                    speckleWindowSize = 100,
+                                    speckleRange = 32)
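+     # The matcher searches disparities in [min_disp, min_disp + num_disp); the
+     # P1/P2 smoothness penalties follow the 8*cn*window_size**2 and
+     # 32*cn*window_size**2 form recommended in the OpenCV documentation
+     # (with the channel count cn taken as 3)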
+
    # Retrieve the stream and intrinsic properties for both cameras
    profiles = pipe.get_active_profile()
    streams = {"left"  : profiles.get_stream(rs.stream.fisheye, 1).as_video_stream_profile(),
               "right" : profiles.get_stream(rs.stream.fisheye, 2).as_video_stream_profile()}
    intrinsics = {"left"  : streams["left"].get_intrinsics(),
                  "right" : streams["right"].get_intrinsics()}

-     # Set up an OpenCV window to visualize the results
-     WINDOW_TITLE = 'Realsense'
-     cv2.namedWindow(WINDOW_TITLE, cv2.WINDOW_NORMAL)
-

    # Print information about both cameras
    print("Left camera:",  intrinsics["left"])
    print("Right camera:", intrinsics["right"])
@@ -134,78 +153,65 @@ def callback(frame):
    D_right = fisheye_distortion(intrinsics["right"])
    (width, height) = (intrinsics["left"].width, intrinsics["left"].height)

-     # Get thre relative extrinsics between the left and right camera
+     # Get the relative extrinsics between the left and right camera
    (R, T) = get_extrinsics(streams["left"], streams["right"])

-     # Use OpenCV stereoRectify to compute a rectifying transform composed of two
-     # rotations, two projection matrices, and a Q matrix that transforms
-     # disparity to 3D points
-     (R_left, R_right, P_left, P_right, Q) = \
-         cv2.fisheye.stereoRectify(K1 = K_left,
-                                   D1 = D_left,
-                                   K2 = K_right,
-                                   D2 = D_right,
-                                   imageSize = (width, height),
-                                   R = R,
-                                   tvec = T,
-                                   flags = cv2.CALIB_ZERO_DISPARITY,
-                                   newImageSize = (width, height),
-                                   balance = 0,
-                                   fov_scale = 1.0)[0:5]
-
-     # Make sure the undisorted principal point is in the center of the image
-     P_left[0][2] = P_right[0][2] = width / 2
-     P_left[1][2] = P_right[1][2] = height / 2
+     # We need to determine what focal length our undistorted images should have
+     # in order to set up the camera matrices for initUndistortRectifyMap. We
+     # could use stereoRectify, but here we show how to derive these projection
+     # matrices from the calibration and a desired height and field of view
+
+     # We calculate the undistorted focal length:
+     #
+     #         h
+     # -----------------
+     #  \      |      /
+     #    \    | f  /
+     #     \   |   /
+     #      \ fov /
+     #        \|/
+     stereo_fov_rad = 90 * (pi/180)  # 90 degree desired fov
+     stereo_height_px = 300          # 300x300 pixel stereo output
+     stereo_focal_px = stereo_height_px/2 / tan(stereo_fov_rad/2)
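+     # With the values above: tan(45 deg) = 1, so stereo_focal_px = 300/2/1 = 150.0 px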
+
+     # We set the left rotation to identity and the right rotation to the
+     # rotation between the cameras
+     R_left = np.eye(3)
+     R_right = R
+
+     # The stereo algorithm needs max_disp extra pixels in order to produce valid
+     # disparity on the desired output region. This changes the width, but the
+     # center of projection should be on the center of the cropped image
+     stereo_width_px = stereo_height_px + max_disp
+     stereo_size = (stereo_width_px, stereo_height_px)
+     stereo_cx = (stereo_height_px - 1)/2 + max_disp
+     stereo_cy = (stereo_height_px - 1)/2
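+     # With num_disp = 112 this gives max_disp = 112, stereo_size = (412, 300),
+     # and a principal point centred on the 300x300 crop but shifted right by
+     # the 112 extra columns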
+
+     # Construct the left and right projection matrices, the only difference is
+     # that the right projection matrix should have a shift along the x axis of
+     # baseline*focal_length
+     P_left = np.array([[stereo_focal_px, 0, stereo_cx, 0],
+                        [0, stereo_focal_px, stereo_cy, 0],
+                        [0,               0,         1, 0]])
+     P_right = P_left.copy()
+     P_right[0][3] = T[0]*stereo_focal_px
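+     # T[0] is the x component of the translation between the fisheye imagers
+     # in metres (its magnitude is the stereo baseline), so the two projection
+     # matrices differ only in the P[0][3] = Tx*fx term of the usual rectified
+     # stereo convention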
+
+     # Construct Q for use with cv2.reprojectImageTo3D. Subtract max_disp from x
+     # since we will crop the disparity later
+     Q = np.array([[1, 0,       0, -(stereo_cx - max_disp)],
+                   [0, 1,       0, -stereo_cy],
+                   [0, 0,       0, stereo_focal_px],
+                   [0, 0, -1/T[0], 0]])
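+     # Q is laid out for cv2.reprojectImageTo3D: for example,
+     # cv2.reprojectImageTo3D(disparity, Q) would return per-pixel XYZ in metres,
+     # with depth = stereo_focal_px * |baseline| / disparity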

    # Create an undistortion map for the left and right camera which applies the
    # rectification and undoes the camera distortion. This only has to be done
    # once
    m1type = cv2.CV_32FC1
-     (lm1, lm2) = cv2.fisheye.initUndistortRectifyMap(K_left, D_left, R_left, P_left, (width, height), m1type)
-     (rm1, rm2) = cv2.fisheye.initUndistortRectifyMap(K_right, D_right, R_right, P_right, (width, height), m1type)
+     (lm1, lm2) = cv2.fisheye.initUndistortRectifyMap(K_left, D_left, R_left, P_left, stereo_size, m1type)
+     (rm1, rm2) = cv2.fisheye.initUndistortRectifyMap(K_right, D_right, R_right, P_right, stereo_size, m1type)
    undistort_rectify = {"left"  : (lm1, lm2),
                         "right" : (rm1, rm2)}

-     # Configure the OpenCV stereo algorithm. See
-     # https://docs.opencv.org/3.4/d2/d85/classcv_1_1StereoSGBM.html for a
-     # description of the parameters
-     window_size = 3
-     min_disp = 0
-     # must be divisible by 16
-     num_disp = 112 - min_disp
-     max_disp = min_disp + num_disp
-     stereo = cv2.StereoSGBM_create(minDisparity = min_disp,
-                                    numDisparities = num_disp,
-                                    blockSize = 16,
-                                    P1 = 8*3*window_size**2,
-                                    P2 = 32*3*window_size**2,
-                                    disp12MaxDiff = 1,
-                                    uniquenessRatio = 10,
-                                    speckleWindowSize = 100,
-                                    speckleRange = 32)
-
-     # Since the field of view of T265 is very wide, stereo computed on the edges
-     # of the images are not very informative. So, here we set up a region to
-     # crop in the center of the image. We also need to modify Q since we have
-     # adjusted the center of the disparity image
-     half_center_size = int((height/3)/2)
-     (rs,re) = (int(height/2 - half_center_size), int(height/2 + half_center_size))
-     (cs,ce) = (int(width/2 - half_center_size), int(width/2 + half_center_size))
-     (Q[0][3], Q[1][3]) = (-half_center_size, -half_center_size)
-
-     # Calculate the undistorted field of view
-     import math
-     focal_length = Q[2][3]
-     normalized_half_height = (re - rs)/focal_length/2
-     normalized_half_width = (ce - cs)/focal_length/2
-     horizontal_fov = 2*math.atan(normalized_half_width)*180/math.pi
-     vertical_fov = 2*math.atan(normalized_half_height)*180/math.pi
-     print("Undistorted FOV: %.1fdeg wide, %.1fdeg high" % (horizontal_fov, vertical_fov))
-
-     # start with max_disp more pixels if possible, since SGBM leaves an empty max_disp pixels on the left
-     cs_offset = min(max_disp,cs)
-     cs -= cs_offset
-
    mode = "stack"
    while True:
        # Check if the camera has acquired any frames
@@ -225,22 +231,22 @@ def callback(frame):
            center_undistorted = {"left" : cv2.remap(src = frame_copy["left"],
                                          map1 = undistort_rectify["left"][0],
                                          map2 = undistort_rectify["left"][1],
-                                           interpolation = cv2.INTER_LINEAR)[rs:re, cs:ce],
+                                           interpolation = cv2.INTER_LINEAR),
                                  "right" : cv2.remap(src = frame_copy["right"],
                                          map1 = undistort_rectify["right"][0],
                                          map2 = undistort_rectify["right"][1],
-                                           interpolation = cv2.INTER_LINEAR)[rs:re, cs:ce]}
+                                           interpolation = cv2.INTER_LINEAR)}
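+             # Each rectified image is now stereo_size (412x300); the leftmost
+             # max_disp columns exist only to give SGBM a full search range and
+             # are cropped away after matching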

            # compute the disparity on the center of the frames and convert it to a pixel disparity (divide by DISP_SCALE=16)
            disparity = stereo.compute(center_undistorted["left"], center_undistorted["right"]).astype(np.float32) / 16.0

            # re-crop just the valid part of the disparity
-             disparity = disparity[:,cs_offset:]
+             disparity = disparity[:,max_disp:]
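+             # After dropping the first max_disp columns the disparity map is
+             # 300x300, matching the desired output region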

            # convert disparity to 0-255 and color it
            disp_vis = 255*(disparity - min_disp)/ num_disp
            disp_color = cv2.applyColorMap(cv2.convertScaleAbs(disp_vis,1), cv2.COLORMAP_JET)
-             color_image = cv2.cvtColor(center_undistorted["left"][:,cs_offset:], cv2.COLOR_GRAY2RGB)
+             color_image = cv2.cvtColor(center_undistorted["left"][:,max_disp:], cv2.COLOR_GRAY2RGB)
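+             # disp_vis maps the search range [min_disp, min_disp + num_disp) onto
+             # 0-255 before the JET colormap is applied; the left image is cropped
+             # the same way so it stays aligned with disp_color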

            if mode == "stack":
                cv2.imshow(WINDOW_TITLE, np.hstack((color_image, disp_color)))