|
1 |
| -import cv2 |
2 |
| -import numpy |
3 |
| -from picamera2 import Picamera2, Preview, MappedArray |
4 |
| -from libcamera import Transform |
5 |
| -import time |
| 1 | +import argparse |
6 | 2 | import logging
|
7 |
| -import inspect |
8 |
| -import socket |
9 |
| -import struct |
10 |
| - |
11 |
| -# from keyboard import is_pressed |
12 |
| - |
13 |
| -picam2 = Picamera2() |
14 |
| -config = picam2.create_preview_configuration( |
15 |
| - main={"size": (320, 240)}, transform=Transform(hflip=1) |
| 3 | +from time import time, perf_counter, sleep |
| 4 | +from dataclasses import dataclass |
| 5 | +import socket # udp networking |
| 6 | +import struct # binary packing |
| 7 | +from picamera2 import Picamera2, Preview, MappedArray # Raspberry Pi camera |
| 8 | +from libcamera import Transform # taking selfies, so used to mirror image |
| 9 | +import cv2 # OpenCV, for blob detection |
| 10 | + |
print("\n\nSERVER: Starting PhilNav\n")

preview_text = "Adjust the camera controls listed with --help such that you get a mostly black picture with bright white reflective IR sticker in the center. The controls default to what worked for me via trial and error."

# Command-line interface: where to send mouse deltas, plus camera tuning knobs.
parser = argparse.ArgumentParser()
parser.add_argument("--ip", required=True, type=str,
                    help="remote ip address of PC that will receive mouse movements")
parser.add_argument("-p", "--port", type=int, default=4245,
                    help="send to remote port, default 4245")
parser.add_argument("-v", "--verbose", action="store_true",
                    help="provide verbose logging")
parser.add_argument("--preview", action="store_true",
                    help="Use when logged into Raspberry Pi Gui; will show camera preview. " + preview_text)
parser.add_argument("--fps", type=float, default=75.0,
                    help="camera FrameRate, default 75")
parser.add_argument("--width", type=int, default=320,
                    help="camera resolution width, default 320")
parser.add_argument("--height", type=int, default=240,
                    help="camera resolution height, default 240")
parser.add_argument("--gain", type=float, default=2.0,
                    help="camera AnalogueGain, default 2.0")
parser.add_argument("--brightness", type=float, default=-0.4,
                    help="camera Brightness, default -0.4")
parser.add_argument("--contrast", type=float, default=5.0,
                    help="camera Contrast, default 5.0")
parser.add_argument("--exposure", type=float, default=1.0,
                    help="camera ExposureValue, default 1.0")
parser.add_argument("--saturation", type=float, default=0.0,
                    help="camera Saturation, default 0.0")
parser.add_argument("--no-hflip", action="store_true",
                    help="images are selfies and flipped horizontally by default")
parser.add_argument("--blob-color", type=int, default=255,
                    help="OpenCV blob detection color, default 255 (white; I believe it's grayscale 0-255)")
args = parser.parse_args()

# Turn on debug-level logging when requested.
if args.verbose:
    logging.getLogger().setLevel(logging.DEBUG)
    logging.info(" Logging verbosely\n")

# Tell the user how to tune the camera (preview mode) or how to get started.
if args.preview:
    print(preview_text + "\n")
else:
    print("If running PhilNav for the first time, use --help and --preview to set up your camera.\n")
| 72 | + |
# The camera is set up in two parts: a "configuration" (resolution, transform)
# chosen before configure(), and runtime "controls" (gain, brightness, etc.)
# applied with set_controls(). See the Picamera2 manual for the distinction.
config_main = {
    "size": (args.width, args.height)
}
if not args.no_hflip:
    # Selfie view: mirror the image horizontally unless disabled.
    config_main["transform"] = Transform(hflip=1)
picam2 = Picamera2()
config = picam2.create_preview_configuration(main=config_main)
picam2.configure(config)

# Runtime camera controls, all tunable from the command line.
controls_main = {
    "AnalogueGain": args.gain,
    "Brightness": args.brightness,
    "Contrast": args.contrast,
    "ExposureValue": args.exposure,
    "Saturation": args.saturation,
    "FrameRate": args.fps
}
picam2.set_controls(controls_main)

# QT preview shows the live feed on the Pi's GUI; NULL runs headless but still
# drives the capture pipeline.
if args.preview:
    picam2.start_preview(Preview.QT)
else:
    picam2.start_preview(Preview.NULL)

picam2.start()
# BUG FIX: this file does `from time import time, ...`, so `time` is a
# function and `time.sleep(1)` raised AttributeError. Use the imported sleep().
sleep(1)  # let camera warm up
32 | 103 |
|
# Configure OpenCV simple blob detection. The IR sticker should appear as a
# single bright dot, so we match on blob color (grayscale; default 255 = white).
blob_params = cv2.SimpleBlobDetector_Params()
blob_params.blobColor = args.blob_color
detector = cv2.SimpleBlobDetector_create(blob_params)

# One UDP socket, reused for every datagram sent to the receiving computer.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
43 |
| -frame = 0 |
44 |
| -x = 0 |
45 |
| -y = 0 |
46 |
| -keypoints = None |
47 |
| -started_at_ms = time.time() * 1000 |
48 |
| -frame_start_ms = time.time() * 1000 |
# Shared mutable state carried from frame to frame, plus stats for debugging.
# Never instantiated -- used purely as a namespace of class attributes.
@dataclass
class PhilNav:
    keypoint = None            # last detected blob, kept for debugging inspection
    frame_num = 0              # total frames processed since startup
    x = 0.0                    # blob x coordinate from the previous frame
    y = 0.0                    # blob y coordinate from the previous frame
    started_at = time()        # wall-clock time at program start
    frame_started_at = time()  # wall-clock time when the current frame began
    frame_start = perf_counter()  # monotonic timer for per-frame duration
49 | 123 |
|
50 | 124 |
|
# This is where the Magic happens! The camera should pick up nothing but a
# white dot from your reflective IR sticker. OpenCV blob detection tracks its
# (x, y) coordinates and the deltas are sent to the receiving computer, which
# moves the mouse.
def blobby(request):
    # MappedArray gives direct access to the captured camera frame
    with MappedArray(request, "main") as m:
        PhilNav.frame_num += 1
        x_diff = 0.0
        y_diff = 0.0

        # Track the IR sticker
        keypoints = detector.detect(m.array)

        if args.preview:
            # Draw red circles around the detected blobs, in-place on array
            cv2.drawKeypoints(
                m.array,  # source image
                keypoints,
                m.array,  # dest image
                (255, 0, 0),  # RGB
                # For each keypoint the circle around keypoint with keypoint
                # size and orientation will be drawn.
                cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
            )

        # Ideally should be exactly one keypoint
        if len(keypoints) > 0:
            # Compare the (x, y) coordinates from last frame
            kp = PhilNav.keypoint = keypoints[0]
            x_new, y_new = kp.pt
            x_diff = x_new - PhilNav.x
            y_diff = y_new - PhilNav.y
            PhilNav.x = x_new
            PhilNav.y = y_new

            # If the mouse has moved smoothly, but not "jumped"...
            # Jumping can occur if multiple blobs are detected, such as other
            # IR reflective surfaces in the camera's view, like glasses lenses.
            if (
                (x_diff**2 > 0 or y_diff**2 > 0)
                and x_diff**2 < 10
                and y_diff**2 < 10
            ):
                # Send the (x_diff, y_diff) to the receiving computer.
                # For performance stats, I'm also sending the frame time on
                # Raspberry Pi; both absolute and relative. Absolute time
                # doesn't work well because the Raspberry Pi clock and PC
                # clock will not be synced to within 1 ms of each other.
                #
                # 48 bytes of 6 doubles in binary C format. Why? Because it's
                # OpenTrack's protocol.
                # struct.pack('dddddd', x, y, z, pitch, yaw, roll)
                # PhilNav uses x, y as x_diff, y_diff and moves the mouse
                # relative to its current position.
                # https://github.com/opentrack/opentrack/issues/747
                time_spent = perf_counter() - PhilNav.frame_start
                MESSAGE = struct.pack(
                    "dddddd", x_diff, y_diff, 0, 0, time_spent, PhilNav.frame_started_at)
                sock.sendto(MESSAGE, (args.ip, args.port))

        # Log roughly once per second.
        # BUG FIX: was `PhilNav.frame_nume` (typo) -- that attribute does not
        # exist, so this raised AttributeError on every single frame.
        if PhilNav.frame_num % args.fps == 0:
            fps = PhilNav.frame_num / (time() - PhilNav.started_at)
            ms = (perf_counter() - PhilNav.frame_start) * 1000
            # Kept on one line: a newline inside an f-string replacement field
            # is only legal on Python 3.12+ (PEP 701).
            logging.info(
                f"Frame: {PhilNav.frame_num}, Diff: ({int(x_diff)}, {int(y_diff)}), FPS: {int(fps)}, loc ms: {int(ms)}"
            )

        # I'm setting these at the end rather than the beginning, because I
        # want to make sure to include the time capturing the image from the
        # camera.
        PhilNav.frame_started_at = time()
        PhilNav.frame_start = perf_counter()
| 198 | + |
| 199 | + |
# Register the per-frame callback, then idle until the user hits Ctrl-C.
try:
    picam2.pre_callback = blobby
    sleep(10000000)  # effectively forever (a bit over one hundred days)
except KeyboardInterrupt:
    pass

# Shut the camera down cleanly on exit.
picam2.stop_preview()
picam2.stop()
picam2.close()
|
119 |
| - |
120 |
| -# array = picam2.capture_array("main") |
121 |
| -# cv2.imshow("img", array); cv2.waitKey(0) |
122 |
| -# |
123 |
| -# src = cv2.imread("/home/philip/test.jpg", cv2.IMREAD_GRAYSCALE); |
124 |
| -# |
125 |
| -# params = cv2.SimpleBlobDetector_Params() |
126 |
| -# params.blobColor = 255 |
127 |
| -# detector = cv2.SimpleBlobDetector_create(params) |
128 |
| -# |
129 |
| -# keypoints = detector.detect(src); keypoints |
130 |
| -# im_with_keypoints = cv2.drawKeypoints(src, keypoints, numpy.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) |
131 |
| -# cv2.imshow("img", im_with_keypoints); cv2.waitKey(0) |
132 |
| - |
133 |
| -cv2.destroyAllWindows() |
|
0 commit comments