Skip to content

Commit 89bf73f

Browse files
committed
Move the template into a new directory installed under the share/ target, and look up assets via the package share path instead of the file's absolute path. Add start_camera and stop_camera services to start and stop the camera pipeline.
1 parent 78cd441 commit 89bf73f

File tree

4 files changed

+54
-9
lines changed

4 files changed

+54
-9
lines changed

turtlebot4_vision_tutorials/setup.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616
(os.path.join('share', package_name, 'models'), glob('models/*.blob')),
1717
(os.path.join('share', package_name, 'launch'), glob('launch/*.launch.py')),
1818
(os.path.join('share', package_name, 'config'), glob('config/*.yaml')),
19+
(os.path.join('share', package_name, 'templates'), glob('templates/*.py')),
1920
],
2021
install_requires=['setuptools'],
2122
zip_safe=True,
Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -90,14 +90,14 @@ def pd_postprocess(inference, crop_region):
9090
xnorm.append(xn)
9191
ynorm.append(yn)
9292
scores.append(inference[3*i+2])
93-
x.append(int(xmin + xn * size))
94-
y.append(int(ymin + yn * size))
95-
93+
x.append(int(xmin + xn * size))
94+
y.append(int(ymin + yn * size))
95+
9696
next_crop_region = determine_crop_region(scores, x, y) if ${_smart_crop} else init_crop_region
9797
return x, y, xnorm, ynorm, scores, next_crop_region
9898

9999
node.warn("Processing node started")
100-
# Defines the default crop region (pads the full image from both sides to make it a square image)
100+
# Defines the default crop region (pads the full image from both sides to make it a square image)
101101
# Used when the algorithm cannot reliably determine the crop region from the previous frame.
102102
init_crop_region = ${_init_crop_region}
103103
crop_region = init_crop_region

turtlebot4_vision_tutorials/turtlebot4_vision_tutorials/MovenetDepthaiEdge.py

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -10,13 +10,16 @@
1010

1111
from turtlebot4_vision_tutorials.FPS import FPS
1212

13-
SCRIPT_DIR = Path(__file__).resolve().parent
14-
MOVENET_LIGHTNING_MODEL = os.path.join(SCRIPT_DIR.parent,
13+
from ament_index_python.packages import get_package_share_directory
14+
15+
SHARE_DIR = get_package_share_directory('turtlebot4_vision_tutorials')
16+
MOVENET_LIGHTNING_MODEL = os.path.join(SHARE_DIR,
1517
"models",
1618
"movenet_singlepose_lightning_U8_transpose.blob")
17-
MOVENET_THUNDER_MODEL = os.path.join(SCRIPT_DIR.parent,
19+
MOVENET_THUNDER_MODEL = os.path.join(SHARE_DIR,
1820
"models",
1921
"movenet_singlepose_thunder_U8_transpose.blob")
22+
TEMPLATE_DIR = os.path.join(SHARE_DIR, 'templates')
2023

2124
# Dictionary that maps from joint names to keypoint indices.
2225
KEYPOINT_DICT = {
@@ -308,7 +311,7 @@ def build_processing_script(self):
308311
which is a python template
309312
'''
310313
# Read the template
311-
with open(os.path.join(SCRIPT_DIR, 'template_processing_script.py'), 'r') as file:
314+
with open(os.path.join(TEMPLATE_DIR, 'template_processing_script.py'), 'r') as file:
312315
template = Template(file.read())
313316

314317
# Perform the substitution
@@ -346,7 +349,6 @@ def pd_postprocess(self, inference):
346349
return body
347350

348351
def next_frame(self):
349-
350352
self.fps.update()
351353

352354
# Get the device camera frame if wanted

turtlebot4_vision_tutorials/turtlebot4_vision_tutorials/pose_detection.py

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
import rclpy
2525
from rclpy.node import Node
2626
from std_msgs.msg import String as string_msg
27+
from std_srvs.srv import Trigger
2728
from sensor_msgs.msg import Image
2829

2930
from irobot_create_msgs.msg import LightringLeds
@@ -113,6 +114,20 @@ def __init__(self):
113114
10,
114115
)
115116

117+
self._is_paused = False
118+
119+
self.start_camera_srv = self.create_service(
120+
Trigger,
121+
'start_camera',
122+
self.handle_start_camera
123+
)
124+
125+
self.stop_camera_srv = self.create_service(
126+
Trigger,
127+
'stop_camera',
128+
self.handle_stop_camera
129+
)
130+
116131
timer_period = 0.0833 # seconds
117132
self.timer = self.create_timer(timer_period, self.pose_detect)
118133

@@ -133,6 +148,9 @@ def __init__(self):
133148
# self.button_1_function()
134149

135150
def pose_detect(self):
151+
if self._is_paused:
152+
return
153+
136154
if not (POSE_DETECT_ENABLE):
137155
return
138156
# Run movenet on next frame
@@ -278,6 +296,30 @@ def autonomous_lights(self):
278296
# Publish the message
279297
self.lightring_publisher.publish(lightring_msg)
280298

299+
def handle_start_camera(self, req, resp):
    """Trigger service callback: resume the camera pipeline if it is paused.

    Rebuilds the MovenetDepthai pipeline (closed by `handle_stop_camera`)
    and clears the paused flag.

    :param req: std_srvs.srv.Trigger.Request (unused; Trigger carries no data)
    :param resp: std_srvs.srv.Trigger.Response to populate
    :return: the populated response (success=True when restarted,
             success=False with a message when already running)
    """
    if self._is_paused:
        self._is_paused = False
        # Recreate the pipeline with the same settings used at node startup.
        # Upstream wrote crop/smart_crop as `not 'store_true'`, an argparse
        # copy-paste that always evaluates to False — spell it explicitly.
        self.pose = MovenetDepthai(input_src='rgb',
                                   model='thunder',
                                   score_thresh=0.3,
                                   crop=False,
                                   smart_crop=False,
                                   internal_frame_height=432)
        resp.success = True
    else:
        # Trigger.Response defaults success to False; set it explicitly
        # so the no-op branch reads unambiguously.
        resp.success = False
        resp.message = 'Device already running'
    return resp
312+
313+
def handle_stop_camera(self, req, resp):
    """Trigger service callback: pause the camera pipeline.

    Closes the DepthAI device, drops the pipeline object, and sets the
    paused flag so `pose_detect` becomes a no-op until restarted.

    :param req: std_srvs.srv.Trigger.Request (unused; Trigger carries no data)
    :param resp: std_srvs.srv.Trigger.Response to populate
    :return: the populated response
    """
    # Guard clause: nothing to do when the device is already stopped.
    if self._is_paused:
        resp.message = 'Device already stopped'
        return resp

    self._is_paused = True
    self.pose.device.close()
    self.pose = None
    resp.success = True
    return resp
322+
281323

282324
def main(args=None):
283325
rclpy.init(args=args)

0 commit comments

Comments
 (0)