Skip to content

Commit f43c038

Browse files
author
Shuo
committed
rename project, add launch files
1 parent b053016 commit f43c038

26 files changed

+2583
-223
lines changed

.devcontainer/Dockerfile

+41
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
FROM ros:melodic-perception
2+
3+
ENV CERES_VERSION="1.14.0"
4+
ENV CATKIN_WS=/root/vins_ws
5+
6+
# set up thread number for building
7+
RUN if [ "x$(nproc)" = "x1" ] ; then export USE_PROC=1 ; \
8+
else export USE_PROC=$(($(nproc)/2)) ; fi && \
9+
apt-get update && apt-get install -y \
10+
cmake \
11+
libatlas-base-dev \
12+
libeigen3-dev \
13+
libgoogle-glog-dev \
14+
libsuitesparse-dev \
15+
python-catkin-tools \
16+
ros-${ROS_DISTRO}-cv-bridge \
17+
ros-${ROS_DISTRO}-image-transport \
18+
ros-${ROS_DISTRO}-message-filters \
19+
ros-${ROS_DISTRO}-tf && \
20+
rm -rf /var/lib/apt/lists/* && \
21+
# Build and install Ceres
22+
git clone https://ceres-solver.googlesource.com/ceres-solver && \
23+
cd ceres-solver && \
24+
git checkout tags/${CERES_VERSION} && \
25+
mkdir build && cd build && \
26+
cmake .. && \
27+
make -j${USE_PROC} install && \
28+
rm -rf ../../ceres-solver && \
29+
mkdir -p $CATKIN_WS/src/VINS-Fusion/
30+
31+
# Add VINS-Fusion
32+
WORKDIR $CATKIN_WS/src
33+
# use the following line if you only have this dockerfile
34+
RUN git clone https://github.com/HKUST-Aerial-Robotics/VINS-Fusion.git
35+
36+
#TODO: add visualizer
37+
38+
# Build VINS-Fusion
39+
WORKDIR $CATKIN_WS
40+
RUN /bin/bash -c "source /opt/ros/${ROS_DISTRO}/setup.bash; catkin build; source ${CATKIN_WS}/devel/setup.bash; catkin build"
41+
RUN echo "source ${CATKIN_WS}/devel/setup.bash" >> /root/.bashrc

.devcontainer/devcontainer.json

+27
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
2+
// https://github.com/microsoft/vscode-dev-containers/tree/v0.238.0/containers/ubuntu
3+
{
4+
"name": "Ubuntu",
5+
"build": {
6+
"dockerfile": "Dockerfile",
7+
// Update 'VARIANT' to pick an Ubuntu version: jammy / ubuntu-22.04, focal / ubuntu-20.04, bionic /ubuntu-18.04
8+
// Use ubuntu-22.04 or ubuntu-18.04 on local arm64/Apple Silicon.
9+
"args": { "-t": "a1_vins_image"}
10+
},
11+
12+
// use this to connect to odrive without --privileged
13+
"runArgs": ["--network=host", "--cap-add=IPC_LOCK",
14+
"--cap-add=sys_nice","--device=/dev/input",
15+
"--device=/dev/bus", "--volume=/dev/serial:/dev/serial:ro",
16+
"--name=a1_vins_docker"],
17+
"workspaceMount": "source=${localWorkspaceFolder},target=/root/vins_ws/src/vilo,type=bind",
18+
"workspaceFolder": "/root/vins_ws/src/vilo"
19+
// Use 'forwardPorts' to make a list of ports inside the container available locally.
20+
// "forwardPorts": [],
21+
22+
// Use 'postCreateCommand' to run commands after the container is created.
23+
// "postCreateCommand": "uname -a",
24+
25+
// Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
26+
// "remoteUser": "vscode"
27+
}

CMakeLists.txt

+5-5
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
cmake_minimum_required(VERSION 3.0.0)
2-
project(vileom)
2+
project(vilo)
33

44
set(CMAKE_BUILD_TYPE "Release")
55
#set(CMAKE_BUILD_TYPE "Debug")
@@ -38,7 +38,7 @@ include_directories(
3838

3939
catkin_package()
4040

41-
add_library(vileom_lib
41+
add_library(vilo_lib
4242
src/estimator/estimator.cpp
4343
src/featureTracker/feature_tracker.cpp
4444
src/featureTracker/feature_manager.cpp
@@ -61,11 +61,11 @@ add_library(vileom_lib
6161
src/initial/initial_sfm.cpp
6262
src/initial/initial_ex_rotation.cpp
6363
src/factor/imu_leg_factor.cpp src/factor/imu_leg_factor.h src/factor/imu_leg_integration_base.cpp src/factor/imu_leg_integration_base.h)
64-
target_link_libraries(vileom_lib ${catkin_LIBRARIES} ${OpenCV_LIBS} ${CERES_LIBRARIES})
64+
target_link_libraries(vilo_lib ${catkin_LIBRARIES} ${OpenCV_LIBS} ${CERES_LIBRARIES})
6565

6666
# this is actually the main function, not just the test...
67-
add_executable(vileom_feature_track_test src/feature_track_test.cpp )
68-
target_link_libraries(vileom_feature_track_test vileom_lib)
67+
add_executable(vilo_feature_track_test src/feature_track_test.cpp )
68+
target_link_libraries(vilo_feature_track_test vilo_lib)
6969

7070

7171
add_executable(ceres_test src/test/ceres_test.cpp src/legKinematics/A1Kinematics.cpp)

README.md

+2-15
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
This ROS package contains a visual-inertial-leg odometry (VILO) for Unitree A1 robot. The package name is currently called vileom, which is an obsolete name, which will be revised in later versions.
1+
This ROS package contains a visual-inertial-leg odometry (VILO) for Unitree A1 robot.
22

33
Our goal is to achieve low-cost minimum sensing on legged robots (the sensing suite has only one IMU, one stereo camera, and leg joint sensors; total cost <$1000).
44

@@ -10,24 +10,11 @@ Here are two videos comparing the performance of the VILO and the VINS-Fusion:
1010
[![video2](https://img.youtube.com/vi/oNwwQ0l-O4U/0.jpg)](https://www.youtube.com/watch?v=oNwwQ0l-O4U)
1111

1212

13-
The package is only tested in Ubuntu 18.04 and ROS melodic.
1413

1514
## Installation
16-
Please follow VINS-Fusion's installation guide.
15+
Use the VS Code docker container.
1716

1817

19-
This package must also working together with
20-
21-
https://github.com/paulyang1990/A1_visualizer
22-
23-
and
24-
25-
https://github.com/paulyang1990/a1_launch
26-
27-
The A1_visualizer has its own dependencies. A1_launch is just a collection of launch files. Download these two packages (and their dependencies) and compile them together with the VILO (vileom).
28-
29-
The A1_launch contains scripts to run the VILO. And the VILO uses A1_visualizer to visualize the estimation results.
30-
3118
## Dataset
3219
A Google drive folder https://drive.google.com/drive/folders/13GsFDaBkDrslOl9BfE4AJnOn3ECDXVnc
3320

config/hardware_a1_vilo_config.yaml

+95
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,95 @@
1+
%YAML:1.0
2+
3+
#common parameters
4+
#support: 1 imu 1 cam; 1 imu 2 cam: 2 cam;
5+
imu: 1
6+
num_of_cam: 2
7+
8+
imu_topic: "/hardware_a1/imu"
9+
image0_topic: "/camera_forward/infra1/image_rect_raw"
10+
image1_topic: "/camera_forward/infra2/image_rect_raw"
11+
output_path: "/home/shuoy/nv_work/unitree_ws/src/tightly-coupled-visual-inertial-leg-odometry/output"
12+
13+
cam0_calib: "left.yaml"
14+
cam1_calib: "right.yaml"
15+
image_width: 640
16+
image_height: 480
17+
18+
# leg related
19+
use_leg_odom: 1
20+
num_of_leg: 4
21+
leg_topic: "/hardware_a1/joint_foot"
22+
optimize_leg_bias: 1
23+
joint_angle_n: 0.00001
24+
joint_velocity_n: 0.00001
25+
leg_bias_xy_n: 0.5
26+
leg_bias_z_n: 0.5
27+
# contact model
28+
v_n_force_thres_ratio: 0.8
29+
v_n_min: 0.0001
30+
v_n_max: 500.0
31+
# v_n_w1: 0.333
32+
# v_n_w2: 0.333
33+
# v_n_w3: 0.333
34+
v_n_term1_steep: 10
35+
v_n_term2_var_rescale: 1.0e-6
36+
v_n_term3_distance_rescale: 1.0e-3
37+
v_n_final_ratio: 0.1
38+
39+
# leg kinematics parameter
40+
lower_leg_length: 0.21
41+
42+
43+
# Extrinsic parameter between IMU and Camera.
44+
estimate_extrinsic: 1 # 0 Have accurate extrinsic parameters. We will trust the following imu^R_cam, imu^T_cam; don't change them.
45+
# 1 Have an initial guess about extrinsic parameters. We will optimize around your initial guess.
46+
47+
body_T_cam0: !!opencv-matrix
48+
rows: 4
49+
cols: 4
50+
dt: d
51+
data: [ 0, 0, 1, 0.2693,
52+
-1, 0, 0, 0.025,
53+
0, -1, 0, 0.067,
54+
0., 0., 0., 1. ]
55+
56+
body_T_cam1: !!opencv-matrix
57+
rows: 4
58+
cols: 4
59+
dt: d
60+
data: [ 0, 0, 1, 0.2693,
61+
-1, 0, 0, -0.025,
62+
0, -1, 0, 0.067,
63+
0., 0., 0., 1. ]
64+
65+
#Multiple thread support
66+
multiple_thread: 1
67+
68+
#feature tracker parameters
69+
max_cnt: 90 # max feature number in feature tracking
70+
min_dist: 20 # min distance between two features
71+
freq: 15 # frequency (Hz) of publishing the tracking result. At least 10 Hz for good estimation. If set to 0, the frequency will be the same as the raw image
72+
F_threshold: 1.0 # ransac threshold (pixel)
73+
show_track: 1 # publish tracking image as topic
74+
flow_back: 1 # perform forward and backward optical flow to improve feature tracking accuracy
75+
76+
#optimization parameters
77+
max_solver_time: 0.1 # max solver iteration time (ms), to guarantee real time
78+
max_num_iterations: 16 # max solver iterations, to guarantee real time
79+
keyframe_parallax: 10.0 # keyframe selection threshold (pixel)
80+
81+
#imu parameters The more accurate parameters you provide, the better performance
82+
acc_n: 0.1 # accelerometer measurement noise standard deviation. #0.2 0.04
83+
gyr_n: 0.05 # gyroscope measurement noise standard deviation. #0.05 0.004
84+
acc_w: 0.004 # accelerometer bias random walk noise standard deviation. #0.002
85+
gyr_w: 0.002 # gyroscope bias random walk noise standard deviation. #4.0e-5
86+
g_norm: 9.805 # gravity magnitude
87+
88+
#unsynchronization parameters
89+
estimate_td: 1 # online estimate time offset between camera and imu
90+
td: 0.00240 # initial value of time offset. unit: s. read image clock + td = real image clock (IMU clock)
91+
92+
#loop closure parameters
93+
load_previous_pose_graph: 0 # load and reuse previous pose graph; load from 'pose_graph_save_path'
94+
pose_graph_save_path: "/home/biorobotics/output/pose_graph/" # save and load path
95+
save_image: 0 # save images in the pose graph for visualization purposes; you can disable this function by setting 0

config/vins_fusion_real_sense_xsens.yaml config/hardware_a1_vins_config.yaml

+19-22
Original file line numberDiff line numberDiff line change
@@ -5,19 +5,16 @@
55
imu: 1
66
num_of_cam: 2
77

8-
imu_topic: "/imu/data"
8+
imu_topic: "/hardware_a1/imu"
99
image0_topic: "/camera_forward/infra1/image_rect_raw"
1010
image1_topic: "/camera_forward/infra2/image_rect_raw"
11-
output_path: "/home/shuoy/output"
11+
output_path: "/home/shuoy/nv_work/unitree_ws/src/tightly-coupled-visual-inertial-leg-odometry/output"
1212

1313
cam0_calib: "left.yaml"
1414
cam1_calib: "right.yaml"
1515
image_width: 640
1616
image_height: 480
17-
18-
use_leg_odom: 0
19-
num_of_leg: 4
20-
leg_topic: "/a1/joint_foot"
17+
2118

2219
# Extrinsic parameter between IMU and Camera.
2320
estimate_extrinsic: 1 # 0 Have an accurate extrinsic parameters. We will trust the following imu^R_cam, imu^T_cam, don't change it.
@@ -27,46 +24,46 @@ body_T_cam0: !!opencv-matrix
2724
rows: 4
2825
cols: 4
2926
dt: d
30-
data: [ 0, 0, 1, 0.04,
31-
-1, 0, 0, 0.02,
32-
0, -1, 0, 0.0,
27+
data: [ 0, 0, 1, 0.2693,
28+
-1, 0, 0, 0.025,
29+
0, -1, 0, 0.067,
3330
0., 0., 0., 1. ]
3431

3532
body_T_cam1: !!opencv-matrix
3633
rows: 4
3734
cols: 4
3835
dt: d
39-
data: [ 0, 0, 1, 0.04,
40-
-1, 0, 0, -0.03,
41-
0, -1, 0, 0.0,
36+
data: [ 0, 0, 1, 0.2693,
37+
-1, 0, 0, -0.025,
38+
0, -1, 0, 0.067,
4239
0., 0., 0., 1. ]
4340

4441
#Multiple thread support
45-
multiple_thread: 0
42+
multiple_thread: 1
4643

4744
#feature tracker parameters
48-
max_cnt: 150 # max feature number in feature tracking
49-
min_dist: 30 # min distance between two features
45+
max_cnt: 90 # max feature number in feature tracking
46+
min_dist: 20 # min distance between two features
5047
freq: 15 # frequency (Hz) of publishing the tracking result. At least 10 Hz for good estimation. If set to 0, the frequency will be the same as the raw image
5148
F_threshold: 1.0 # ransac threshold (pixel)
52-
show_track: 0 # publish tracking image as topic
49+
show_track: 1 # publish tracking image as topic
5350
flow_back: 1 # perform forward and backward optical flow to improve feature tracking accuracy
5451

5552
#optimization parameters
56-
max_solver_time: 0.04 # max solver itration time (ms), to guarantee real time
57-
max_num_iterations: 8 # max solver itrations, to guarantee real time
53+
max_solver_time: 0.1 # max solver iteration time (ms), to guarantee real time
54+
max_num_iterations: 16 # max solver iterations, to guarantee real time
5855
keyframe_parallax: 10.0 # keyframe selection threshold (pixel)
5956

6057
#imu parameters The more accurate parameters you provide, the better performance
6158
acc_n: 0.1 # accelerometer measurement noise standard deviation. #0.2 0.04
62-
gyr_n: 0.01 # gyroscope measurement noise standard deviation. #0.05 0.004
63-
acc_w: 0.001 # accelerometer bias random work noise standard deviation. #0.002
64-
gyr_w: 0.0001 # gyroscope bias random work noise standard deviation. #4.0e-5
59+
gyr_n: 0.05 # gyroscope measurement noise standard deviation. #0.05 0.004
60+
acc_w: 0.004 # accelerometer bias random walk noise standard deviation. #0.002
61+
gyr_w: 0.002 # gyroscope bias random walk noise standard deviation. #4.0e-5
6562
g_norm: 9.805 # gravity magnitude
6663

6764
#unsynchronization parameters
6865
estimate_td: 1 # online estimate time offset between camera and imu
69-
td: -0.00454 # initial value of time offset. unit: s. readed image clock + td = real image clock (IMU clock)
66+
td: 0.00240 # initial value of time offset. unit: s. read image clock + td = real image clock (IMU clock)
7067

7168
#loop closure parameters
7269
load_previous_pose_graph: 0 # load and reuse previous pose graph; load from 'pose_graph_save_path'

0 commit comments

Comments
 (0)