Add neighbor-based matching, but it performs pretty badly
felixchenfy committed Sep 16, 2019
1 parent 693eefc commit 5365809
Showing 12 changed files with 187 additions and 56 deletions.
14 changes: 7 additions & 7 deletions CMakeLists.txt
@@ -55,10 +55,10 @@ include_directories( ${PROJECT_SOURCE_DIR}/include )
add_subdirectory( src )
add_subdirectory( test )

# add_executable(run_vo run_vo.cpp)
# target_link_libraries( run_vo
# basics
# geometry
# display
# vo
# )
add_executable(run_vo run_vo.cpp)
target_link_libraries( run_vo
basics
geometry
display
vo
)
33 changes: 23 additions & 10 deletions config/config.yaml
@@ -52,7 +52,7 @@ fr1_desk: # fr1_desk dataset
max_num_imgs_to_proc: 300
# is_pcl_wait_for_keypress: "true" # If true, PCL Viewer will stop after each update, and wait for your keypress.
is_pcl_wait_for_keypress: "false"
cv_waitkey_time: 30
cv_waitkey_time: 1
save_predicted_traj_to: /home/feiyu/Documents/Projects/2018-winter/EECS432_CV_VO/data/test_data/cam_traj.txt
output_folder: "output"

@@ -61,27 +61,40 @@ output_folder: "output"
# ==============================================================

# ------------------- Feature Matching -------------------
# ORB settings
number_of_keypoints_to_extract: 8000
max_number_of_keypoints: 1500
scale_factor: 1.2
level_pyramid: 4
score_threshold: 20

# ------------------- Feature Matching -------------------

feature_match_method_index_initialization: 1
feature_match_method_index_triangulation: 1
feature_match_method_index_pnp: 1

# Matching descriptors
feature_match_method_index: 1
feature_match_method_index: 3
# method 1: the method in Dr. Xiang Gao's slambook:
# distance_threshold = max<float>(min_dis * match_ratio, 30.0);
# method 2: the method in Lowe's 2004 paper:
# min dis / second min dis < 0.8
# method 3: match a point with its neighboring points and then apply Dr. Xiang Gao's criteria.
match_ratio: 2 # This is for method 1 used in Dr. Xiang Gao's slambook:
lowe_dist_ratio: 0.8 # This is for method 2 of Lowe's 2004 SIFT paper
xiang_gao_method_match_ratio: 2 # This is for method 1 used in Dr. Xiang Gao's slambook:
lowe_method_dist_ratio: 0.8 # This is for method 2 of Lowe's 2004 SIFT paper
method_3_feature_dist_threshold: 50.0
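
For context, here is a minimal C++ sketch of how the three selection criteria configured above differ. The helper names and default values are illustrative only (the defaults mirror the config keys above), the matches are assumed to come from an OpenCV matcher, and this is not code from the repository; method 3's pixel-radius parameters are listed just below.

#include <opencv2/core.hpp>
#include <algorithm>
#include <vector>

// Method 1 (Dr. Xiang Gao's slambook): keep matches whose descriptor distance
// is below max(min_dis * match_ratio, 30.0).
std::vector<cv::DMatch> filterBySlambookThreshold(
    const std::vector<cv::DMatch> &all_matches, double match_ratio = 2.0)
{
    double min_dis = 1e9;
    for (const cv::DMatch &m : all_matches)
        min_dis = std::min(min_dis, static_cast<double>(m.distance));
    const double threshold = std::max(min_dis * match_ratio, 30.0);
    std::vector<cv::DMatch> good;
    for (const cv::DMatch &m : all_matches)
        if (m.distance < threshold)
            good.push_back(m);
    return good;
}

// Method 2 (Lowe's 2004 ratio test): keep a match only if its best distance is
// clearly smaller than the second-best distance (knn_matches from knnMatch with k = 2).
std::vector<cv::DMatch> filterByLoweRatio(
    const std::vector<std::vector<cv::DMatch>> &knn_matches, double dist_ratio = 0.8)
{
    std::vector<cv::DMatch> good;
    for (const auto &knn : knn_matches)
        if (knn.size() >= 2 && knn[0].distance < dist_ratio * knn[1].distance)
            good.push_back(knn[0]);
    return good;
}

// Method 3 (added in this commit): restrict candidates to keypoints within
// max_matching_pixel_dist pixels of each other, pick the best descriptor match
// per keypoint (matchByRadiusAndBruteForce in feature_match.cpp), then apply
// the method-1 threshold.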

# Method 3 parameters:
max_matching_pixel_dist_in_initialization: 100
max_matching_pixel_dist_in_triangulation: 100
max_matching_pixel_dist_in_pnp: 50

# remove wrong matches
kpts_uniform_selection_grid_size: 16
kpts_uniform_selection_max_pts_per_grid: 8

# ORB settings
number_of_keypoints_to_extract: 8000
max_number_of_keypoints: 1500
scale_factor: 1.2
level_pyramid: 3
score_threshold: 15


# ------------------- RANSAC Essential matrix -------------------

10 changes: 9 additions & 1 deletion include/my_slam/geometry/feature_match.h
@@ -22,11 +22,19 @@ void calcDescriptors(const cv::Mat &image,
void matchFeatures(
const cv::Mat1b &descriptors_1, const cv::Mat1b &descriptors_2,
vector<cv::DMatch> &matches,
int method_index = 1,
bool is_print_res = false,
// Below are optional arguments, used when method_index == 3
const vector<cv::KeyPoint> &keypoints_1 = vector<cv::KeyPoint>(),
const vector<cv::KeyPoint> &keypoints_2 = vector<cv::KeyPoint>(),
const double max_dist_between_two_matched_kpts = 0.0);
float max_matching_pixel_dist = 0.0);

vector<cv::DMatch> matchByRadiusAndBruteForce(
const vector<cv::KeyPoint> &keypoints_1,
const vector<cv::KeyPoint> &keypoints_2,
const cv::Mat1b &descriptors_1,
const cv::Mat1b &descriptors_2,
float max_matching_pixel_dist);

// Remove duplicate matches.
// After cv's match function, many kpts in I1 might be matched to the same kpt in I2.
10 changes: 1 addition & 9 deletions include/my_slam/vo/frame.h
@@ -84,15 +84,7 @@ class Frame
kpts_colors_.push_back(basics::getPixelAt(rgb_img_, x, y));
}
};
void matchFeatures(Frame::Ptr prev_frame, double max_dist_between_two_matched_kpts)
{
geometry::matchFeatures(
prev_frame->descriptors_, descriptors_,
matches_with_ref_,
true, // print result
prev_frame->keypoints_, keypoints_,
max_dist_between_two_matched_kpts);
}
cv::Point2f projectWorldPointToImage(const cv::Point3f &p_world);
bool isInFrame(const cv::Point3f &p_world);
bool isInFrame(const cv::Mat &p_world);
bool isMappoint(int idx)
1 change: 1 addition & 0 deletions include/my_slam/vo/vo.h
@@ -105,6 +105,7 @@ class VisualOdometry
void pushCurrPointsToMap_();
void getMappointsInCurrentView_(
vector<MapPoint::Ptr> &candidate_mappoints_in_map,
vector<cv::Point2f> &candidate_mappoints_in_2d_image,
cv::Mat &corresponding_mappoints_descriptors);
double getViewAngle_(Frame::Ptr frame, MapPoint::Ptr point);

3 changes: 1 addition & 2 deletions run_vo.cpp
@@ -255,10 +255,9 @@ bool drawResultByPcl(basics::Yaml config_dataset,
// Start drawing only when visual odometry has been initialized. (i.e., the first few frames are not drawn.)
// The reason is: we need to scale the ground-truth pose to match the estimated pose so that the two can be compared.
// (And the underlying reason is that Mono SLAM cannot estimate the absolute depth of a point.)
static double scale;
if (vo->isInitialized())
{
scale = basics::calcMatNorm(truth_t) / basics::calcMatNorm(t);
static double scale = basics::calcMatNorm(truth_t) / basics::calcMatNorm(t);
truth_t /= scale;
pcl_displayer->updateCameraTruthPose(truth_R_vec, truth_t);
}
3 changes: 3 additions & 0 deletions src/basics/opencv_funcs.cpp
@@ -150,7 +150,10 @@ double scalePointPos(cv::Point3f &p, double scale)
p.y *= scale;
p.z *= scale;
}

double calcMatNorm(const cv::Mat &mat)
// You should use this instead:
// C++: double norm(InputArray src1, InputArray src2, int normType=NORM_L2, InputArray mask=noArray() )
{
double sum = 0;
for (int i = 0; i < mat.rows; i++)
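
As the note above suggests, OpenCV already provides this through cv::norm. A minimal replacement sketch, under the assumption that calcMatNorm is meant to be the L2 norm of the translation vectors used for the scale ratio in run_vo.cpp (not code from this commit):

#include <opencv2/core.hpp>

// Equivalent of calcMatNorm(t) if an L2 norm is intended: the single-array
// overload of cv::norm returns the square root of the sum of squared elements.
double matNorm(const cv::Mat &m)
{
    return cv::norm(m, cv::NORM_L2);
}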
87 changes: 72 additions & 15 deletions src/geometry/feature_match.cpp
@@ -83,23 +83,60 @@ void selectUniformKptsByGrid(
keypoints = tmp_keypoints;
}

// void MatcherBfRadius()
// {
vector<cv::DMatch> matchByRadiusAndBruteForce(
const vector<cv::KeyPoint> &keypoints_1,
const vector<cv::KeyPoint> &keypoints_2,
const cv::Mat1b &descriptors_1,
const cv::Mat1b &descriptors_2,
float max_matching_pixel_dist)
{
int N1 = keypoints_1.size(), N2 = keypoints_2.size();
assert(N1 == descriptors_1.rows && N2 == descriptors_2.rows);
vector<cv::DMatch> matches;
float r2 = max_matching_pixel_dist * max_matching_pixel_dist;
for (int i = 0; i < N1; i++)
{
const cv::KeyPoint &kpt1 = keypoints_1[i];
bool is_matched = false;
float x = kpt1.pt.x, y = kpt1.pt.y;
double min_feature_dist = 99999999.0;
int target_idx = -1;
for (int j = 0; j < N2; j++)
{
float x2 = keypoints_2[j].pt.x, y2 = keypoints_2[j].pt.y;
if ((x - x2) * (x - x2) + (y - y2) * (y - y2) <= r2)
{
// double feature_dist = cv::norm(descriptors_1.row(i), descriptors_2.row(j));
cv::Mat diff;
cv::absdiff(descriptors_1.row(i), descriptors_2.row(j), diff);
double feature_dist = cv::sum(diff)[0] / descriptors_1.cols;
if (feature_dist < min_feature_dist)
{
min_feature_dist = feature_dist;
target_idx = j;
is_matched = true;
}
}
}
if (is_matched)
matches.push_back(cv::DMatch(i, target_idx, static_cast<float>(min_feature_dist)));
}
return matches;
}

// }
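
One note on the descriptor distance computed above: matchByRadiusAndBruteForce averages the absolute byte differences of the two descriptor rows, whereas ORB descriptors are binary and are conventionally compared with the Hamming distance. A possible drop-in alternative, shown here only as a sketch and not as what this commit does:

#include <opencv2/core.hpp>

// Hamming distance between two 8-bit binary descriptor rows
// (number of differing bits); this is the usual metric for ORB.
double orbDescriptorDist(const cv::Mat &d1, const cv::Mat &d2)
{
    return cv::norm(d1, d2, cv::NORM_HAMMING);
}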
void matchFeatures(
const cv::Mat1b &descriptors_1, const cv::Mat1b &descriptors_2,
vector<cv::DMatch> &matches,
int method_index,
bool is_print_res,
// Below are optional arguments, used when method_index == 3
const vector<cv::KeyPoint> &keypoints_1,
const vector<cv::KeyPoint> &keypoints_2,
const double max_dist_between_two_matched_kpts)
float max_matching_pixel_dist)
{
// -- Set arguments
static const double match_ratio = basics::Config::get<int>("match_ratio");
static const double dist_ratio = basics::Config::get<int>("lowe_dist_ratio");
static const int feature_matching_method_index = basics::Config::get<int>("feature_match_method_index");

static const double xiang_gao_method_match_ratio = basics::Config::get<double>("xiang_gao_method_match_ratio");
static const double lowe_method_dist_ratio = basics::Config::get<double>("lowe_method_dist_ratio");
static const double method_3_feature_dist_threshold = basics::Config::get<double>("method_3_feature_dist_threshold");
static cv::FlannBasedMatcher matcher_flann(new cv::flann::LshIndexParams(5, 10, 2));
static cv::Ptr<cv::DescriptorMatcher> matcher_bf = cv::DescriptorMatcher::create("BruteForce-Hamming");

@@ -112,14 +149,33 @@ void matchFeatures(
// Start matching
matches.clear();
double min_dis = 9999999, max_dis = 0, distance_threshold = -1;
if (feature_matching_method_index == 1) // the method in Dr. Xiang Gao's slambook
if (method_index == 1 || method_index == 3) // the method in Dr. Xiang Gao's slambook
// Match keypoints with similar descriptors.
// For kpt_i, if kpt_j's descriptor is the most similar to kpt_i's, then they are matched.
{
vector<cv::DMatch> all_matches;
matcher_flann.match(descriptors_1, descriptors_2, all_matches);
if (method_index == 3)
all_matches = matchByRadiusAndBruteForce(
keypoints_1, keypoints_2, descriptors_1, descriptors_2,
max_matching_pixel_dist);
else
matcher_flann.match(descriptors_1, descriptors_2, all_matches);

// Find a min-distance threshold for selecting good matches
// if (method_index == 3)
// distance_threshold = method_3_feature_dist_threshold;
// else
// {
// // Find a min-distance threshold for selecting good matches
// for (int i = 0; i < all_matches.size(); i++)
// {
// double dist = all_matches[i].distance;
// if (dist < min_dis)
// min_dis = dist;
// if (dist > max_dis)
// max_dis = dist;
// }
// distance_threshold = std::max<float>(min_dis * xiang_gao_method_match_ratio, 30.0);
// }
for (int i = 0; i < all_matches.size(); i++)
{
double dist = all_matches[i].distance;
@@ -128,7 +184,8 @@ void matchFeatures(
if (dist > max_dis)
max_dis = dist;
}
distance_threshold = std::max<float>(min_dis * match_ratio, 30.0);
distance_threshold = std::max<float>(min_dis * xiang_gao_method_match_ratio, 30.0);

// Another way of getting the minimum:
// min_dis = std::min_element(all_matches.begin(), all_matches.end(),
// [](const cv::DMatch &m1, const cv::DMatch &m2) {return m1.distance < m2.distance;})->distance;
@@ -138,7 +195,7 @@ void matchFeatures(
if (m.distance < distance_threshold)
matches.push_back(m);
}
else if (feature_matching_method_index == 2)
else if (method_index == 2)
{ // method in Lowe's 2004 paper
// Calculate the feature distances between the two images.
vector<vector<cv::DMatch>> knn_matches;
@@ -156,7 +213,7 @@ void matchFeatures(
{

double dist = knn_matches[i][0].distance;
if (dist < dist_ratio * knn_matches[i][1].distance)
if (dist < lowe_method_dist_ratio * knn_matches[i][1].distance)
matches.push_back(knn_matches[i][0]);
if (dist < min_dis)
min_dis = dist;
@@ -174,7 +231,7 @@ void matchFeatures(
if (is_print_res)
{
printf("Matching features:\n");
printf("Using method %d, threshold = %f\n", feature_matching_method_index, distance_threshold);
printf("Using method %d, threshold = %f\n", method_index, distance_threshold);
printf("Number of matches: %d\n", int(matches.size()));
printf("-- Max dist : %f \n", max_dis);
printf("-- Min dist : %f \n", min_dis);
10 changes: 10 additions & 0 deletions src/vo/frame.cpp
@@ -18,6 +18,14 @@ Frame::Ptr Frame::createFrame(cv::Mat rgb_img, geometry::Camera::Ptr camera, double time_stamp)
return frame;
}

cv::Point2f Frame::projectWorldPointToImage(const cv::Point3f &p_world)
{
cv::Point3f p_cam = basics::preTranslatePoint3f(p_world, T_w_c_.inv()); // T_c_w * p_w = p_c
cv::Point2f pixel = geometry::cam2pixel(p_cam, camera_->K_);
return pixel;
}
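
For context, geometry::cam2pixel presumably applies the standard pinhole projection u = fx*X/Z + cx, v = fy*Y/Z + cy. A minimal sketch of that mapping (an assumption about the helper, not its actual source; K is assumed to be stored as CV_64F):

#include <opencv2/core.hpp>

// Pinhole projection of a point in camera coordinates (X, Y, Z) with
// intrinsics K = [fx 0 cx; 0 fy cy; 0 0 1].
cv::Point2f camToPixel(const cv::Point3f &p_cam, const cv::Mat &K)
{
    const double fx = K.at<double>(0, 0), fy = K.at<double>(1, 1);
    const double cx = K.at<double>(0, 2), cy = K.at<double>(1, 2);
    return cv::Point2f(static_cast<float>(fx * p_cam.x / p_cam.z + cx),
                       static_cast<float>(fy * p_cam.y / p_cam.z + cy));
}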


bool Frame::isInFrame(const cv::Point3f &p_world)
{
cv::Point3f p_cam = basics::preTranslatePoint3f(p_world, T_w_c_.inv()); // T_c_w * p_w = p_c
@@ -26,10 +34,12 @@ bool Frame::isInFrame(const cv::Point3f &p_world)
cv::Point2f pixel = geometry::cam2pixel(p_cam, camera_->K_);
return pixel.x > 0 && pixel.y > 0 && pixel.x < rgb_img_.cols && pixel.y < rgb_img_.rows;
}

bool Frame::isInFrame(const cv::Mat &p_world)
{
return isInFrame(basics::Mat3x1_to_Point3f(p_world));
}

cv::Mat Frame::getCamCenter()
{
return basics::getPosFromT(T_w_c_);
49 changes: 40 additions & 9 deletions src/vo/vo.cpp
@@ -15,6 +15,7 @@ VisualOdometry::VisualOdometry() : map_(new (Map))

void VisualOdometry::getMappointsInCurrentView_(
vector<MapPoint::Ptr> &candidate_mappoints_in_map,
vector<cv::Point2f> &candidate_2d_pts_in_image,
cv::Mat &corresponding_mappoints_descriptors)
{
// vector<MapPoint::Ptr> candidate_mappoints_in_map;
@@ -23,13 +24,26 @@ void VisualOdometry::getMappointsInCurrentView_(
corresponding_mappoints_descriptors.release();
for (auto &iter_map_point : map_->map_points_)
{
MapPoint::Ptr &p = iter_map_point.second;
if (curr_->isInFrame(p->pos_)) // check if p in curr frame image
MapPoint::Ptr &p_world = iter_map_point.second;

// -- Check if p in curr frame image
bool is_p_in_curr_frame = true;
cv::Point3f p_cam = basics::preTranslatePoint3f(p_world->pos_, curr_->T_w_c_.inv()); // T_c_w * p_w = p_c
if (p_cam.z < 0)
is_p_in_curr_frame = false;
cv::Point2f pixel = geometry::cam2pixel(p_cam, curr_->camera_->K_);
const bool is_inside_image = pixel.x > 0 && pixel.y > 0 && pixel.x < curr_->rgb_img_.cols && pixel.y < curr_->rgb_img_.rows;
if (!is_inside_image)
is_p_in_curr_frame = false;

// -- If it is in the current frame,
// then add this point to candidate_mappoints_in_map
if (is_p_in_curr_frame)
{
// -- add to candidate_mappoints_in_map
candidate_mappoints_in_map.push_back(p);
corresponding_mappoints_descriptors.push_back(p->descriptor_);
p->visible_times_++;
candidate_mappoints_in_map.push_back(p_world);
candidate_2d_pts_in_image.push_back(pixel);
corresponding_mappoints_descriptors.push_back(p_world->descriptor_);
p_world->visible_times_++;
}
}
}
@@ -254,11 +268,26 @@ bool VisualOdometry::poseEstimationPnP_()
{
// -- From the local map, find the keypoints that fall into the current view
vector<MapPoint::Ptr> candidate_mappoints_in_map;
vector<cv::Point2f> candidate_2d_pts_in_image;
cv::Mat corresponding_mappoints_descriptors;
getMappointsInCurrentView_(candidate_mappoints_in_map, corresponding_mappoints_descriptors);
getMappointsInCurrentView_(
candidate_mappoints_in_map,
candidate_2d_pts_in_image,
corresponding_mappoints_descriptors);
vector<cv::KeyPoint> candidate_2d_kpts_in_image = geometry::pts2Keypts(candidate_2d_pts_in_image);

// -- Compare descriptors to find matches, and extract 3D-2D correspondences
geometry::matchFeatures(corresponding_mappoints_descriptors, curr_->descriptors_, curr_->matches_with_map_);
static const float max_matching_pixel_dist_in_pnp =
basics::Config::get<float>("max_matching_pixel_dist_in_pnp");
static const int method_index = basics::Config::get<int>("feature_match_method_index_pnp");
geometry::matchFeatures(
corresponding_mappoints_descriptors, curr_->descriptors_,
curr_->matches_with_map_,
method_index,
false,
candidate_2d_kpts_in_image, curr_->keypoints_,
max_matching_pixel_dist_in_pnp);

const int num_matches = curr_->matches_with_map_.size();
cout << "Number of 3d-2d pairs: " << num_matches << endl;
vector<cv::Point3f> pts_3d;
@@ -338,7 +367,9 @@ bool VisualOdometry::poseEstimationPnP_()
dist_to_prev_keyframe, max_possible_dist_to_prev_keyframe);
is_pnp_good = false;
}
}else{
}
else
{
printf("PnP num inlier matches: %d.\n", num_matches);
}
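
To make the 3D-2D step above concrete, here is a hedged sketch of how the radius-based matches could feed cv::solvePnPRansac. The names are illustrative and the repository's actual PnP call lies outside the hunks shown in this diff; following the argument order of matchFeatures above, matches[i].queryIdx indexes the candidate map points and trainIdx the current frame's keypoints.

#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <vector>

// Collect 3D-2D correspondences from the matches and estimate the camera pose.
void solvePoseFromMatches(const std::vector<cv::Point3f> &candidate_mappoints_3d,
                          const std::vector<cv::KeyPoint> &curr_keypoints,
                          const std::vector<cv::DMatch> &matches,
                          const cv::Mat &K, cv::Mat &rvec, cv::Mat &tvec)
{
    std::vector<cv::Point3f> pts_3d;
    std::vector<cv::Point2f> pts_2d;
    for (const cv::DMatch &m : matches)
    {
        pts_3d.push_back(candidate_mappoints_3d[m.queryIdx]);
        pts_2d.push_back(curr_keypoints[m.trainIdx].pt);
    }
    cv::Mat inliers;
    cv::solvePnPRansac(pts_3d, pts_2d, K, cv::Mat(), // no lens distortion assumed
                       rvec, tvec, false, 100, 8.0, 0.99, inliers);
}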
