@@ -318,27 +318,50 @@ void integrateAllSensorDepthResourcesOfType(
// Get transformation between reference (e.g. IMU) and sensor.
aslam::Transformation T_B_S = sensor_manager.getSensor_T_B_S(sensor_id);

// Get the sensor type, as cameras and LiDARs are treated differently.
vi_map::SensorType sensor_type = sensor_manager.getSensorType(sensor_id);

// If the sensor is a camera and the resource type is a depth map we will
// need the camera model to reproject them.
aslam::NCamera::Ptr ncamera_ptr;
// need the camera model to reproject them with the correct model / distortion.
aslam::Camera::Ptr camera_ptr;
if (sensor_manager.getSensorType(sensor_id) ==
vi_map::SensorType::kNCamera) {
ncamera_ptr = sensor_manager.getSensorPtr<aslam::NCamera>(sensor_id);
if (sensor_type == vi_map::SensorType::kNCamera) {
aslam::NCamera::Ptr ncamera_ptr =
sensor_manager.getSensorPtr<aslam::NCamera>(sensor_id);
CHECK(ncamera_ptr);

// TODO(smauq): This seems to be a strong assumption.
CHECK_EQ(ncamera_ptr->getNumCameras(), 1u);
camera_ptr = ncamera_ptr->getCameraShared(0);
// Need to update the sensor extrinsics, since ncameras have an
// additional extrinsics transformation between the ncamera frame and
// the camera frame.
T_B_S = T_B_S * ncamera_ptr->get_T_C_B(0).inverse();
CHECK(camera_ptr);

// Optionally, remove distortion from camera.
if (use_undistorted_camera_for_depth_maps) {
aslam::Camera::Ptr camera_no_distortion;
backend::createCameraWithoutDistortion(
*camera_ptr, &camera_no_distortion);
CHECK(camera_no_distortion);
camera_ptr = camera_no_distortion;
}
}
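An aside on the T_B_S update inside this block: the composition only makes sense with a fixed convention for what a transform name means. A minimal sketch, assuming the usual convention that T_A_B maps points expressed in frame B into frame A, written in plain Eigen rather than maplab types:

#include <Eigen/Geometry>

// B: body/IMU frame, S: ncamera base frame, C: the single camera frame.
// The sensor manager provides T_B_S; aslam::NCamera::get_T_C_B(0) provides
// the camera against the ncamera base (which NCamera itself calls "B").
Eigen::Isometry3d compose_T_B_C(const Eigen::Isometry3d& T_B_S,
                                const Eigen::Isometry3d& T_C_S) {
  // p_B = T_B_S * p_S and p_S = T_C_S.inverse() * p_C, hence:
  return T_B_S * T_C_S.inverse();
}

This mirrors the line `T_B_S = T_B_S * ncamera_ptr->get_T_C_B(0).inverse();` above, which re-targets the stored extrinsics from the ncamera base frame to the single camera frame.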
// Optionally, remove distortion from camera.
if (use_undistorted_camera_for_depth_maps && camera_ptr) {
aslam::Camera::Ptr camera_no_distortion;
backend::createCameraWithoutDistortion(
*camera_ptr, &camera_no_distortion);
CHECK(camera_no_distortion);
camera_ptr = camera_no_distortion;

// Pick defaults that do nothing, so we don't have to check later on
// whether it's a LiDAR or a camera. This way, by default, we only remove
// invalid points and apply no other filtering.
double min_range_m = 0.0;
double max_range_m = std::numeric_limits<double>::infinity();
const resources::BoundingBox3D* robot_filter = nullptr;

if (sensor_type == vi_map::SensorType::kLidar) {
vi_map::Lidar::Ptr lidar_ptr =
sensor_manager.getSensorPtr<vi_map::Lidar>(sensor_id);

min_range_m = lidar_ptr->getMinRangeM();
max_range_m = lidar_ptr->getMaxRangeM();

// The robot filter is optional.
if (lidar_ptr->hasRobotFilter()) {
robot_filter = lidar_ptr->getRobotFilterPtr();
}
}
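For review purposes, here is a self-contained sketch of the per-point test these defaults imply. Eigen::AlignedBox3d stands in for resources::BoundingBox3D, so the box API is an assumption for illustration, not the actual maplab interface:

#include <Eigen/Geometry>

// Stand-in sketch: with min_range_m = 0.0, max_range_m = inf and a null
// robot filter, only non-finite points are dropped; a LiDAR's YAML-configured
// limits tighten the test.
bool keepPoint(const Eigen::Vector3d& p_S, double min_range_m,
               double max_range_m, const Eigen::AlignedBox3d* robot_filter) {
  if (!p_S.allFinite()) {
    return false;  // invalid points are always removed
  }
  const double range_m = p_S.norm();
  if (range_m < min_range_m || range_m > max_range_m) {
    return false;  // outside the sensor's configured operating range
  }
  if (robot_filter != nullptr && robot_filter->contains(p_S)) {
    return false;  // the point hit the robot's own body
  }
  return true;
}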

size_t num_resources = resource_buffer_ptr->size();
@@ -498,8 +521,63 @@ void integrateAllSensorDepthResourcesOfType(
<< "timestamp " << timestamp_ns << "ns!";
}

// If we have timing information for the points, undistort the point
// cloud based on the current poses.
if (point_cloud.hasTimes()) {
int32_t min_time_ns, max_time_ns;
point_cloud.getMinMaxTimeNanoseconds(&min_time_ns, &max_time_ns);

// The undistortion might take us outside the pose graph range.
if (timestamp_ns + min_time_ns < min_timestamp_ns ||
timestamp_ns + max_time_ns > max_timestamp_ns) {
VLOG(3) << "The optional depth resource at " << timestamp_ns
<< "ns is outside of the time range of the pose graph, "
<< "skipping.";
continue;
}

// We only use the IMU to interpolate a limited number of poses,
// and then resort to linear interpolation in between for each
// point. This is a significant speedup.
const int64_t kNumInterpolationPoints = 100;
Eigen::Matrix<int64_t, 1, Eigen::Dynamic> interpolated_timestamps(
kNumInterpolationPoints);
// Evenly space the interpolation poses, while making sure the
// start and end are included so the entire scan time is covered.
const int64_t time_step =
(max_time_ns - min_time_ns) / (kNumInterpolationPoints - 1);
for (int64_t i = 0; i < kNumInterpolationPoints - 1; ++i) {
interpolated_timestamps[i] =
timestamp_ns + min_time_ns + i * time_step;
}
// Set the last one manually to avoid integer rounding errors.
interpolated_timestamps[kNumInterpolationPoints - 1] =
timestamp_ns + max_time_ns;
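// (Worked example of the rounding issue: for max - min = 100,000,037 ns,
// time_step truncates to 1,010,101 ns, so following the loop's pattern the
// last sample would land at 99 * 1,010,101 = 99,999,999 ns, 38 ns short of
// the true scan end.)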

aslam::TransformationVector interpolated_poses;
pose_interpolator.getPosesAtTime(
vi_map, mission_id, interpolated_timestamps,
&interpolated_poses);

// The interpolated poses are now from IMU to MAP frame. Bring
// them and the timestamps into the LIDAR start frame.
for (int64_t i = 0; i < kNumInterpolationPoints; ++i) {
interpolated_timestamps[i] -= timestamp_ns;
interpolated_poses[i] = T_B_S.inverse() * T_M_B.inverse() *
interpolated_poses[i] * T_B_S;
}
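// In frame notation this computes, for each sample i,
//   T_S0_Si = T_S_B * T_B_M * T_M_Bi * T_B_S   (with a constant T_B_S),
// i.e. the sensor pose at sample time i expressed relative to the sensor
// pose at the scan timestamp -- exactly the relative motion that per-point
// undistortion has to compensate.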

point_cloud.undistort(
interpolated_timestamps, interpolated_poses);
}
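To make the two-stage scheme above concrete, here is a minimal sketch of the per-point blending the undistort() call is assumed to perform between the precomputed samples: slerp for rotation, lerp for translation. This is the idea behind the speedup comment, not maplab's actual implementation, and it is written in plain Eigen:

#include <algorithm>
#include <cstdint>
#include <vector>
#include <Eigen/Geometry>

using PoseVector =
    std::vector<Eigen::Isometry3d, Eigen::aligned_allocator<Eigen::Isometry3d>>;

// Preconditions: stamps_ns is non-empty, sorted, scan-relative, and has one
// pose per stamp.
Eigen::Isometry3d interpolatePoseAt(const std::vector<int64_t>& stamps_ns,
                                    const PoseVector& poses,
                                    int64_t point_time_ns) {
  // Find the two samples bracketing the point's timestamp.
  auto it =
      std::upper_bound(stamps_ns.begin(), stamps_ns.end(), point_time_ns);
  const size_t hi = std::min<size_t>(
      static_cast<size_t>(it - stamps_ns.begin()), stamps_ns.size() - 1u);
  const size_t lo = (hi == 0u) ? 0u : hi - 1u;
  const double span = static_cast<double>(stamps_ns[hi] - stamps_ns[lo]);
  const double t =
      (span > 0.0)
          ? static_cast<double>(point_time_ns - stamps_ns[lo]) / span
          : 0.0;
  // Blend: spherical interpolation for rotation, linear for translation.
  const Eigen::Quaterniond q =
      Eigen::Quaterniond(poses[lo].rotation())
          .slerp(t, Eigen::Quaterniond(poses[hi].rotation()));
  Eigen::Isometry3d T = Eigen::Isometry3d::Identity();
  T.linear() = q.toRotationMatrix();
  T.translation() =
      (1.0 - t) * poses[lo].translation() + t * poses[hi].translation();
  return T;  // p_undistorted = T * p_measured, in the scan-start frame
}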

size_t num_removed_points = point_cloud.filterValidMinMaxBox(
min_range_m, max_range_m, robot_filter);

VLOG(3) << "Found point cloud at timestamp " << timestamp_ns
<< "ns";
<< "ns. With " << point_cloud.size() << " points, and "
<< num_removed_points << " points removed after filter "
<< "conditions from the sensor yaml.";
integratePointCloud(
timestamp_ns, T_G_S, point_cloud, integration_function);
continue;
@@ -57,7 +57,7 @@ void integratePointCloud(
voxblox_point_cloud.colors = &tmp_colors;
CHECK(backend::convertPointCloudType(points_C, &voxblox_point_cloud));

// If there is no color, create an empty vector of matchin size.
// If there is no color, create an empty vector of matching size.
if (tmp_colors.empty()) {
tmp_colors.resize(tmp_points_C.size());
}
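A brief note on why the bare resize() is sufficient: vector growth value-initializes the new elements, so every point receives a deterministic default color and the point/color indices stay aligned. A stand-in sketch; the all-zero default mirrors what voxblox's color type is assumed to do:

#include <cstdint>
#include <vector>

struct Color {  // stand-in for illustration, not the actual voxblox::Color
  uint8_t r = 0u, g = 0u, b = 0u, a = 0u;
};

std::vector<Color> makeMatchingColors(size_t num_points) {
  // One default (all-zero) color per point keeps the buffers index-aligned.
  return std::vector<Color>(num_points);
}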
@@ -7,8 +7,6 @@
#include <landmark-triangulation/pose-interpolator.h>
#include <map-resources/resource-conversion.h>
#include <memory>
#include <opencv2/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <posegraph/unique-id.h>
#include <sensors/absolute-6dof-pose.h>
#include <sensors/imu.h>
@@ -25,10 +23,7 @@

DECLARE_bool(map_builder_save_point_clouds_as_resources);
DECLARE_bool(map_builder_save_point_cloud_maps_as_resources);
DECLARE_bool(
map_builder_save_point_clouds_as_range_image_including_intensity_image);
DECLARE_bool(map_builder_save_image_as_resources);
DECLARE_bool(map_builder_visualize_lidar_depth_maps_in_ocv_window);

namespace aslam {
class NCamera;
@@ -190,13 +185,6 @@ class StreamMapBuilder {
aslam::Transformation T_M0_G_;
bool is_first_baseframe_estimate_processed_;

// Store lidar depth camera for lidar point cloud projection.
aslam::SensorId lidar_depth_camera_id_;
aslam::Camera::Ptr lidar_depth_camera_sensor_;
// Save transformation between lidar sensor frame and lidar camera sensor
// frame.
aslam::Transformation T_C_lidar_S_lidar_;

std::unordered_map<
aslam::SensorId,
common::TemporalBuffer<vi_map::ExternalFeaturesMeasurement::ConstPtr>>
@@ -231,73 +219,31 @@ void StreamMapBuilder::attachLidarMeasurement(
"not yet present in the map's sensor manager!";

resources::PointCloud point_cloud;
backend::convertPointCloudType<PointCloudType, resources::PointCloud>(
lidar_measurement.getPointCloud(), &point_cloud);

vi_map::VIMission& mission = map_->getMission(mission_id_);

if (lidar_depth_camera_id_.isValid()) {
CHECK(lidar_depth_camera_sensor_);

// Transform into camera frame.
point_cloud.applyTransformation(T_C_lidar_S_lidar_);

cv::Mat range_image, image;
cv::Mat* image_ptr =
(FLAGS_map_builder_save_point_clouds_as_range_image_including_intensity_image) // NOLINT
? &image
: nullptr;

backend::convertPointCloudToDepthMap(
point_cloud, *lidar_depth_camera_sensor_, true /*use_openni_format*/,
true /*create_range_image*/, &range_image, image_ptr);

if (FLAGS_map_builder_visualize_lidar_depth_maps_in_ocv_window) {
cv::namedWindow("depth");
cv::namedWindow("intensity");
double min;
double max;
cv::minMaxIdx(range_image, &min, &max, 0, 0, range_image > 1e-6);
max = std::min(10000., max);

cv::Mat scaled_range_image;
float scale = 255 / (max - min);
range_image.convertTo(scaled_range_image, CV_8UC1, scale, -min * scale);
cv::Mat color_image;
cv::applyColorMap(scaled_range_image, color_image, cv::COLORMAP_JET);
color_image.setTo(cv::Scalar(0u, 0u, 0u), range_image < 1e-6);
cv::imshow("depth", color_image);
if (image_ptr != nullptr) {
image.setTo(cv::Scalar(0u), range_image < 1e-6);
cv::imshow("intensity", image);
}
cv::waitKey(1);
}

CHECK_EQ(CV_MAT_TYPE(range_image.type()), CV_16UC1);
map_->addSensorResource(
backend::ResourceType::kRawDepthMap, lidar_depth_camera_id_,
lidar_measurement.getTimestampNanoseconds(), range_image, &mission);

if (!image.empty()) {
if (image.type() == CV_8UC1) {
map_->addSensorResource(
backend::ResourceType::kImageForDepthMap, lidar_depth_camera_id_,
lidar_measurement.getTimestampNanoseconds(), image, &mission);
} else if (image.type() == CV_8UC3) {
map_->addSensorResource(
backend::ResourceType::kColorImageForDepthMap,
lidar_depth_camera_id_, lidar_measurement.getTimestampNanoseconds(),
image, &mission);
}
}
const vi_map::Lidar& lidar_sensor =
map_->getSensorManager().getSensor<vi_map::Lidar>(lidar_sensor_id);

if (lidar_sensor.hasPointTimestamps()) {
const uint32_t convert_to_ns =
lidar_sensor.getTimestampConversionToNanoseconds();
const int64_t time_offset_ns =
lidar_sensor.hasRelativePointTimestamps()
? lidar_measurement.getTimestampNanoseconds()
: 0;
backend::convertPointCloudType<PointCloudType, resources::PointCloud>(
lidar_measurement.getPointCloud(), &point_cloud, true, convert_to_ns,
time_offset_ns);
} else {
backend::ResourceType point_cloud_type =
backend::getResourceTypeForPointCloud(point_cloud);
map_->addSensorResource(
point_cloud_type, lidar_sensor_id,
lidar_measurement.getTimestampNanoseconds(), point_cloud, &mission);
backend::convertPointCloudType<PointCloudType, resources::PointCloud>(
lidar_measurement.getPointCloud(), &point_cloud, false);
}

backend::ResourceType point_cloud_type =
backend::getResourceTypeForPointCloud(point_cloud);
vi_map::VIMission& mission = map_->getMission(mission_id_);
map_->addSensorResource(
point_cloud_type, lidar_sensor_id,
lidar_measurement.getTimestampNanoseconds(), point_cloud, &mission);
}
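To summarize the branch above, a hedged sketch of the per-point time normalization it requests from convertPointCloudType. The exact convention, including whether the stored times end up absolute or scan-relative, lives inside that helper, so treat this as illustrative only:

#include <cstdint>
#include <vector>

std::vector<int64_t> normalizePointTimes(
    const std::vector<int64_t>& raw_times,  // as delivered by the driver
    uint32_t convert_to_ns,                 // e.g. 1000 if times are in us
    int64_t time_offset_ns) {  // scan timestamp for relative times, else 0
  std::vector<int64_t> times_ns;
  times_ns.reserve(raw_times.size());
  for (const int64_t raw : raw_times) {
    // Scale the raw unit to nanoseconds, then re-reference against the
    // scan timestamp when the driver reports scan-relative times.
    times_ns.push_back(
        raw * static_cast<int64_t>(convert_to_ns) + time_offset_ns);
  }
  return times_ns;
}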

template <typename PointCloudType>
73 changes: 0 additions & 73 deletions algorithms/online-map-builders/src/stream-map-builder.cc
@@ -23,29 +23,11 @@ DEFINE_bool(
"Store the point clouds associated with a lidar sensor to the map resource "
"folder.");

DEFINE_string(
map_builder_save_point_clouds_as_range_image_camera_id, "",
"Camera id of the depth camera used to convert lidar point clouds to range "
"images "
"maps.");

DEFINE_bool(
map_builder_save_point_clouds_as_range_image_including_intensity_image,
true,
"If enabled, the color or intensity information in the point cloud is used "
"to create a corresponding intensity/color for each point cloud in "
"addition to the range image.");

DEFINE_bool(
map_builder_save_point_cloud_maps_as_resources, true,
"Store the point cloud (sub-)maps associated with an external source to "
"the map resource folder.");

DEFINE_bool(
map_builder_visualize_lidar_depth_maps_in_ocv_window, false,
"If enabled, opencv windows with the result of the lidar scan to lidar "
"depth map conversion will be opened.");

namespace online_map_builders {

const vi_map::VIMap* StreamMapBuilder::constMap() const {
@@ -87,61 +69,6 @@ StreamMapBuilder::StreamMapBuilder(
sensor_manager.getAllSensorIds(&sensor_ids);
map_->associateMissionSensors(sensor_ids, mission_id_);

// Retrieve depth camera id from flags to later convert lidar point clouds to
// depth maps.
if (FLAGS_map_builder_save_point_clouds_as_resources &&
!FLAGS_map_builder_save_point_clouds_as_range_image_camera_id.empty()) {
lidar_depth_camera_id_.fromHexString(
FLAGS_map_builder_save_point_clouds_as_range_image_camera_id);
if (!lidar_depth_camera_id_.isValid()) {
LOG(ERROR)
<< "[StreamMapBuilder] The depth camera id ("
<< FLAGS_map_builder_save_point_clouds_as_range_image_camera_id
<< ") provided to project the lidar point clouds into depth maps is "
<< "not valid! Point clouds will not be projected into depth maps.";
} else if (!map_->getSensorManager().hasSensor(lidar_depth_camera_id_)) {
LOG(ERROR)
<< "[StreamMapBuilder] The depth camera id ("
<< lidar_depth_camera_id_
<< ") provided to project the lidar point clouds into depth maps is "
<< "not in the sensor manager! Point clouds will not be projected "
"into depth maps.";
} else {
VLOG(1) << "[StreamMapBuilder] Using depth camera "
<< lidar_depth_camera_id_
<< " to project lidar scans into depth maps.";

aslam::NCamera::Ptr lidar_depth_camera_sensor_ncamera_ptr =
map_->getSensorManager().getSensorPtr<aslam::NCamera>(
lidar_depth_camera_id_);
CHECK(lidar_depth_camera_sensor_ncamera_ptr);
CHECK_EQ(lidar_depth_camera_sensor_ncamera_ptr->numCameras(), 1u);
lidar_depth_camera_sensor_ =
lidar_depth_camera_sensor_ncamera_ptr->getCameraShared(0u);
CHECK(lidar_depth_camera_sensor_);
const aslam::Transformation& T_C_lidar_Cn_lidar =
lidar_depth_camera_sensor_ncamera_ptr->get_T_C_B(0u);

const aslam::Transformation& T_B_Cn_lidar =
map_->getSensorManager().getSensor_T_B_S(lidar_depth_camera_id_);
CHECK(map_->getMission(mission_id_).hasLidar())
<< "[StreamMapBuilder] Mission " << mission_id_
<< " does not have a lidar sensor and therefore cannot attach lidar "
<< "measurements!";
const aslam::SensorId& lidar_sensor_id =
map_->getMission(mission_id_).getLidarId();
CHECK(map_->getSensorManager().hasSensor(lidar_sensor_id))
<< "[StreamMapBuilder] Mission " << mission_id_
<< " has a lidar id associated (" << lidar_sensor_id
<< ") but this lidar does not exist in the sensor manager!";

const aslam::Transformation& T_B_S_lidar =
map_->getSensorManager().getSensor_T_B_S(lidar_sensor_id);
T_C_lidar_S_lidar_ =
T_C_lidar_Cn_lidar * T_B_Cn_lidar.inverse() * T_B_S_lidar;
}
}

// Initialize wheel odometry origin frame tracking
T_Ow_Btm1_.setIdentity();
}
4 changes: 0 additions & 4 deletions backend/map-resources/CMakeLists.txt
@@ -43,10 +43,6 @@ catkin_add_gtest(test_resource_map test/test_resource_map.cc)
target_link_libraries(test_resource_map ${PROJECT_NAME})
add_dependencies(test_resource_map ${PROJECT_TEST_DATA})

catkin_add_gtest(test_resource_conversion test/test_resource_conversion.cc)
target_link_libraries(test_resource_conversion ${PROJECT_NAME})
add_dependencies(test_resource_conversion ${PROJECT_TEST_DATA})

catkin_add_gtest(test_temporal_resource_id_buffer test/test-temporal-resource-id-buffer.cc)
target_link_libraries(test_temporal_resource_id_buffer ${PROJECT_NAME})
