Init calibration with blender benchmark #1

Open
wants to merge 16 commits into develop
9 changes: 9 additions & 0 deletions calibration_with_blender/README.md
@@ -0,0 +1,9 @@
# Calibration with Blender
### Create synthetic images with a calibration pattern in Blender:
* Open calibration.blend in Blender
* Copy the content of render.py into the Blender text editor
* Run the script in Blender
* Distort the rendered images with image_distort.cpp (see the Python sketch after this list)
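
As a rough sketch of the distortion step, the loop below drives the compiled tool over the rendered images from Python. The binary name `./image_distort`, the `/tmp/render-*.jpg` paths and the camera parameters are illustrative assumptions, not values fixed by this repo.

```python
import glob
import subprocess

# Assumed locations: ./image_distort is the compiled C++ tool,
# /tmp/render-*.jpg are the images written by render.py.
for path in sorted(glob.glob('/tmp/render-*.jpg')):
    out = path.replace('.jpg', '-dist.jpg')
    subprocess.run(
        ['./image_distort', path, 'pinhole',   # camera_model placeholder
         '2200', '2200', '0', '0',             # fx fy cx cy (0 means image centre)
         '-0.2', '0.05', '0', '0', '0',        # example k1 k2 p1 p2 k3
         out],
        check=True)
```

Passing `0` for cx and cy makes image_distort.cpp fall back to the image centre.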

### Pattern generation command
    gen_pattern.py -c 14 -r 19 -T checkerboard -u px -s 220 -w 3508 -h 4961 && convert out.svg checkerboard.png
Contributor:
Please add a reference to the OpenCV repo where gen_pattern.py can be obtained; a reference to ImageMagick is also required, at least the apt-get install ... command.
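
If gen_pattern.py (from the OpenCV repository) or ImageMagick's convert is not at hand, an equivalent 14x19 checkerboard can be rasterized directly. The sketch below (assuming numpy and OpenCV) mirrors the parameters of the command above; margins and the exact layout may differ from gen_pattern.py's output.

```python
import cv2
import numpy as np

cols, rows, square = 14, 19, 220          # board squares and square size in px
page_w, page_h = 3508, 4961               # page size in px (A4 at ~300 DPI)

img = np.full((page_h, page_w), 255, dtype=np.uint8)   # white page

# Centre the board on the page
x0 = (page_w - cols * square) // 2
y0 = (page_h - rows * square) // 2

# Paint every other square black
for r in range(rows):
    for c in range(cols):
        if (r + c) % 2 == 0:
            y, x = y0 + r * square, x0 + c * square
            img[y:y + square, x:x + square] = 0

cv2.imwrite('checkerboard.png', img)
```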

Binary file added calibration_with_blender/calibration.blend
Binary file not shown.
Binary file added calibration_with_blender/checkerboard.png
73 changes: 73 additions & 0 deletions calibration_with_blender/image_distort.cpp
@@ -0,0 +1,73 @@
#include <iostream>
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>

int main(int argc, char* argv[])
{
    if (argc < 8)
    {
        std::cout << "usage; " << argv[0] << "image camera_model fx fy cx cy [d0 .. dn] output"
                  << std::endl;
        return EXIT_FAILURE;
    }

Contributor (on the usage line above):
`usage;` - typo.

    /* Load original image */
    cv::Mat image = cv::imread(argv[1]);
    if (image.empty())
    {
        std::cout << "Could not open " << argv[1] << std::endl;
        return EXIT_FAILURE;
    }

    cv::imshow("original", image);
    cv::waitKey(10);

    /* Construct K */
    cv::Mat_<float> camera_matrix = cv::Mat::eye(3, 3, CV_32F);
    camera_matrix(0, 0) = std::stof(argv[3]);
    camera_matrix(1, 1) = std::stof(argv[4]);
    camera_matrix(0, 2) = std::stof(argv[5]) == 0 ? image.size().width / 2.f : std::stof(argv[5]);
    camera_matrix(1, 2) = std::stof(argv[6]) == 0 ? image.size().height / 2.f : std::stof(argv[6]);

    std::cout << camera_matrix << std::endl;

    /* Load distortion coefficients */
    cv::Mat distortion = cv::Mat::zeros(1, 5, CV_32F);
    for (int i = 7; i < argc - 1; ++i)
    {
        distortion.at<float>(0, i - 7) = std::stof(argv[i]);
    }

    std::cout << distortion << std::endl;

    /* Collect original point locations (one per pixel) */
    std::vector<cv::Point2f> image_points;
    for (int i = 0; i < image.rows; i++)
    {
        for (int j = 0; j < image.cols; j++)
        {
            image_points.emplace_back(j, i);
        }
    }

    /* remap() performs the inverse mapping: to distort the image we need, for each
       distorted pixel, the undistorted position it should be sampled from */
    cv::Mat_<cv::Point2f> undistorted_points(image.size());
    cv::undistortPoints(image_points, undistorted_points, camera_matrix, distortion,
                        cv::noArray(),
                        camera_matrix);

    /* 2-channel (x & y) map with the same shape as the result image */
    undistorted_points = undistorted_points.reshape(2, image.rows);

    std::cout << undistorted_points.size() << std::endl;

    /* Fill result image */
    cv::Mat distorted;
    cv::remap(image, distorted, undistorted_points, cv::noArray(), cv::INTER_LANCZOS4);

    /* Save result */
    cv::imshow("distorted", distorted);
    cv::waitKey(0);
    cv::imwrite(argv[argc - 1], distorted);
}
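
For quick experiments without compiling, the same distort-by-undistorting-the-sampling-grid idea can be sketched in Python with OpenCV. The file names and the intrinsics/distortion values below are placeholders, not part of this repo.

```python
import cv2
import numpy as np

image = cv2.imread('/tmp/render-000.jpg')   # assumed input from render.py
h, w = image.shape[:2]

fx = fy = 2200.0                            # example focal lengths in px
cx, cy = w / 2.0, h / 2.0                   # principal point at the image centre
K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]], dtype=np.float32)
dist = np.array([-0.2, 0.05, 0.0, 0.0, 0.0], dtype=np.float32)  # k1 k2 p1 p2 k3

# One point per pixel of the output (distorted) image
xs, ys = np.meshgrid(np.arange(w, dtype=np.float32), np.arange(h, dtype=np.float32))
pts = np.stack([xs.ravel(), ys.ravel()], axis=1).reshape(-1, 1, 2)

# remap() pulls pixels from the source, so the map must give, for every
# distorted destination pixel, its undistorted source position
undist = cv2.undistortPoints(pts, K, dist, P=K).reshape(h, w, 2)

distorted = cv2.remap(image, undist, None, cv2.INTER_LANCZOS4)
cv2.imwrite('/tmp/render-000-dist.jpg', distorted)
```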
106 changes: 106 additions & 0 deletions calibration_with_blender/render.py
@@ -0,0 +1,106 @@
import bpy
from bpy_extras.object_utils import world_to_camera_view
import random
import time


def check_projection(cam, obj):
    scene = bpy.context.scene
    render = scene.render

    #print(obj.data.vertices[0].co, "- Vert 0 (original)")

    # Convert the object to a mesh
    me = obj.to_mesh()
    #print(me.vertices[0].co, " - Vert 0 (deformed/modified)")

    # Transform mesh according to translation and rotation
    me.transform(obj.matrix_world)
    #print(me.vertices[0].co, " - Vert 0 (deformed/modified, world space)")

    # Collect mesh coordinates
    verts = [vert.co for vert in me.vertices]
    print(list(verts))

    # Convert to normalized device coordinates
    coords_2d = [world_to_camera_view(scene, cam, coord) for coord in verts]

    # x, y must be in [0, 1], z > 0
    for x, y, z in coords_2d:
        print(x, y, z)
        if x < 0 or x > 1:
            return False
        if y < 0 or y > 1:
            return False
        if z <= 0:
            return False

    return True


def set_position_origin(obj):
    obj.location.x = 0
    obj.location.y = 0
    obj.location.z = 2

    obj.rotation_euler[0] = 0
    obj.rotation_euler[1] = 0
    obj.rotation_euler[2] = 0



# Get camera
c = bpy.data.objects['Camera']

# Set camera intrinsics (on the camera data block)
c.data.lens_unit = 'MILLIMETERS'
c.data.lens = 20  # focal length

# Set camera position
set_position_origin(c)
c.location.z = 1


# Set pattern init position
p = bpy.data.objects['checkerboard']

set_position_origin(p)

N = 30  # Number of generated images
n = 0
i = 0

random.seed(1)

while n < N and i < 1000:
    # Set position
    p.location.x = random.uniform(-0.4, 0.4)
    p.location.y = random.uniform(-0.4, 0.4)
    p.location.z = random.uniform(-0.2, 0.2)

    # Set rotation
    p.rotation_euler[0] = random.uniform(0, 0.5)
    p.rotation_euler[1] = random.uniform(0, 0.5)
    p.rotation_euler[2] = random.uniform(0, 0.5)

    # Update matrices
    bpy.context.view_layer.update()

    # Debug
    print('>', i, '<')
    print(p.location)
    print(p.rotation_euler)

    # Render and save the image if the pattern is fully visible
    if check_projection(c, p):
        print('True')
        bpy.context.scene.render.filepath = '/tmp/render-{:03d}.jpg'.format(i)
        bpy.ops.render.render(write_still=True)
        n += 1
    else:
        print('False')

    i += 1
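
To close the loop implied by the PR title, here is a hedged sketch of running a calibration over the generated images and inspecting the recovered parameters. The `/tmp` paths, the 13x18 inner-corner grid (one less than the 14x19 squares in each direction) and the plain `cv2.calibrateCamera` call are assumptions about the intended benchmark, not code from this PR.

```python
import glob
import numpy as np
import cv2

# Inner-corner grid of a 14x19-square board: (cols - 1) x (rows - 1)
pattern_size = (13, 18)

# Board-plane object points, in units of one square
objp = np.zeros((pattern_size[0] * pattern_size[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:pattern_size[0], 0:pattern_size[1]].T.reshape(-1, 2)

obj_points, img_points = [], []
image_size = None

# Works on the raw renders or on their distorted counterparts
for path in sorted(glob.glob('/tmp/render-*.jpg')):
    gray = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    image_size = gray.shape[::-1]
    found, corners = cv2.findChessboardCorners(gray, pattern_size)
    if not found:
        continue
    corners = cv2.cornerSubPix(
        gray, corners, (11, 11), (-1, -1),
        (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-3))
    obj_points.append(objp)
    img_points.append(corners)

rms, K, dist, _, _ = cv2.calibrateCamera(obj_points, img_points, image_size, None, None)
print('RMS reprojection error:', rms)
print('K =\n', K)
print('distortion =', dist.ravel())
```

Comparing the recovered K and distortion against the values fed to image_distort.cpp gives a simple end-to-end check of the synthetic pipeline.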