0. Basic steps for setting up binocular stereo vision
a) Stereo calibration (samples/cpp/stereo_calib.cpp), done as one sequence of operations.
b) Epipolar rectification of the images based on the calibration results (the stereoRectify function)
c) Finding corresponding points (disparity) along each epipolar line (many options here, via StereoMatcher)
d) Converting the disparity into a point cloud (cv2.reprojectImageTo3D)
e) Storing the point cloud (write_ply in samples/python/stereo_match.py) and displaying it
1. Stereo chessboard calibration in detail
1.1 The calibration function in the C++ sample:
StereoCalib(imagelist, boardSize, squareSize, false, true, showRectified);
imagelist: a series of calibration images
boardSize: the grid size of the calibration board (e.g. 8×6)
squareSize: the size of each board square (e.g. 20 mm)
displayCorners: whether to display the detected corners
useCalibrated: whether to use the calibrated result
showRectified: whether to show the rectified result
1.2 The calibration workflow
1. Find the sub-pixel corner locations, stored in imagePoints[0] and imagePoints[1] for the left and right images respectively:
findChessboardCorners
cornerSubPix
2. Build the board point coordinates, objectPoints:
objectPoints[i].push_back(Point3f(k*squareSize, j*squareSize, 0));
3. Obtain an initial CameraMatrix for each of the two cameras:
Mat cameraMatrix[2], distCoeffs[2];
cameraMatrix[0] = initCameraMatrix2D(objectPoints,imagePoints[0],imageSize,0);
cameraMatrix[1] = initCameraMatrix2D(objectPoints,imagePoints[1],imageSize,0);
4. Run the stereo calibration:
Mat R, T, E, F;
double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
imageSize, R, T, E, F,
CALIB_FIX_ASPECT_RATIO +
CALIB_ZERO_TANGENT_DIST +
CALIB_USE_INTRINSIC_GUESS +
CALIB_SAME_FOCAL_LENGTH +
CALIB_RATIONAL_MODEL +
CALIB_FIX_K3 + CALIB_FIX_K4 + CALIB_FIX_K5,
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 100, 1e-5) );
5. Measuring the calibration accuracy; the comment in the sample explains the idea:
// CALIBRATION QUALITY CHECK
// because the output fundamental matrix implicitly
// includes all the output information,
// we can check the quality of calibration using the
// epipolar geometry constraint: m2^t*F*m1=0
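For reference, a minimal Python sketch of this check, mirroring the idea of the C++ sample (it assumes the corner coordinates have already been undistorted, e.g. with cv2.undistortPoints using the camera matrix as P, and that F is the fundamental matrix returned by stereoCalibrate):

import cv2
import numpy as np

def epipolar_error(points_left, points_right, F):
    """Average distance (pixels) of each corner from the epipolar line induced
    by its counterpart in the other image, i.e. how well m2^T * F * m1 = 0 holds."""
    total_err, n = 0.0, 0
    for pl, pr in zip(points_left, points_right):  # one image pair at a time
        pl = pl.reshape(-1, 1, 2).astype(np.float32)
        pr = pr.reshape(-1, 1, 2).astype(np.float32)
        lines_in_right = cv2.computeCorrespondEpilines(pl, 1, F).reshape(-1, 3)
        lines_in_left = cv2.computeCorrespondEpilines(pr, 2, F).reshape(-1, 3)
        pl, pr = pl.reshape(-1, 2), pr.reshape(-1, 2)
        # the line coefficients are normalized, so |a*x + b*y + c| is the point-line distance
        err = np.abs(np.sum(pr * lines_in_right[:, :2], axis=1) + lines_in_right[:, 2]) + \
              np.abs(np.sum(pl * lines_in_left[:, :2], axis=1) + lines_in_left[:, 2])
        total_err += err.sum()
        n += len(err)
    return total_err / n

With the variables of the Python script in section 1.3, a call could look like epipolar_error(image_points[:image_number], image_points[image_number:], F).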
6. Save the calibration results
(omitted)
7. Rectify an image to check whether the calibration works:
Mat R1, R2, P1, P2, Q; // R1/R2: rectification rotations; P1/P2: projection matrices in the rectified frames; Q: 4x4 disparity-to-depth mapping matrix
Rect validRoi[2];
stereoRectify(cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
imageSize, R, T, R1, R2, P1, P2, Q,
CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
// Precompute maps for cv::remap(), i.e. build the remapping lookup tables
initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);
// Read an image and rectify it
Mat img = imread(goodImageList[i*2+k], 0); // why load it as grayscale?
Mat rimg, cimg;
remap(img, rimg, rmap[k][0], rmap[k][1], INTER_LINEAR);
cvtColor(rimg, cimg, COLOR_GRAY2BGR);
1.3 Python implementation
import os
import time
import cv2
import numpy as np

# 0. Basic configuration
show_corners = False
image_number = 13
board_size = (9, 6)  # i.e. boardSize
square_Size = 20
image_lists = []  # the loaded images
image_points = []  # the detected image points
# 1. Read the images and find the chessboard corners
image_dir = "/home/wukong/opencv-4.1.0/samples/data"
image_names = []
[image_names.append(image_dir + "/left%02d.jpg" % i) for i in
[1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14]] # 没有10,坑爹
[image_names.append(image_dir + "/right%02d.jpg" % i) for i in [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14]]
print(len(image_names))
for image_name in image_names:
    print(image_name)
    image = cv2.imread(image_name, 0)
    found, corners = cv2.findChessboardCorners(image, board_size)  # coarse corner detection
    if not found:
        print("ERROR(no corners):" + image_name)
        return None
    # show the detection result
    if show_corners:
        vis = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        cv2.drawChessboardCorners(vis, board_size, corners, found)
        cv2.imwrite(image_name.split(os.sep)[-1], vis)
        cv2.namedWindow("xxx", cv2.WINDOW_NORMAL)
        cv2.imshow("xxx", vis)
        cv2.waitKey()
    term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.01)
    cv2.cornerSubPix(image, corners, (11, 11), (-1, -1), term)  # sub-pixel corner refinement
    image_points.append(corners.reshape(-1, 2))
    image_lists.append(image)
# 2. Build the board point coordinates (objectPoints)
object_points = np.zeros((np.prod(board_size), 3), np.float32)
object_points[:, :2] = np.indices(board_size).T.reshape(-1, 2)
object_points *= square_Size
object_points = [object_points] * image_number
# object_points = np.repeat(object_points[np.newaxis, :], 13, axis=0)
# print(object_points.shape)
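# (Aside) what the np.indices trick above produces, e.g. for a small (3, 2) board:
#   np.indices((3, 2)).T.reshape(-1, 2)
#   -> [[0, 0], [1, 0], [2, 0], [0, 1], [1, 1], [2, 1]]
# i.e. the grid points in the same row-major order as findChessboardCorners returns them.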
# 3. Get an initial CameraMatrix for each camera
h, w = image_lists[0].shape
camera_matrix = list()
camera_matrix.append(cv2.initCameraMatrix2D(object_points, image_points[:image_number], (w, h), 0))
camera_matrix.append(cv2.initCameraMatrix2D(object_points, image_points[image_number:], (w, h), 0))
# 4. Run the stereo calibration
term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 100, 1e-5)
retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = \
    cv2.stereoCalibrate(object_points, image_points[:image_number], image_points[image_number:],
                        camera_matrix[0], None, camera_matrix[1], None, (w, h),
                        flags=cv2.CALIB_FIX_ASPECT_RATIO | cv2.CALIB_ZERO_TANGENT_DIST | cv2.CALIB_USE_INTRINSIC_GUESS |
                              cv2.CALIB_SAME_FOCAL_LENGTH | cv2.CALIB_RATIONAL_MODEL |
                              cv2.CALIB_FIX_K3 | cv2.CALIB_FIX_K4 | cv2.CALIB_FIX_K5,
                        criteria=term)
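# retval is the RMS re-projection error reported by stereoCalibrate;
# as a rough rule of thumb (not from the sample), well under 1 pixel is a good sign.
print("stereo calibration RMS error: %f" % retval)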
# 5. Measure the calibration accuracy, TODO (see the epipolar-error sketch in section 1.2)
# 6. Save the calibration results, TODO
# 7. Rectify an image to check that epipolar rectification works
R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = \
    cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, (w, h), R, T)
map1_1, map1_2 = cv2.initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, (w, h), cv2.CV_16SC2)
map2_1, map2_2 = cv2.initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, (w, h), cv2.CV_16SC2)
start_time = time.time()
result1 = cv2.remap(image_lists[0], map1_1, map1_2, cv2.INTER_LINEAR)
result2 = cv2.remap(image_lists[image_number], map2_1, map2_2, cv2.INTER_LINEAR)
print("remap time: %f s" % (time.time() - start_time))
result = np.concatenate((result1, result2), axis=1)
result[::20, :] = 0  # draw a horizontal black line every 20 rows to check row alignment
cv2.imwrite("rec.png", result)
Epipolar rectification result (rec.png):
The overall result looks reasonable.
2. Epipolar rectification of the images based on the calibration results (the stereoRectify function)
Based on the calibration results, new virtual cameras are placed:
Determine the new virtual camera poses so that the epipolar lines become parallel
Build the remapping maps
Apply the map transform with remap
R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = \
    cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, (w, h), R, T)
map1_1, map1_2 = cv2.initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, (w, h), cv2.CV_16SC2)
map2_1, map2_2 = cv2.initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, (w, h), cv2.CV_16SC2)
result1 = cv2.remap(image_lists[0], map1_1, map1_2, cv2.INTER_LINEAR)
result2 = cv2.remap(image_lists[image_number], map2_1, map2_2, cv2.INTER_LINEAR)
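Note that the C++ sample additionally passes CALIB_ZERO_DISPARITY and alpha = 1 to stereoRectify (keep the principal points aligned, keep all source pixels). In Python the corresponding call would look roughly like this, using the same variables as above:

R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(
    cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, (w, h), R, T,
    flags=cv2.CALIB_ZERO_DISPARITY, alpha=1)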
3. Finding corresponding points (disparity) along each epipolar line
There are many methods.
Stereo matching methods:
StereoBM: the block matching algorithm; pixel-level disparities; fast
StereoSGBM: the semi-global block matching algorithm; sub-pixel accuracy, but much slower, so not an option for real-time use (a minimal example follows after this list)
StereoBeliefPropagation: reportedly treats the problem as a Markov random field, so it can be solved by belief propagation; I have not mastered this yet and it is something to study further. #TODO
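For example, a minimal StereoSGBM sketch (the parameter values are only illustrative, not tuned; result1 and result2 are assumed to be the rectified grayscale left/right images from section 2):

import cv2
import numpy as np

block_size = 5
stereo = cv2.StereoSGBM_create(
    minDisparity=0,
    numDisparities=96,                # must be a multiple of 16
    blockSize=block_size,
    P1=8 * block_size * block_size,   # penalty for small disparity changes between neighbours
    P2=32 * block_size * block_size,  # penalty for large disparity changes between neighbours
    uniquenessRatio=10,
    speckleWindowSize=100,
    speckleRange=2)
# compute() returns fixed-point disparities scaled by 16
disparity = stereo.compute(result1, result2).astype(np.float32) / 16.0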
4. Converting disparity to a point cloud (cv2.reprojectImageTo3D)
Only a single call is needed; it is very simple:
points = cv2.reprojectImageTo3D(disparity, Q)
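As a small follow-up in the spirit of samples/python/stereo_match.py, pixels without a valid disparity are usually masked out before the cloud is stored or displayed (disparity and points as in the steps above):

mask = disparity > disparity.min()  # invalid matches end up at the minimum disparity
out_points = points[mask]           # N x 3 array of (X, Y, Z) coordinates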
5. Point cloud storage and display
Omitted; all of this can be found in opencv/samples/python (e.g. stereo_match.py).
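For reference, a write_ply sketch along the lines of the one in samples/python/stereo_match.py (verts is an N x 3 point array, colors an N x 3 uint8 color array):

import numpy as np

ply_header = '''ply
format ascii 1.0
element vertex %(vert_num)d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header
'''

def write_ply(fn, verts, colors):
    verts = verts.reshape(-1, 3)
    colors = colors.reshape(-1, 3)
    verts = np.hstack([verts, colors])
    with open(fn, 'wb') as f:
        f.write((ply_header % dict(vert_num=len(verts))).encode('utf-8'))
        np.savetxt(f, verts, fmt='%f %f %f %d %d %d ')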