使用opencv对图像进行去畸变:
1,先拍一组带有格子的图片,如下图
使用以下代码对图像进行标定和畸变参数计算:
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <fstream>
using namespace cv;
using namespace std;
int main(int argc,char ** argv)
{
ifstream fin("calibdata.txt"); /* 标定所用图像文件的路径 */
ofstream fout("caliberation_result.txt"); /* 保存标定结果的文件 */
//读取每一幅图像,从中提取出角点,然后对角点进行亚像素精确化
cout << "开始提取角点………………";
int image_count = 0; /* 图像数量 */
Size image_size; /* 图像的尺寸 */
Size board_size = Size(13, 9); /* 标定板上每行、列的角点数 */
vector<Point2f> image_points_buf; /* 缓存每幅图像上检测到的角点 */
vector<vector<Point2f>> image_points_seq; /* 保存检测到的所有角点 */
string filename;
int count = -1;//用于存储角点个数。
while (getline(fin, filename))
{
image_count++;
// 用于观察检验输出
cout << "image_count = " << image_count << endl;
/* 输出检验*/
cout << "-->count = " << count;
Mat imageInput = imread(filename);
//resize(imageInput, imageInput, cv::Size(640, 360));
if (imageInput.empty())
{
cout << "can not open pic!\n";
exit(-1);
}
if (image_count == 1) //读入第一张图片时获取图像宽高信息
{
image_size.width = imageInput.cols;
image_size.height = imageInput.rows;
cout << "image_size.width = " << image_size.width << endl;
cout << "image_size.height = " << image_size.height << endl;
}
/* 提取角点 */
if (0 == findChessboardCorners(imageInput, board_size, image_points_buf))
{
cout << "can not find chessboard corners!\n"; //找不到角点
cout << filename << endl;
exit(1);
}
else
{
Mat view_gray;
cvtColor(imageInput, view_gray, CV_RGB2GRAY);
/* 亚像素精确化 */
find4QuadCornerSubpix(view_gray, image_points_buf, Size(5, 5)); //对粗提取的角点进行精确化
//cornerSubPix(view_gray,image_points_buf,Size(5,5),Size(-1,-1),TermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,0.1));
image_points_seq.push_back(image_points_buf); //保存亚像素角点
/* 在图像上显示角点位置 */
drawChessboardCorners(view_gray, board_size, image_points_buf, false); //用于在图片中标记角点
//namedWindow("Camera Calibration", 0);//创建窗口
//imshow("Camera Calibration", view_gray);//显示图片
//waitKey(500);//暂停0.5S
}
}
int total = image_points_seq.size();
cout << "total = " << total << endl;
int CornerNum = board_size.width*board_size.height; //每张图片上总的角点数
for (int ii = 0; ii<total; ii++)
{
if (0 == ii % CornerNum)// 24 是每幅图片的角点个数。此判断语句是为了输出 图片号,便于控制台观看
{
int i = -1;
i = ii / CornerNum;
int j = i + 1;
cout << "--> 第 " << j << "图片的数据 --> : " << endl;
}
if (0 == ii % 3) // 此判断语句,格式化输出,便于控制台查看
{
cout << endl;
}
else
{
cout.width(10);
}
//输出所有的角点
cout << " -->" << image_points_seq[ii][0].x;
cout << " -->" << image_points_seq[ii][0].y;
}
cout << "角点提取完成!\n";
//以下是摄像机标定
cout << "开始标定………………";
/*棋盘三维信息*/
Size square_size = Size(20, 20); /* 实际测量得到的标定板上每个棋盘格的大小 */
vector<vector<Point3f>> object_points; /* 保存标定板上角点的三维坐标 */
/*内外参数*/
Mat cameraMatrix = Mat(3, 3, CV_32FC1, Scalar::all(0)); /* 摄像机内参数矩阵 */
vector<int> point_counts; // 每幅图像中角点的数量
Mat distCoeffs = Mat(1, 5, CV_32FC1, Scalar::all(0)); /* 摄像机的5个畸变系数:k1,k2,p1,p2,k3 */
vector<Mat> tvecsMat; /* 每幅图像的旋转向量 */
vector<Mat> rvecsMat; /* 每幅图像的平移向量 */
/* 初始化标定板上角点的三维坐标 */
int i, j, t;
for (t = 0; t<image_count; t++)
{
vector<Point3f> tempPointSet;
for (i = 0; i<board_size.height; i++)
{
for (j = 0; j<board_size.width; j++)
{
Point3f realPoint;
/* 假设标定板放在世界坐标系中z=0的平面上 */
realPoint.x = i * square_size.width;
realPoint.y = j * square_size.height;
realPoint.z = 0;
tempPointSet.push_back(realPoint);
}
}
object_points.push_back(tempPointSet);
}
/* 初始化每幅图像中的角点数量,假定每幅图像中都可以看到完整的标定板 */
for (i = 0; i<image_count; i++)
{
point_counts.push_back(board_size.width*board_size.height);
}
/* 开始标定 */
calibrateCamera(object_points, image_points_seq, image_size, cameraMatrix, distCoeffs, rvecsMat, tvecsMat, 0);
cout << "标定完成!\n";
//对标定结果进行评价
cout << "开始评价标定结果………………\n";
double total_err = 0.0; /* 所有图像的平均误差的总和 */
double err = 0.0; /* 每幅图像的平均误差 */
vector<Point2f> image_points2; /* 保存重新计算得到的投影点 */
cout << "\t每幅图像的标定误差:\n";
fout << "每幅图像的标定误差:\n";
for (i = 0; i<image_count; i++)
{
vector<Point3f> tempPointSet = object_points[i];
/* 通过得到的摄像机内外参数,对空间的三维点进行重新投影计算,得到新的投影点 */
projectPoints(tempPointSet, rvecsMat[i], tvecsMat[i], cameraMatrix, distCoeffs, image_points2);
/* 计算新的投影点和旧的投影点之间的误差*/
vector<Point2f> tempImagePoint = image_points_seq[i];
Mat tempImagePointMat = Mat(1, tempImagePoint.size(), CV_32FC2);
Mat image_points2Mat = Mat(1, image_points2.size(), CV_32FC2);
for (int j = 0; j < tempImagePoint.size(); j++)
{
image_points2Mat.at<Vec2f>(0, j) = Vec2f(image_points2[j].x, image_points2[j].y);
tempImagePointMat.at<Vec2f>(0, j) = Vec2f(tempImagePoint[j].x, tempImagePoint[j].y);
}
err = norm(image_points2Mat, tempImagePointMat, NORM_L2);
total_err += err /= point_counts[i];
std::cout << "第" << i + 1 << "幅图像的平均误差:" << err << "像素" << endl;
fout << "第" << i + 1 << "幅图像的平均误差:" << err << "像素" << endl;
}
std::cout << "总体平均误差:" << total_err / image_count << "像素" << endl;
fout << "总体平均误差:" << total_err / image_count << "像素" << endl << endl;
std::cout << "评价完成!" << endl;
//保存定标结果
std::cout << "开始保存定标结果………………" << endl;
Mat rotation_matrix = Mat(3, 3, CV_32FC1, Scalar::all(0)); /* 保存每幅图像的旋转矩阵 */
fout << "相机内参数矩阵:" << endl;
fout << cameraMatrix << endl << endl;
fout << "畸变系数:\n";
fout << distCoeffs << endl << endl << endl;
for (int i = 0; i<image_count; i++)
{
fout << "第" << i + 1 << "幅图像的旋转向量:" << endl;
fout << tvecsMat[i] << endl;
/* 将旋转向量转换为相对应的旋转矩阵 */
Rodrigues(tvecsMat[i], rotation_matrix);
fout << "第" << i + 1 << "幅图像的旋转矩阵:" << endl;
fout << rotation_matrix << endl;
fout << "第" << i + 1 << "幅图像的平移向量:" << endl;
fout << rvecsMat[i] << endl << endl;
}
std::cout << "完成保存" << endl;
fout << endl;
/************************************************************************
显示定标结果
*************************************************************************/
Mat mapx = Mat(image_size, CV_32FC1);
Mat mapy = Mat(image_size, CV_32FC1);
Mat R = Mat::eye(3, 3, CV_32F);
std::cout << "保存矫正图像" << endl;
string imageFileName;
std::stringstream StrStm;
for (int i = 0; i != image_count; i++)
{
std::cout << "Frame #" << i + 1 << "..." << endl;
initUndistortRectifyMap(cameraMatrix, distCoeffs, R, cameraMatrix, image_size, CV_32FC1, mapx, mapy);
StrStm.clear();
imageFileName.clear();
string filePath = "chess";
StrStm << i + 1;
StrStm >> imageFileName;
filePath += imageFileName;
filePath += ".jpg";
Mat imageSource = imread(filePath); //读取畸变图片
//resize(imageSource, imageSource, cv::Size(640, 360));
Mat newimage = imageSource.clone(); //校正后输出图片
//另一种不需要转换矩阵的方式
// undistort(imageSource,newimage,cameraMatrix,distCoeffs);
remap(imageSource, newimage, mapx, mapy, INTER_LINEAR);
/*StrStm.clear();
filePath.clear();
StrStm << i + 1;
StrStm >> imageFileName;
imageFileName += "_d.jpg";*/
//imwrite(imageFileName, newimage);
imshow(imageFileName,newimage);
}
std::cout << "保存结束" << endl;
waitKey(0);
return 0;
}
2.使用自己写的代码对图像进行畸变矫正
/*
相机内参数矩阵:
[405.604001681076, 0, 326.7936700310688;
0, 406.8922808078534, 183.8961505771391;
0, 0, 1]
畸变系数:
[-0.3594713304015758, 0.159123377796673, -0.004352624596440089, -0.003947574570427057, -0.04091311712463985]
*/
#include <opencv2/opencv.hpp>
#include <string>
#include <math.h>
using namespace std;
using namespace cv;
string image_file = "C:/Users/Administrator/Desktop/fish/save8.bmp"; // path of the distorted input image -- make sure it exists
int main(int argc, char **argv) {
// 本程序需要你自己实现去畸变部分的代码。尽管我们可以调用OpenCV的去畸变,但自己实现一遍有助于理解。
// 畸变参数
//double k1 = -0.28340811, k2 = 0.07395907, p1 = 0.00019359, p2 = 1.76187114e-05;
double k1 = -0.3594713304015758, k2 = 0.159123377796673, p1 = -0.004352624596440089, p2 = -0.003947574570427057,k3 = -0.04091311712463985;
// 内参
double fx = 405.604001681076, fy = 406.8922808078534, cx = 326.7936700310688, cy = 183.8961505771391;
//double fx = 458.654, fy = 457.296, cx = 367.215, cy = 248.375;
cv::Mat image = cv::imread(image_file, 0); // 图像是灰度图,CV_8UC1
resize(image, image, cv::Size(640, 360));
int rows = image.rows, cols = image.cols;
cv::Mat image_undistort = cv::Mat(rows, cols, CV_8UC1); // 去畸变以后的图
// 计算去畸变后图像的内容
for (int v = 0; v < rows; v++)
for (int u = 0; u < cols; u++) {
double u_distorted = 0, v_distorted = 0;
// TODO 按照公式,计算点(u,v)对应到畸变图像中的坐标(u_distorted, v_distorted) (~6 lines)
// start your code here
//image_undistort中含有非畸变的图像坐标
//将image_undistort的坐标通过内参转换到归一化坐标系下,此时得到的归一化坐标是对的
//将得到的归一化坐标系进行畸变处理
//将畸变处理后的坐标通过内参转换为图像坐标系下的坐标
//这样就相当于是在非畸变图像的图像坐标和畸变图像的图像坐标之间建立了一个对应关系
//相当于是非畸变图像坐标在畸变图像中找到了映射
//对畸变图像进行遍历之后,然后赋值(一般需要线性插值,因为畸变后图像的坐标不一定是整数的),即可得到矫正之后的图像
double x1, y1, x2, y2;
x1 = (u - cx) / fx;
y1 = (v - cy) / fy;
double r2;
r2 = pow(x1, 2) + pow(y1, 2);
x2 = x1*(1 + k1*r2 + k2*pow(r2, 2) + k3 * r2 * r2 * r2) + 2 * p1*x1*y1 + p2*(r2 + 2 * x1*x1);
y2 = y1*(1 + k1*r2 + k2*pow(r2, 2) + k3 * r2 * r2 * r2) + p1*(r2 + 2 * y1*y1) + 2 * p2*x1*y1;
u_distorted = fx*x2 + cx;
v_distorted = fy*y2 + cy;
// end your code here
// 赋值 (最近邻插值)
if (u_distorted >= 0 && v_distorted >= 0 && u_distorted < cols && v_distorted < rows) {
image_undistort.at<uchar>(v, u) = image.at<uchar>((int)v_distorted, (int)u_distorted);
}
else {
image_undistort.at<uchar>(v, u) = 0;
}
}
// 画图去畸变后图像
//cv::imshow("image_file", image);
cv::waitKey(0);
return 0;
}
3.去畸变前的原始点和去畸变后的点对应关系
// Undistort a set of pixel coordinates: for each distorted input point, solve
// the forward distortion model for the corresponding undistorted pixel.
// Since the model cannot be inverted in closed form, a fixed-point iteration
// is used, seeded with the distorted normalized coordinates — the same
// strategy OpenCV's undistortPoints() uses.
void myUndistortPoints(const std::vector<cv::Point2d> & src, std::vector<cv::Point2d> & dst,const cv::Mat & cameraMatrix, const cv::Mat & distortionCoeff)
{
	dst.clear();
	// Intrinsic parameters.
	const double fx = cameraMatrix.at<double>(0, 0);
	const double fy = cameraMatrix.at<double>(1, 1);
	const double ux = cameraMatrix.at<double>(0, 2);
	const double uy = cameraMatrix.at<double>(1, 2);
	// Distortion coefficients (k1, k2, p1, p2, k3). The rational-model terms
	// k4..k6 are kept at zero, matching the 5-coefficient calibration above.
	const double k1 = distortionCoeff.at<double>(0, 0);
	const double k2 = distortionCoeff.at<double>(0, 1);
	const double p1 = distortionCoeff.at<double>(0, 2);
	const double p2 = distortionCoeff.at<double>(0, 3);
	const double k3 = distortionCoeff.at<double>(0, 4);
	const double k4 = 0;
	const double k5 = 0;
	const double k6 = 0;
	for (const cv::Point2d & p : src)
	{
		printf("px = %f,py = %f\n", p.x, p.y);
		// Back-project the distorted pixel into normalized camera coordinates.
		const double x0 = (p.x - ux) / fx;
		const double y0 = (p.y - uy) / fy;
		// Start the iteration from the distorted normalized point.
		double x = x0;
		double y = y0;
		for (int iter = 0; iter < 10; iter++)
		{
			const double r2 = x * x + y * y;
			// Combined radial factor: numerator is the (zero) rational part,
			// denominator the k1..k3 radial polynomial.
			const double icdist = (1. + k4 * r2 + k5 * r2 * r2 + k6 * r2 * r2 * r2)
			                    / (1. + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2);
			// Tangential distortion terms.
			const double deltaX = 2. * p1 * x * y + p2 * (r2 + 2. * x * x);
			const double deltaY = p1 * (r2 + 2. * y * y) + 2. * p2 * x * y;
			x = (x0 - deltaX) * icdist;
			y = (y0 - deltaY) * icdist;
		}
		// Project the undistorted normalized point back to pixel coordinates.
		dst.push_back(cv::Point2d(x * fx + ux, y * fy + uy));
	}
}