Learned from: the OpenCV-Python tutorial (Chinese translation)

The explanation of camera calibration is hidden in the code comments.

init (object points, image points) ---> loop over every image ---> find the chessboard corners ---> refine them to sub-pixel accuracy ---> draw the corners ---> compute the camera intrinsics and distortion coefficients from the object / image points ---> get the optimal new camera matrix ---> undistort the image ---> compute the re-projection error

#!/usr/bin/python
# encoding: utf-8
import cv2
import glob
import numpy as np

# Termination criteria for the sub-pixel corner refinement
criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 0.001)

# Prepare the 3D object points: the board's 7x6 inner-corner grid, in units of one square
objp = np.zeros((6*7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)
print(objp)
objpoints = []  # 3D points in the board's coordinate system
imgpoints = []  # 2D points in the image plane

images = glob.glob('D:\\PICTURE\\checkerboard\\*.jpg')
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)

    if ret:
        # Refine the corner locations to sub-pixel accuracy, keep the point
        # correspondences and draw the detected corners
        objpoints.append(objp)
        corners2 = cv2.cornerSubPix(gray, corners, (7, 6), (-1, -1), criteria)
        imgpoints.append(corners2)
        cv2.drawChessboardCorners(img, (7, 6), corners2, ret)

        # Calibration
        # cv2.calibrateCamera() needs the image size (width, height) to compute
        # the intrinsic parameters and the distortion coefficients.
        # It returns ret (the RMS re-projection error), mtx (the intrinsic camera
        # matrix), dist (the distortion coefficients), and the extrinsics:
        # rvecs (rotation vectors) and tvecs (translation vectors), one per view.
        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)

        print(fname, "rvecs", rvecs)
        cv2.imshow('img', img)

        k = cv2.waitKey(500) & 0xff
        h, w = img.shape[:2]

        # Undistortion
        # The free scaling parameter alpha controls cv2.getOptimalNewCameraMatrix().
        # With alpha = 0 the undistorted image keeps only valid pixels, possibly
        # dropping some pixels near the corners. With alpha = 1 every source pixel
        # is kept, at the cost of black regions at the borders. The function also
        # returns an ROI that can be used to crop the result.
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))  # roi is just a tuple
        x, y, roi_w, roi_h = roi
        print(roi)

        # Remove the distortion
        if k == ord('q'):
            # Method 1: cv2.undistort()
            dst = cv2.undistort(img, mtx, dist, newCameraMatrix=newcameramtx)
            dst = dst[y:y+roi_h, x:x+roi_w]
            cv2.imshow('undistortimg', dst)
        else:
            # Method 2: remapping. First build the map from the distorted image to
            # the undistorted one, then apply it with cv2.remap().
            mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w, h), cv2.CV_32FC1)
            dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)
            # dst = dst[y:y+roi_h, x:x+roi_w]  # try enabling this cropping step and compare
            cv2.imshow('undistortimg', dst)
            cv2.waitKey()

        # Re-projection error: the closer to zero, the better the calibration.
        # Given the intrinsics, the distortion coefficients and the rotation /
        # translation vectors, cv2.projectPoints() transforms the object points
        # back into image points. We then take the L2 norm between the projected
        # points and the corners found by the detector, and average this error
        # over all calibration images.
        mean_error = 0
        for i in range(len(objpoints)):
            imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
            error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
            mean_error += error
        print("total error: ", mean_error / len(objpoints))

Below is the code for saving mtx and dist to a file and reading them back. At first I did not know about np.savez, so I simply wrote them into two separate files and read mtx and dist back one by one (a sketch of that two-file variant follows the np.savez example).

# Save to a single .npz file
np.savez('fname.npz', mtx=mtx, dist=dist)

# Load it back
with np.load('fname.npz') as X:
    mtx = X['mtx']
    dist = X['dist']
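
The pose-estimation script below instead reads mtx and dist from two plain-text files with np.loadtxt. The matching save step is not shown in the original post; a minimal sketch, assuming the same file names that the loading code uses, would be:

# Save the intrinsic matrix and the distortion coefficients as plain text,
# one file each, so np.loadtxt() can read them back later.
np.savetxt('D:\\PICTURE\\33.txt', mtx)   # 3x3 intrinsic matrix
np.savetxt('D:\\PICTURE\\34.txt', dist)  # distortion coefficients (k1, k2, p1, p2, k3)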

If the mtx you read back looks like the screenshot in the original post, the save / load round trip worked.

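The screenshot is not reproduced here, but the shape is easy to check: mtx is always a 3x3 array of the form [[fx, 0, cx], [0, fy, cy], [0, 0, 1]], where fx and fy are the focal lengths in pixels and (cx, cy) is the principal point, while dist holds the distortion coefficients (k1, k2, p1, p2, k3). A quick sanity check after loading, using the variable names from the snippet above, might be:

# Sanity-check the loaded calibration data.
assert mtx.shape == (3, 3) and mtx[2, 2] == 1.0
print("focal lengths:", mtx[0, 0], mtx[1, 1])
print("principal point:", mtx[0, 2], mtx[1, 2])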

#!/usr/bin/python
# encoding: utf-8
import cv2
import glob
import numpy as np

# Load the intrinsic matrix and the distortion coefficients saved earlier
# (here they were written into two separate text files)
mtxx = np.loadtxt('D:\\PICTURE\\33.txt')
distt = np.loadtxt('D:\\PICTURE\\34.txt')
print(mtxx,'\n')
print(distt)

# Termination criteria for the sub-pixel corner refinement
criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 0.001)

# Prepare the 3D object points for the 7x6 inner-corner board
objp = np.zeros((6*7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)

# axis1: the end points of the X, Y, Z axes (3 squares long); the Z axis points
# towards the camera, hence the -3.
axis1 = np.float32([[3, 0, 0], [0, 3, 0], [0, 0, -3]])
# axis2: the 8 corners of a 3x3x3 cube standing on the board.
axis2 = np.float32([[0, 0, 0], [0, 3, 0], [3, 3, 0], [3, 0, 0],
                    [0, 0, -3], [0, 3, -3], [3, 3, -3], [3, 0, -3]])

def draw1(img, corners, imgpts):
    # Draw the three coordinate axes from the first detected corner.
    # ravel() flattens the (1, 2) point arrays; cast to int because cv2.line
    # expects integer pixel coordinates.
    corner = tuple(corners[0].ravel().astype(int))
    img = cv2.line(img, corner, tuple(imgpts[0].ravel().astype(int)), (255, 0, 0), 5)
    img = cv2.line(img, corner, tuple(imgpts[1].ravel().astype(int)), (0, 255, 0), 5)
    img = cv2.line(img, corner, tuple(imgpts[2].ravel().astype(int)), (0, 0, 255), 5)
    return img

def draw2(img, corners, imgpts):
    imgpts = np.int32(imgpts).reshape(-1, 2)

    # Draw the ground floor of the cube in green
    img = cv2.drawContours(img, [imgpts[:4]], -1, (0, 255, 0), -1)
    cv2.imshow('c', img)

    # Draw the pillars in blue
    for i, j in zip(range(4), range(4, 8)):
        img = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]), (255, 0, 0), 3)

    # Draw the top layer in red
    img = cv2.drawContours(img, [imgpts[4:]], -1, (0, 0, 255), 3)
    return img

images = glob.glob('D:\\PICTURE\\checkerboard\\left*.jpg')
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)

    if ret:
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        # Estimate the board's pose (rotation and translation) for this view
        _, rvecs, tvecs, inliers = cv2.solvePnPRansac(objp, corners2, mtxx, distt)
        # Project the 3D axis / cube points onto the image plane
        # (press 'q' within 500 ms to draw the axes, anything else draws the cube)
        k = cv2.waitKey(500) & 0xff
        if k == ord('q'):
            imgpts, jac = cv2.projectPoints(axis1, rvecs, tvecs, mtxx, distt)
            print('imgpts', imgpts)
            img = draw1(img, corners2, imgpts)
        else:
            imgpts, jac = cv2.projectPoints(axis2, rvecs, tvecs, mtxx, distt)
            print('imgpts', imgpts)
            img = draw2(img, corners2, imgpts)
        cv2.imshow('img', img)
        cv2.waitKey(1000)

cv2.destroyAllWindows()
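
As with the calibration script, the quality of the pose returned by cv2.solvePnPRansac() can be judged by re-projecting the board corners and comparing them with the detected ones. A small sketch that could be placed inside the if ret: block above, reusing the names from that loop, is:

# Re-project the board corners with the estimated pose and measure the pixel error.
proj, _ = cv2.projectPoints(objp, rvecs, tvecs, mtxx, distt)
err = cv2.norm(corners2, proj, cv2.NORM_L2) / len(proj)
print(fname, "mean re-projection error (px):", err)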