//---------------------------------【Headers & Namespaces】----------------------------
//		Description: headers and namespaces used by this program
//------------------------------------------------------------------------------------------------
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include <iostream>
using namespace cv;
using namespace std;


//-----------------------------------【main() function】--------------------------------------------
//		Description: entry point of the console application; execution starts here
//-----------------------------------------------------------------------------------------------
int main()
{

	//【1】Load the source images
	//Mat srcImage1 = imread("5.png", 1);
	//Mat srcImage2 = imread("6.bmp", 1);
	Mat srcImage1 = imread("1.jpg", 1);
	Mat srcImage2 = imread("2.jpg", 1);
	if (!srcImage1.data || !srcImage2.data)
	{
		printf("读取图片错误,请确定目录下是否有imread函数指定的图片存在~! \n");
		system("pause");
		return false;
	}
	//imshow("src1", srcImage1);
	//imshow("src2", srcImage2);

	//【1--0】Show the two images side by side (this assumes both images have the same size)
	Size imageSize = srcImage2.size();
	Mat imageTwo = Mat(imageSize.height, imageSize.width * 2, CV_8UC3);
	Rect imageLeft(0, 0, imageSize.width, imageSize.height);
	Rect imageRight(imageSize.width, 0, imageSize.width, imageSize.height);
	Mat imLeft = imageTwo(imageLeft);
	Mat imRight = imageTwo(imageRight);
	srcImage1.copyTo(imLeft);
	srcImage2.copyTo(imRight);
	if (imageTwo.data)
	{
		cv::imshow("image", imageTwo);
		//cv::waitKey();
	}
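
	// Optional alternative (a sketch, not from the original code): cv::hconcat builds the same
	// side-by-side canvas in one call, assuming both images share the same height and type.
	//Mat imageTwoAlt;
	//hconcat(srcImage1, srcImage2, imageTwoAlt);
	//imshow("image (hconcat)", imageTwoAlt);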

	//【2】Detect keypoints with the SURF detector
	int minHessian = 400;//Hessian threshold used by the SURF algorithm
	SurfFeatureDetector detector(minHessian);//a SurfFeatureDetector (SURF) feature-detector object
	vector<KeyPoint> keypoints_object, keypoints_scene;//vector: a template class for dynamic arrays of any type

	//【3】Call detect to find the SURF keypoints and store them in the vectors
	detector.detect(srcImage1, keypoints_object);
	detector.detect(srcImage2, keypoints_scene);
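
	// Optional (not in the original code): print the keypoint counts, which helps when tuning
	// minHessian (a lower threshold keeps more, but weaker, keypoints).
	//printf("keypoints: object=%d, scene=%d \n",
	//	(int)keypoints_object.size(), (int)keypoints_scene.size());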

	//【4】Compute the descriptors (feature vectors)
	SurfDescriptorExtractor extractor;
	Mat descriptors_object, descriptors_scene;
	extractor.compute(srcImage1, keypoints_object, descriptors_object);
	extractor.compute(srcImage2, keypoints_scene, descriptors_scene);

	//【5】Match the descriptors with a FLANN-based matcher
	FlannBasedMatcher matcher;
	vector< DMatch > matches;
	matcher.match(descriptors_object, descriptors_scene, matches);
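
	// Optional alternative (a sketch, not from the original code): SURF descriptors are float
	// vectors, so a brute-force L2 matcher would give a comparable result, only more slowly.
	//BFMatcher bruteMatcher(NORM_L2);
	//bruteMatcher.match(descriptors_object, descriptors_scene, matches);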
	double max_dist = 0; double min_dist = 100;//maximum and minimum match distances

	//【6】Compute the maximum and minimum distances over all matches
	for (int i = 0; i < descriptors_object.rows; i++)
	{
		double dist = matches[i].distance;
		if (dist < min_dist) min_dist = dist;
		if (dist > max_dist) max_dist = dist;
	}

	printf(">Max dist 最大距离 : %f \n", max_dist);
	printf(">Min dist 最小距离 : %f \n", min_dist);

	//【7】Keep only the match pairs whose distance is less than 3*min_dist
	std::vector< DMatch > good_matches;
	for (int i = 0; i < descriptors_object.rows; i++)
	{
		if (matches[i].distance < 3 * min_dist)
		{
			good_matches.push_back(matches[i]);
		}
	}
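
	// Note (an observation, not from the original code): if min_dist happens to be very close to 0,
	// the 3*min_dist threshold becomes extremely tight and may reject almost every match; a common
	// safeguard is a lower bound such as
	//		if (matches[i].distance < max(3 * min_dist, 0.02))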

	//Draw the good matches
	Mat img_matches;
	drawMatches(srcImage1, keypoints_object, srcImage2, keypoints_scene,
		good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
		vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

	//Containers for the matched point coordinates
	vector<Point2f> obj;
	vector<Point2f> scene;

	//Collect the keypoint coordinates from the good matches
	for (unsigned int i = 0; i < good_matches.size(); i++)
	{
		obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
		scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
	}

	Mat H = findHomography(obj, scene, CV_RANSAC);//estimate the perspective transform (homography) with RANSAC
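
	// Optional (a sketch, not from the original code): findHomography can also return the RANSAC
	// inlier mask, which shows how many of the good matches actually agree with H.
	//vector<uchar> inlierMask;
	//Mat H2 = findHomography(obj, scene, CV_RANSAC, 3, inlierMask);
	//printf("RANSAC inliers: %d / %d \n", countNonZero(inlierMask), (int)good_matches.size());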

	//Corners of the object image (the image to be located in the scene)
	vector<Point2f> obj_corners(4);
	obj_corners[0] = Point2f(0, 0);
	obj_corners[1] = Point2f(static_cast<float>(srcImage1.cols), 0);
	obj_corners[2] = Point2f(static_cast<float>(srcImage1.cols), static_cast<float>(srcImage1.rows));
	obj_corners[3] = Point2f(0, static_cast<float>(srcImage1.rows));
	vector<Point2f> scene_corners(4);

	//Map the object corners into the scene image
	perspectiveTransform(obj_corners, scene_corners, H);

	//Draw lines between the projected corners (offset by srcImage1.cols, i.e. into the scene half of the match image)
	line(img_matches, scene_corners[0] + Point2f(static_cast<float>(srcImage1.cols), 0), scene_corners[1] + Point2f(static_cast<float>(srcImage1.cols), 0), Scalar(255, 0, 123), 4);
	line(img_matches, scene_corners[1] + Point2f(static_cast<float>(srcImage1.cols), 0), scene_corners[2] + Point2f(static_cast<float>(srcImage1.cols), 0), Scalar(255, 0, 123), 4);
	line(img_matches, scene_corners[2] + Point2f(static_cast<float>(srcImage1.cols), 0), scene_corners[3] + Point2f(static_cast<float>(srcImage1.cols), 0), Scalar(255, 0, 123), 4);
	line(img_matches, scene_corners[3] + Point2f(static_cast<float>(srcImage1.cols), 0), scene_corners[0] + Point2f(static_cast<float>(srcImage1.cols), 0), Scalar(255, 0, 123), 4);
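
	// Optional (a sketch, not from the original code): since H maps srcImage1 coordinates into
	// srcImage2 coordinates, the same homography can warp srcImage1 into the scene frame,
	// e.g. as a first step towards stitching.
	//Mat warped;
	//warpPerspective(srcImage1, warped, H, srcImage2.size());
	//imshow("warped", warped);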

	//Show the final result
	imshow("Result", img_matches);

	waitKey(0);
	return 0;
}

A different matching approach: kNN matching with Lowe's ratio test on a live video stream



//---------------------------------【Headers & Namespaces】----------------------------
//		Description: headers and namespaces used by this program
//------------------------------------------------------------------------------------------------
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/features2d/features2d.hpp>
using namespace cv;
using namespace std;


//--------------------------------------【main() function】-----------------------------------------
//          Description: entry point of the console application; execution starts here
//-----------------------------------------------------------------------------------------------
int main3()
{

	//【1】Load the training image, show it, and convert it to grayscale
	Mat trainImage = imread("1.jpg"), trainImage_gray;
	imshow("Original image", trainImage);
	cvtColor(trainImage, trainImage_gray, CV_BGR2GRAY);

	//【2】Detect SURF keypoints and compute descriptors for the training image
	vector<KeyPoint> train_keyPoint;
	Mat trainDescriptor;
	SurfFeatureDetector featureDetector(80);
	featureDetector.detect(trainImage_gray, train_keyPoint);
	SurfDescriptorExtractor featureExtractor;
	featureExtractor.compute(trainImage_gray, train_keyPoint, trainDescriptor);

	//【3】Create a FLANN-based descriptor matcher
	FlannBasedMatcher matcher;
	vector<Mat> train_desc_collection(1, trainDescriptor);
	matcher.add(train_desc_collection);
	matcher.train();
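
	// Note (not in the original code): add() registers the training descriptors with the matcher and
	// train() builds the FLANN index once, so the per-frame knnMatch below only queries that index.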

	//【4】Open the default camera
	VideoCapture cap(0);
	unsigned int frameCount = 0;//frame counter (not used below)
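
	// Optional guard (a sketch, not from the original code): fail fast if the camera cannot be
	// opened, otherwise the loop below just spins on empty frames.
	//if (!cap.isOpened())
	//{
	//	cout << "Cannot open camera 0" << endl;
	//	return -1;
	//}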

	//【5】Loop until the 'q' key is pressed
	while (char(waitKey(1)) != 'q')
	{
		//<1>Per-frame setup
		int64 time0 = getTickCount();
		Mat testImage, testImage_gray;
		cap >> testImage;//grab the next video frame into testImage
		if (testImage.empty())
			continue;

		//<2>Convert the frame to grayscale
		cvtColor(testImage, testImage_gray, CV_BGR2GRAY);

		//<3>Detect SURF keypoints and compute descriptors for the test frame
		vector<KeyPoint> test_keyPoint;
		Mat testDescriptor;
		featureDetector.detect(testImage_gray, test_keyPoint);
		featureExtractor.compute(testImage_gray, test_keyPoint, testDescriptor);

		//<4>Match the test descriptors against the training descriptors (k = 2 nearest neighbours)
		vector<vector<DMatch> > matches;
		matcher.knnMatch(testDescriptor, matches, 2);

		// <5>Keep good matches using Lowe's ratio test
		vector<DMatch> goodMatches;
		for (unsigned int i = 0; i < matches.size(); i++)
		{
			if (matches[i][0].distance < 0.6 * matches[i][1].distance)
				goodMatches.push_back(matches[i][0]);
		}
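
		// Note (an observation, not from the original code): the 0.6 ratio keeps a match only when its
		// best candidate is clearly better than the second best; raising it towards ~0.8 admits more,
		// but noisier, matches.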

		//<6>Draw the matches and show them
		Mat dstImage;
		drawMatches(testImage, test_keyPoint, trainImage, train_keyPoint, goodMatches, dstImage);
		imshow("匹配窗口", dstImage);

		//<7>Print the current frame rate
		cout << "Current frame rate: " << getTickFrequency() / (getTickCount() - time0) << endl;
	}

	return 0;
}