OpenCV contour detection: computing an object's rotation angle
#include "stdafx.h"
#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#define PI 3.1415926
using namespace std;
using namespace cv;
int hough_line(Mat src)
{
//【1】load the source image and define the Mat variables
Mat srcImage = src;//imread("1.jpg"); //the project directory would need an image named 1.jpg
Mat midImage,dstImage;//temporary image and output image
//【2】edge detection, then conversion of the edge map
Canny(srcImage, midImage, 50, 200, 3);//run Canny edge detection once
cvtColor(midImage,dstImage, CV_GRAY2BGR);//convert the single-channel edge map to BGR so colored lines can be drawn on it
//【3】probabilistic Hough line transform
vector<Vec4i> lines;//vector that receives the detected line segments
HoughLinesP(midImage, lines, 1, CV_PI/180, 80, 50, 10 );
//【4】draw each detected segment on the output image
for( size_t i = 0; i < lines.size(); i++ )
{
Vec4i l = lines[i];
line( dstImage, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(186,88,255), 1, CV_AA);
}
//【5】show the source image
imshow("source", srcImage);
//【6】show the edge map
imshow("edges", midImage);
//【7】show the result
imshow("result", dstImage);
//waitKey(0);
return 0;
}
int main()
{
// Read input binary image
const char *image_name = "c:\\img\\1.bmp";
cv::Mat image = cv::imread(image_name,0);
if (!image.data)
return 0;
cv::namedWindow("Binary Image");
cv::imshow("Binary Image",image);
// load the source image again through the C API, as grayscale (cvThreshold and cvFindContours need a single-channel image)
IplImage *pSrcImage = cvLoadImage(image_name, CV_LOAD_IMAGE_GRAYSCALE);
// binarize it
cvThreshold(pSrcImage,pSrcImage,200,255,CV_THRESH_BINARY_INV);
image = cv::Mat(pSrcImage,true);
cv::imwrite("binary.jpg",image);
// Get the contours of the connected components
std::vector<std::vector<cv::Point>> contours;
cv::findContours(image,
contours, // a vector of contours
CV_RETR_EXTERNAL, // retrieve the external contours
CV_CHAIN_APPROX_NONE); // retrieve all pixels of each contours
// Print contours' length
std::cout << "Contours: " << contours.size() << std::endl;
std::vector<std::vector<cv::Point>>::const_iterator itContours= contours.begin();
for ( ; itContours!=contours.end(); ++itContours)
{
std::cout << "Size: " << itContours->size() << std::endl;
}
// draw black contours on white image
cv::Mat result(image.size(),CV_8U,cv::Scalar(255));
cv::drawContours(result,contours,
-1, // draw all contours
cv::Scalar(0), // in black
2); // with a thickness of 2
cv::namedWindow("Contours");
cv::imshow("Contours",result);
// Eliminate too short or too long contours
int cmin= 100; // minimum contour length
int cmax= 1000; // maximum contour length
std::vector<std::vector<cv::Point>>::const_iterator itc= contours.begin();
while (itc!=contours.end()) {
if (itc->size() < cmin || itc->size() > cmax)
itc= contours.erase(itc);
else
++itc;
}
// draw contours on the original image
cv::Mat original= cv::imread(image_name);
cv::drawContours(original,contours,
-1, // draw all contours
cv::Scalar(255,255,0), // in cyan (BGR)
2); // with a thickness of 2
cv::namedWindow("Contours on original");
cv::imshow("Contours on original",original);
// Let's now draw black contours on white image
result.setTo(cv::Scalar(255));
cv::drawContours(result,contours,
-1, // draw all contours
cv::Scalar(0), // in black
1); // with a thickness of 1
image= cv::imread("binary.jpg",0);
//imshow("lll",result);
//waitKey(0);
// testing the bounding box
//
//line detection with the Hough transform; the probabilistic Hough transform (cv::HoughLinesP) is used here rather than the standard Hough transform (cv::HoughLines)
cv::Mat result_line(image.size(),CV_8U,cv::Scalar(255));
result_line = result.clone();
hough_line(result_line);
//Mat tempimage;
//【2】edge detection, then conversion of the edge map
//Canny(result_line, tempimage, 50, 200, 3);//run Canny edge detection once
//imshow("canny",tempimage);
//waitKey(0);
//cvtColor(tempimage,result_line, CV_GRAY2BGR);//convert the edge map to BGR
vector<Vec4i> lines;
cv::HoughLinesP(result_line,lines,1,CV_PI/180,80,50,10);
for(int i = 0; i < lines.size(); i++)
{
line(result_line,cv::Point(lines[i][0],lines[i][1]),cv::Point(lines[i][2],lines[i][3]),Scalar(0,0,0),2,8,0);
}
cv::namedWindow("line");
cv::imshow("line",result_line);
//waitKey(0);
//
//
//std::vector<std::vector<cv::Point>>::const_iterator itc_rec= contours.begin();
//while (itc_rec!=contours.end())
//{
// cv::Rect r0= cv::boundingRect(cv::Mat(*(itc_rec)));
// cv::rectangle(result,r0,cv::Scalar(0),2);
// ++itc_rec;
//}
//cv::namedWindow("Some Shape descriptors");
//cv::imshow("Some Shape descriptors",result);
CvBox2D End_Rage2D;
CvPoint2D32f rectpoint[4];
CvMemStorage *storage = cvCreateMemStorage(0); //allocate storage for the contour sequence
CvSeq* contour = NULL; //CvSeq holds the detected contour pixels/coordinates as a linked list
cvFindContours( pSrcImage, storage, &contour, sizeof(CvContour),CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE);//this function has several more optional parameters
for(; contour; contour = contour->h_next) //iterate over every contour that was found
{
End_Rage2D = cvMinAreaRect2(contour);
//cvMinAreaRect2 returns the minimum-area bounding rectangle: the object's angle, width, height
//and center are all stored in the CvBox2D structure, which is essentially the whole job.
cvBoxPoints(End_Rage2D,rectpoint); //compute the four corner points before drawing them
for(int i = 0;i< 4;i++)
{
//CvArr* s=(CvArr*)&result;
//cvLine(s,cvPointFrom32f(rectpoint[i]),cvPointFrom32f(rectpoint[(i+1)%4]),CV_RGB(0,0,255),2);
line(result,cvPointFrom32f(rectpoint[i]),cvPointFrom32f(rectpoint[(i+1)%4]),Scalar(125),2);
}
std::cout <<" angle:\n"<<(float)End_Rage2D.angle << std::endl; //rotation angle of the measured object
}
cv::imshow("lalalal",result);
cv::waitKey();
return 0;
}
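The program above mixes the legacy C structures (CvBox2D, CvSeq) with the C++ API. For reference, a minimal sketch of the same rotation-angle measurement using only the C++ interface; the file path and threshold are reused from the code above, and cv::RotatedRect plays the role of CvBox2D:
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
int main()
{
    // load as grayscale and binarize (same path and threshold as above)
    cv::Mat bin = cv::imread("c:\\img\\1.bmp", 0);
    if (bin.empty()) return -1;
    cv::threshold(bin, bin, 200, 255, cv::THRESH_BINARY_INV);
    // external contours of the connected components
    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(bin, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
    for (size_t i = 0; i < contours.size(); i++)
    {
        // minimum-area rotated rectangle: center, size and angle in one structure
        cv::RotatedRect box = cv::minAreaRect(cv::Mat(contours[i]));
        std::cout << "angle: " << box.angle << std::endl;
    }
    return 0;
}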
Extracting connected-region contours
#include <iostream>
#include <opencv2\core\core.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\imgproc\imgproc.hpp>
using namespace std;
using namespace cv;
// remove contours that are too short or too long
void getSizeContours(vector<vector<Point>> &contours)
{
int cmin = 100; // minimum contour length
int cmax = 1000; // maximum contour length
vector<vector<Point>>::const_iterator itc = contours.begin();
while(itc != contours.end())
{
if((itc->size()) < cmin || (itc->size()) > cmax)
{
itc = contours.erase(itc);
}
else ++ itc;
}
}
// compute the contours of connected regions, i.e. the shapes formed by connected pixels in a binary image
int main()
{
Mat image = imread("c:\\img\\1.bmp",0);
if(!image.data)
{
cout << "Fail to load image" << endl;
return 0;
}
Mat imageShold;
threshold(image, imageShold, 100, 255, THRESH_BINARY); // the image must be binarized first
vector<vector<Point>> contours;
//CV_CHAIN_APPROX_NONE stores every pixel of every contour
findContours(imageShold, contours, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE, Point(0,0));
getSizeContours(contours);
cout << contours.size() << endl;
Mat result(image.size(), CV_8U, Scalar(255));
drawContours(result, contours, -1, Scalar(0), 2); // -1 means draw all contours
namedWindow("result");
imshow("result", result);
namedWindow("image");
imshow("image", image);
waitKey(0);
return 0;
}
Counting connected regions and marking the largest one
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
int main( int argc, char** argv )
{
IplImage* src = cvLoadImage("c:\\img\\1.png", CV_LOAD_IMAGE_GRAYSCALE);
IplImage* dst = cvCreateImage(cvGetSize(src), 8, 3);
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* contour = 0;
cvThreshold(src, src,120, 255, CV_THRESH_BINARY); // binarize
cvNamedWindow("Source", 1);
cvShowImage("Source", src);
// extract contours
int contour_num = cvFindContours(src, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
cvZero(dst); // clear the output image
CvSeq *_contour = contour;
double maxarea = 0;
double minarea = 100;
int m = 0;
for( ; contour != 0; contour = contour->h_next )
{
double tmparea = fabs(cvContourArea(contour));
if(tmparea < minarea)
{
cvSeqRemove(contour, 0); // drop contours whose area is below the threshold
continue;
}
CvRect aRect = cvBoundingRect( contour, 0 );
if ((aRect.width/aRect.height)<1)
{
cvSeqRemove(contour, 0); // drop contours whose width/height ratio is below 1 (note: integer division)
continue;
}
if(tmparea > maxarea)
{
maxarea = tmparea;
}
m++;
// pick a drawing color
CvScalar color = CV_RGB( 0, 255, 255 );
//max_level is the maximum level of contours to draw: 0 draws only the given contour,
//1 draws the contour plus all contours at the same level, 2 also draws their children, and so on;
//a negative value skips same-level contours and draws child contours down to level abs(max_level)-1
cvDrawContours(dst, contour, color, color, -1, 1, 8); //draw outer and inner contours
}
contour = _contour;
int count = 0;
for(; contour != 0; contour = contour->h_next)
{
count++;
double tmparea = fabs(cvContourArea(contour));
if (tmparea == maxarea)
{
CvScalar color = CV_RGB( 255, 0, 0);
cvDrawContours(dst, contour, color, color, -1, 1, 8);
}
}
printf("The total number of contours is: %d\n", count);
cvNamedWindow("Components", 1);
cvShowImage("Components", dst);
cvWaitKey(0);
cvDestroyWindow("Source");
cvReleaseImage(&src);
cvDestroyWindow("Components");
cvReleaseImage(&dst);
return 0;
}
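A minimal sketch of the same largest-component search with the C++ API, assuming the same input path; cv::contourArea replaces cvContourArea and no manual storage management is needed:
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
#include <cmath>
int main()
{
    cv::Mat src = cv::imread("c:\\img\\1.png", 0);   // same path as above
    if (src.empty()) return -1;
    cv::threshold(src, src, 120, 255, cv::THRESH_BINARY);
    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(src, contours, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
    // find the contour with the largest area
    int maxIdx = -1;
    double maxArea = 0;
    for (size_t i = 0; i < contours.size(); i++) {
        double a = std::fabs(cv::contourArea(contours[i]));
        if (a > maxArea) { maxArea = a; maxIdx = (int)i; }
    }
    std::cout << "contours: " << contours.size() << ", largest area: " << maxArea << std::endl;
    cv::Mat dst = cv::Mat::zeros(src.size(), CV_8UC3);
    cv::drawContours(dst, contours, -1, cv::Scalar(0, 255, 255));          // all contours
    if (maxIdx >= 0)
        cv::drawContours(dst, contours, maxIdx, cv::Scalar(0, 0, 255), 2); // largest one
    cv::imshow("Components", dst);
    cv::waitKey(0);
    return 0;
}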
OpenCV: drawing the minimum-area bounding rectangle and computing its length, width and four corner positions
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#include <math.h>
int main(int argc,char** argv)
{
IplImage *src,*gray,*bw,*dst;
CvMemStorage* storage=cvCreateMemStorage(0);
CvSeq* contour=0;
const char* filename= "c:\\img\\1.png";
src=cvLoadImage(filename,1);
if(!src)
{
printf("can't open the file: %s\n",filename);
return -1;
}
cvNamedWindow("image",1);
cvShowImage("image",src);
gray=cvCreateImage(cvSize(src->width,src->height),src->depth,1);
cvCvtColor(src,gray,CV_BGR2GRAY);
int hei,wid;
hei=gray->height;//note: dimensions are taken from gray
wid=gray->width;
printf("image height: %d, width: %d\n\n",hei,wid);
cvNamedWindow("image2",1);
cvShowImage("image2",gray);
bw=cvCreateImage(cvGetSize(src),IPL_DEPTH_8U,1);
cvThreshold(gray,bw,128,255,CV_THRESH_BINARY_INV);
cvNamedWindow("image4",1);
cvShowImage("image4",bw);
dst=cvCloneImage(src);
cvFindContours(bw,storage,&contour,sizeof(CvContour),CV_RETR_TREE,CV_CHAIN_APPROX_SIMPLE);
for(;contour!=0;contour=contour->h_next)
{ CvBox2D rect=cvMinAreaRect2(contour,storage);
CvPoint2D32f rect_pts0[4];
cvBoxPoints(rect, rect_pts0);//deprecated in the C++ API
//cvPolyLine expects CvPoint input, so the CvPoint2D32f array rect_pts0
//is converted to the CvPoint array rect_pts,
//accessed through the pointer *pt
int npts = 4,k=0;
int aaa=0,bbb=0;
CvPoint rect_pts[4], *pt = rect_pts;
printf("corner coordinates of the region's minimum bounding rectangle:\n");
for (int i=0; i<4; i++)
{
rect_pts[i]= cvPointFrom32f(rect_pts0[i]);
printf("%d %d\n",rect_pts[i].x,rect_pts[i].y);
aaa=(int)sqrt((pow((float)(rect_pts[0].x-rect_pts[1].x),2)+pow((float)(rect_pts[0].y-rect_pts[1].y),2)));
bbb=(int)sqrt((pow((float)(rect_pts[0].x-rect_pts[3].x),2)+pow((float)(rect_pts[0].y-rect_pts[3].y),2)));
if(aaa<bbb)
{
k=aaa;
aaa=bbb;
bbb=k;
}
}
printf("minimum bounding rectangle length: %d, width: %d\n\n",aaa,bbb);
cvPolyLine(dst, &pt, &npts, 1, 1, CV_RGB(255,0,0), 1);
}
cvNamedWindow("image5",1);
cvShowImage("image5",dst);
cvWaitKey(0);//note: this call must come before the windows are destroyed
cvDestroyWindow("image");
cvDestroyWindow("image2");
cvDestroyWindow("image4");
cvDestroyWindow("image5");
cvReleaseImage(&src);
cvReleaseImage(&gray);
cvReleaseImage(&bw);
cvReleaseImage(&dst);
return 0;
}
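Incidentally, the side lengths recomputed above from the corner coordinates are already stored in the CvBox2D returned by cvMinAreaRect2, so inside the same loop they could be read directly. A minimal sketch (variable names reused from the code above):
CvBox2D rect = cvMinAreaRect2(contour, storage);
// rect.size holds the two side lengths; report the longer one as the length
float longSide  = MAX(rect.size.width, rect.size.height);
float shortSide = MIN(rect.size.width, rect.size.height);
printf("min-area rect length: %.1f, width: %.1f, angle: %.1f\n", longSide, shortSide, rect.angle);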
Digit segmentation
#include "stdafx.h"
#include "cv.h"
#include "cxcore.h"
#include "highgui.h"
#include "iostream"
#include <vector>
using namespace std;
CvRect CutTopButtom(IplImage *src);
vector<CvRect>leftright;
//vector<CvRect>::iterator iter;
int lstart=0,lend=0,lrun=0;
int lastStart=0,lastEnd=0;
IplImage *dst[10];
CvRect rectRoi;
void FindLeftRight(IplImage *src);
void RememberLeftRight(IplImage *src,int lstart,int lend);
void ShowAndCut(IplImage *src[]);
IplImage * raw=cvLoadImage("c:\\img\\3.bmp",0);
IplImage * img;
int _tmain(int argc, _TCHAR* argv[])
{
cvSmooth(raw,raw);
cvThreshold(raw,raw,70,255,CV_THRESH_BINARY);
IplConvKernel *element=0; //structuring element
cvMorphologyEx(raw,raw,NULL,element,CV_MOP_OPEN); //morphological opening
rectRoi=CutTopButtom(raw);
img=cvCreateImage(cvGetSize(raw),IPL_DEPTH_8U,1);
cvCopy(raw,img);
FindLeftRight(raw);
return 0;
}
CvRect CutTopButtom(IplImage *src)
{
int* h=new int[src->height];
memset(h,0,src->height*sizeof(int));
int hstart,hend,x,y,zhongdian;
CvScalar s;
for(y=0;y<src->height;y++)
{
for(x=0;x<src->width;x++)
{
s=cvGet2D(src,y,x);
if(s.val[0]==0)
h[y]++;
}
}
for (int j=0;j!=src->height;j++)
{
if (h[j]>15)
{
zhongdian=j;
hstart=zhongdian;
break;
}
else
continue;
}
while(zhongdian!=src->height)
{
if (h[zhongdian]<10)
{
hend=zhongdian;
break;
}
else
hend=zhongdian;
zhongdian++;
}
CvRect rect=cvRect(0,hstart-10,src->width,hend-hstart+20);
cvSetImageROI(src,rect);
delete [] h;
return rect;
}
void FindLeftRight(IplImage *src)
{
int* v=new int[rectRoi.width];
memset(v,0,rectRoi.width*sizeof(int));
int x,y;
CvScalar s;
for(x=0;x<rectRoi.width;x++)
{
for(y=0;y<rectRoi.height;y++)
{
s=cvGet2D(src,y,x);
if(s.val[0]==0)
v[x]++;
else
continue;
}
}
if (lend<=rectRoi.width)
{
for (int i=0;i<rectRoi.width;i++)
{
if(v[i]>10)
{
lrun=i;
lstart=lrun;
break;
}
else
continue;
}
if (lstart==lrun)
{
for (lrun;lrun<rectRoi.width;lrun++)
{
if (v[lrun]<5)
{
lend=lrun;
break;
}
else
continue;
}
}
RememberLeftRight(src,lstart,lend);
}
else
ShowAndCut(dst);
}
void RememberLeftRight(IplImage *src,int lstart,int lend)
{
if (lastStart==0&&lastEnd==0)
{
lastStart=lastStart+lstart;
lastEnd=lastEnd+lend;
}
else
{
lastStart=lastEnd+lstart;
lastEnd=lastEnd+lend;
}
CvRect Rect=cvRect(lastStart-10,0,lastEnd-lastStart+20,rectRoi.height);
//CvRect Rect=cvRect(lastStart-5,0,lend-lstart+15,rectRoi.height);
leftright.push_back(Rect);
rectRoi=cvRect(lend,0,(rectRoi.width-lend),rectRoi.height);
cvSetImageROI(src,rectRoi);
FindLeftRight(src);
}
void ShowAndCut(IplImage *src[])
{
int i=0;
for (vector<CvRect>::iterator iter=leftright.begin();iter!=leftright.end();iter++,i++)
{
src[i]=cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,1);
cvCopy(img,src[i]);
cvSetImageROI(src[i],*iter);
cvNamedWindow("src");
cvShowImage("src",src[i]);
cvWaitKey(3000);
}
}
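The h[] and v[] arrays built above with nested cvGet2D loops are row and column projection histograms; with the C++ API the same projections can be obtained in one call with cv::reduce. A minimal sketch, assuming the same binary image with black (0) digits on a white (255) background:
#include <opencv2/opencv.hpp>
int main()
{
    cv::Mat bin = cv::imread("c:\\img\\3.bmp", 0);      // same input as above
    if (bin.empty()) return -1;
    cv::threshold(bin, bin, 70, 255, cv::THRESH_BINARY);
    cv::Mat inv = 255 - bin;                            // black pixels become 255
    cv::Mat rowProj, colProj;
    cv::reduce(inv, rowProj, 1, CV_REDUCE_SUM, CV_32S); // one sum per row
    cv::reduce(inv, colProj, 0, CV_REDUCE_SUM, CV_32S); // one sum per column
    // rowProj.at<int>(y,0)/255 and colProj.at<int>(0,x)/255 are the black-pixel
    // counts that the h[y] and v[x] loops above compute pixel by pixel
    return 0;
}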
License plate recognition
Character segmentation
#include "stdafx.h"
#include <cv.h>
#include <highgui.h>
#include <cvaux.h>
#include <ml.h>
#define HORIZONTAL 1
#define VERTICAL 0
using namespace std;
using namespace cv;
//typedef struct CharSegment{
// Mat img;
// Rect mr;
// CharSegment(Mat a,Rect b){
// img=a;
// mr=b;
// }
//};
bool verifySizes(Mat r){
//Char sizes 45x77
float aspect=45.0f/77.0f;
float charAspect= (float)r.cols/(float)r.rows;
float error=0.35;
float minHeight=15;
float maxHeight=28;
//We have a different aspect ratio for number 1, and it can be ~0.2
float minAspect=0.2;
float maxAspect=aspect+aspect*error;
//area of pixels
float area=countNonZero(r);
//bb area
float bbArea=r.cols*r.rows;
//% of pixel in area
float percPixels=area/bbArea;
/*if(DEBUG)
cout << "Aspect: "<< aspect << " ["<< minAspect << "," << maxAspect << "] " << "Area "<< percPixels <<" Char aspect " << charAspect << " Height char "<< r.rows << "\n";*/
if(percPixels < 0.8 && charAspect > minAspect && charAspect < maxAspect && r.rows >= minHeight && r.rows < maxHeight)
return true;
else
return false;
}
Mat preprocessChar(Mat in){
//Remap image
int h=in.rows;
int w=in.cols;
int charSize=20; //normalize every character to the same size
Mat transformMat=Mat::eye(2,3,CV_32F);
int m=max(w,h);
transformMat.at<float>(0,2)=m/2 - w/2;
transformMat.at<float>(1,2)=m/2 - h/2;
Mat warpImage(m,m, in.type());
warpAffine(in, warpImage, transformMat, warpImage.size(), INTER_LINEAR, BORDER_CONSTANT, Scalar(0) );
Mat out;
resize(warpImage, out, Size(charSize, charSize) );
return out;
}
//create the accumulation histograms; img is a binary image, t selects horizontal or vertical
Mat ProjectedHistogram(Mat img, int t)
{
int sz=(t)?img.rows:img.cols;
Mat mhist=Mat::zeros(1,sz,CV_32F);
for(int j=0; j<sz; j++){
Mat data=(t)?img.row(j):img.col(j);
mhist.at<float>(j)=countNonZero(data); //count the non-zero elements of this row or column and store the count in mhist
}
//Normalize histogram
double min, max;
minMaxLoc(mhist, &min, &max);
if(max>0)
mhist.convertTo(mhist,-1 , 1.0f/max, 0);//normalize the histogram by its maximum value
return mhist;
}
Mat getVisualHistogram(Mat *hist, int type)
{
int size=100;
Mat imHist;
if(type==HORIZONTAL){
imHist.create(Size(size,hist->cols), CV_8UC3);
}else{
imHist.create(Size(hist->cols, size), CV_8UC3);
}
imHist=Scalar(55,55,55);
for(int i=0;i<hist->cols;i++){
float value=hist->at<float>(i);
int maxval=(int)(value*size);
Point pt1;
Point pt2, pt3, pt4;
if(type==HORIZONTAL){
pt1.x=pt3.x=0;
pt2.x=pt4.x=maxval;
pt1.y=pt2.y=i;
pt3.y=pt4.y=i+1;
line(imHist, pt1, pt2, CV_RGB(220,220,220),1,8,0);
line(imHist, pt3, pt4, CV_RGB(34,34,34),1,8,0);
pt3.y=pt4.y=i+2;
line(imHist, pt3, pt4, CV_RGB(44,44,44),1,8,0);
pt3.y=pt4.y=i+3;
line(imHist, pt3, pt4, CV_RGB(50,50,50),1,8,0);
}else{
pt1.x=pt2.x=i;
pt3.x=pt4.x=i+1;
pt1.y=pt3.y=100;
pt2.y=pt4.y=100-maxval;
line(imHist, pt1, pt2, CV_RGB(220,220,220),1,8,0);
line(imHist, pt3, pt4, CV_RGB(34,34,34),1,8,0);
pt3.x=pt4.x=i+2;
line(imHist, pt3, pt4, CV_RGB(44,44,44),1,8,0);
pt3.x=pt4.x=i+3;
line(imHist, pt3, pt4, CV_RGB(50,50,50),1,8,0);
}
}
return imHist ;
}
void drawVisualFeatures(Mat character, Mat hhist, Mat vhist, Mat lowData,int count){
Mat img(121, 121, CV_8UC3, Scalar(0,0,0));
Mat ch;
Mat ld;
char res[20];
cvtColor(character, ch, CV_GRAY2RGB);
resize(lowData, ld, Size(100, 100), 0, 0, INTER_NEAREST );//enlarge ld (e.g. 15x15) to 100x100
cvtColor(ld,ld,CV_GRAY2RGB);
Mat hh=getVisualHistogram(&hhist, HORIZONTAL);
Mat hv=getVisualHistogram(&vhist, VERTICAL);
//Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height)
Mat subImg=img(Rect(0,101,20,20));//ch:20*20
ch.copyTo(subImg);
subImg=img(Rect(21,101,100,20));//hh:100*hist.cols
hh.copyTo(subImg);
subImg=img(Rect(0,0,20,100));//hv:hist.cols*100
hv.copyTo(subImg);
subImg=img(Rect(21,0,100,100));//ld:100*100
ld.copyTo(subImg);
line(img, Point(0,100), Point(121,100), Scalar(0,0,255));
line(img, Point(20,0), Point(20,121), Scalar(0,0,255));
sprintf(res,"hist%d.jpg",count);
imwrite(res,img);
//imshow("Visual Features", img);
cvWaitKey(0);
}
Mat features(Mat in, int sizeData,int count){
//Histogram features
Mat vhist=ProjectedHistogram(in,VERTICAL);
Mat hhist=ProjectedHistogram(in,HORIZONTAL);
//Low data feature
Mat lowData;
resize(in, lowData, Size(sizeData, sizeData) );
//draw the histograms
drawVisualFeatures(in, hhist, vhist, lowData,count);
//Last 10 is the number of moments components
int numCols=vhist.cols+hhist.cols+lowData.cols*lowData.cols;
Mat out=Mat::zeros(1,numCols,CV_32F);
//Assign values to the feature vector: the ANN sample feature is the concatenation of the horizontal and vertical histograms and the low-resolution image
int j=0;
for(int i=0; i<vhist.cols; i++)
{
out.at<float>(j)=vhist.at<float>(i);
j++;
}
for(int i=0; i<hhist.cols; i++)
{
out.at<float>(j)=hhist.at<float>(i);
j++;
}
for(int x=0; x<lowData.cols; x++)
{
for(int y=0; y<lowData.rows; y++){
out.at<float>(j)=(float)lowData.at<unsigned char>(x,y);
j++;
}
}
//if(DEBUG)
// cout << out << "\n===========================================\n";
return out;
}
int _tmain(int argc, _TCHAR* argv[])
{
Mat input = imread("c:\\img\\4.jpg",CV_LOAD_IMAGE_GRAYSCALE);
char res[20];
int i = 0;
//vector<CharSegment> output;
//Threshold input image
Mat img_threshold;
threshold(input, img_threshold, 60, 255, CV_THRESH_BINARY_INV);
Mat img_contours;
img_threshold.copyTo(img_contours);
//Find contours of possibles characters
vector< vector< Point> > contours;
findContours(img_contours,
contours, // a vector of contours
CV_RETR_EXTERNAL, // retrieve the external contours
CV_CHAIN_APPROX_NONE); // all pixels of each contours
// Draw blue contours on a white image
cv::Mat result;
input.copyTo(result);
cvtColor(result, result, CV_GRAY2RGB);
//cv::drawContours(result,contours,
// -1, // draw all contours
// cv::Scalar(0,0,255), // in blue
// 1); // with a thickness of 1
//Start iterating over each contour found
vector<vector<Point> >::iterator itc= contours.begin();
//Remove patches that are not inside the limits of aspect ratio and area.
while (itc!=contours.end()) {
//Create bounding rect of object
Rect mr= boundingRect(Mat(*itc));
//rectangle(result, mr, Scalar(255,0,0),2);
//Crop image
Mat auxRoi(img_threshold, mr);
if(verifySizes(auxRoi)){
auxRoi=preprocessChar(auxRoi);
//output.push_back(CharSegment(auxRoi, mr));
//save each character image
sprintf(res,"train_data_%d.jpg",i);
i++;
imwrite(res,auxRoi);
rectangle(result, mr, Scalar(0,0,255),2);
//extract histogram features from each character patch
Mat f=features(auxRoi,15,i);
}
++itc;
}
imwrite("result1.jpg",result);
imshow("car_plate",result);
waitKey(0);
return 0;
}
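For reference, the verifySizes filter above keeps a candidate box only when its aspect ratio cols/rows lies between minAspect = 0.2 and maxAspect = aspect·(1 + error) = (45/77)·1.35 ≈ 0.79, its height is in the range [15, 28) pixels, and less than 80% of the box area is foreground (percPixels < 0.8); the 45x77 figure is the nominal character size on the plates being processed.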
Training
#include <cv.h>
#include <highgui.h>
#include <cvaux.h>
#include <iostream>
#include <vector>
#define HORIZONTAL 1
#define VERTICAL 0
using namespace std;
using namespace cv;
//Spanish plates use 30 character classes; below is the number of sample images for each character (samples are not provided and must be collected manually)
const int numFilesChars[]={35, 40, 42, 41, 42, 33, 30, 31, 49, 44, 30, 24, 21, 20, 34, 9, 10, 3, 11, 3, 15, 4, 9, 12, 10, 21, 18, 8, 15, 7};
const char strCharacters[] = {'0','1','2','3','4','5','6','7','8','9','B', 'C', 'D', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z'};
const int numCharacters=30;
Mat ProjectedHistogram(Mat img, int t)
{
int sz=(t)?img.rows:img.cols;
Mat mhist=Mat::zeros(1,sz,CV_32F);
for(int j=0; j<sz; j++){
Mat data=(t)?img.row(j):img.col(j);
mhist.at<float>(j)=countNonZero(data); //count the non-zero elements of this row or column and store the count in mhist
}
//Normalize histogram
double min, max;
minMaxLoc(mhist, &min, &max);
if(max>0)
mhist.convertTo(mhist,-1 , 1.0f/max, 0);//normalize the histogram by its maximum value
return mhist;
}
Mat features(Mat in, int sizeData){
//Histogram features
Mat vhist=ProjectedHistogram(in,VERTICAL);
Mat hhist=ProjectedHistogram(in,HORIZONTAL);
//Low data feature
Mat lowData;
resize(in, lowData, Size(sizeData, sizeData) );
//Last 10 is the number of moments components
int numCols=vhist.cols+hhist.cols+lowData.cols*lowData.cols;
Mat out=Mat::zeros(1,numCols,CV_32F);
//Assign values to the feature vector: the ANN sample feature is the concatenation of the horizontal and vertical histograms and the low-resolution image
int j=0;
for(int i=0; i<vhist.cols; i++)
{
out.at<float>(j)=vhist.at<float>(i);
j++;
}
for(int i=0; i<hhist.cols; i++)
{
out.at<float>(j)=hhist.at<float>(i);
j++;
}
for(int x=0; x<lowData.cols; x++)
{
for(int y=0; y<lowData.rows; y++){
out.at<float>(j)=(float)lowData.at<unsigned char>(x,y);
j++;
}
}
return out;
}
int main ( int argc, char** argv )
{
cout << "OpenCV Training OCR Automatic Number Plate Recognition\n";
cout << "\n";
char* path;
//Check if user specify image to process
if(argc >= 2 )
{
path= argv[1];
}else{
cout << "Usage:\n" << argv[0] << " <path to chars folders files> \n";
return 0;
}
Mat classes;
Mat trainingDataf5;
Mat trainingDataf10;
Mat trainingDataf15;
Mat trainingDataf20;
vector<int> trainingLabels;
//OCR ocr;
for(int i=0; i< numCharacters; i++)
{
int numFiles=numFilesChars[i];
for(int j=0; j< numFiles; j++){
cout << "Character "<< strCharacters[i] << " file: " << j << "\n";
stringstream ss(stringstream::in | stringstream::out);
ss << path << strCharacters[i] << "/" << j << ".jpg";
Mat img=imread(ss.str(), 0);
Mat f5=features(img, 5);
Mat f10=features(img, 10);
Mat f15=features(img, 15);
Mat f20=features(img, 20);
trainingDataf5.push_back(f5);
trainingDataf10.push_back(f10);
trainingDataf15.push_back(f15);
trainingDataf20.push_back(f20);
trainingLabels.push_back(i); //class index of the character that this image belongs to
}
}
trainingDataf5.convertTo(trainingDataf5, CV_32FC1);
trainingDataf10.convertTo(trainingDataf10, CV_32FC1);
trainingDataf15.convertTo(trainingDataf15, CV_32FC1);
trainingDataf20.convertTo(trainingDataf20, CV_32FC1);
Mat(trainingLabels).copyTo(classes);
FileStorage fs("OCR.xml", FileStorage::WRITE);
fs << "TrainingDataF5" << trainingDataf5;
fs << "TrainingDataF10" << trainingDataf10;
fs << "TrainingDataF15" << trainingDataf15;
fs << "TrainingDataF20" << trainingDataf20;
fs << "classes" << classes;
fs.release();
return 0;
}
Recognition
#include "stdafx.h"
#include <cv.h>
#include <highgui.h>
#include <cvaux.h>
#include <ml.h>
#include <iostream>
#include <vector>
#define HORIZONTAL 1
#define VERTICAL 0
using namespace std;
using namespace cv;
class Plate{
public:
Plate();
Plate(Mat img, Rect pos);
string str();
Rect position;
Mat plateImg;
vector<char> chars;
vector<Rect> charsPos;
};
Plate::Plate(){
}
Plate::Plate(Mat img, Rect pos){
plateImg=img;
position=pos;
}
string Plate::str(){
string result="";
//Order numbers
vector<int> orderIndex;
vector<int> xpositions;
for(int i=0; i< charsPos.size(); i++){
orderIndex.push_back(i);
xpositions.push_back(charsPos[i].x);
}
float min=xpositions[0];
int minIdx=0;
for(int i=0; i< xpositions.size(); i++){
min=xpositions[i];
minIdx=i;
for(int j=i; j<xpositions.size(); j++){
if(xpositions[j]<min){
min=xpositions[j];
minIdx=j;
}
}
int aux_i=orderIndex[i];
int aux_min=orderIndex[minIdx];
orderIndex[i]=aux_min;
orderIndex[minIdx]=aux_i;
float aux_xi=xpositions[i];
float aux_xmin=xpositions[minIdx];
xpositions[i]=aux_xmin;
xpositions[minIdx]=aux_xi;
}
for(int i=0; i<orderIndex.size(); i++){
result=result+chars[orderIndex[i]];
}
return result;
}
CvANN_MLP ann;
const char strCharacters[] = {'0','1','2','3','4','5','6','7','8','9','B', 'C', 'D', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z'};
const int numCharacters=30;
bool verifySizes(Mat r){
//Char sizes 45x77
float aspect=45.0f/77.0f;
float charAspect= (float)r.cols/(float)r.rows;
float error=0.35;
float minHeight=15;
float maxHeight=28;
//We have a different aspect ratio for number 1, and it can be ~0.2
float minAspect=0.2;
float maxAspect=aspect+aspect*error;
//area of pixels
float area=countNonZero(r);
//bb area
float bbArea=r.cols*r.rows;
//% of pixel in area
float percPixels=area/bbArea;
/*if(DEBUG)
cout << "Aspect: "<< aspect << " ["<< minAspect << "," << maxAspect << "] " << "Area "<< percPixels <<" Char aspect " << charAspect << " Height char "<< r.rows << "\n";*/
if(percPixels < 0.8 && charAspect > minAspect && charAspect < maxAspect && r.rows >= minHeight && r.rows < maxHeight)
return true;
else
return false;
}
Mat preprocessChar(Mat in){
//Remap image
int h=in.rows;
int w=in.cols;
int charSize=20; //normalize every character to the same size
Mat transformMat=Mat::eye(2,3,CV_32F);
int m=max(w,h);
transformMat.at<float>(0,2)=m/2 - w/2;
transformMat.at<float>(1,2)=m/2 - h/2;
Mat warpImage(m,m, in.type());
warpAffine(in, warpImage, transformMat, warpImage.size(), INTER_LINEAR, BORDER_CONSTANT, Scalar(0) );
Mat out;
resize(warpImage, out, Size(charSize, charSize) );
return out;
}
//create the accumulation histograms; img is a binary image, t selects horizontal or vertical
Mat ProjectedHistogram(Mat img, int t)
{
int sz=(t)?img.rows:img.cols;
Mat mhist=Mat::zeros(1,sz,CV_32F);
for(int j=0; j<sz; j++){
Mat data=(t)?img.row(j):img.col(j);
mhist.at<float>(j)=countNonZero(data); //count the non-zero elements of this row or column and store the count in mhist
}
//Normalize histogram
double min, max;
minMaxLoc(mhist, &min, &max);
if(max>0)
mhist.convertTo(mhist,-1 , 1.0f/max, 0);//normalize the histogram by its maximum value
return mhist;
}
Mat features(Mat in, int sizeData){
//Histogram features
Mat vhist=ProjectedHistogram(in,VERTICAL);
Mat hhist=ProjectedHistogram(in,HORIZONTAL);
//Low data feature
Mat lowData;
resize(in, lowData, Size(sizeData, sizeData) );
//Last 10 is the number of moments components
int numCols=vhist.cols+hhist.cols+lowData.cols*lowData.cols;
Mat out=Mat::zeros(1,numCols,CV_32F);
//Assign values to the feature vector: the ANN sample feature is the concatenation of the horizontal and vertical histograms and the low-resolution image
int j=0;
for(int i=0; i<vhist.cols; i++)
{
out.at<float>(j)=vhist.at<float>(i);
j++;
}
for(int i=0; i<hhist.cols; i++)
{
out.at<float>(j)=hhist.at<float>(i);
j++;
}
for(int x=0; x<lowData.cols; x++)
{
for(int y=0; y<lowData.rows; y++){
out.at<float>(j)=(float)lowData.at<unsigned char>(x,y);
j++;
}
}
return out;
}
int classify(Mat f){
int result=-1;
Mat output(1, 30, CV_32FC1); //Spanish plates have only 30 character classes
ann.predict(f, output);
Point maxLoc;
double maxVal;
minMaxLoc(output, 0, &maxVal, 0, &maxLoc);
//We need to know where the maximum value is in output; the x (column) index is the class.
return maxLoc.x;
}
void train(Mat TrainData, Mat classes, int nlayers){
Mat layers(1,3,CV_32SC1);
layers.at<int>(0)= TrainData.cols;
layers.at<int>(1)= nlayers;
layers.at<int>(2)= 30;
ann.create(layers, CvANN_MLP::SIGMOID_SYM, 1, 1);
//Prepare trainClases
//Create a mat with n trained data by m classes
Mat trainClasses;
trainClasses.create( TrainData.rows, 30, CV_32FC1 );
for( int i = 0; i < trainClasses.rows; i++ )
{
for( int k = 0; k < trainClasses.cols; k++ )
{
//If the class of sample i equals class k
if( k == classes.at<int>(i) )
trainClasses.at<float>(i,k) = 1;
else
trainClasses.at<float>(i,k) = 0;
}
}
Mat weights( 1, TrainData.rows, CV_32FC1, Scalar::all(1) );
//Learn classifier
ann.train( TrainData, trainClasses, weights );
}
int _tmain(int argc, _TCHAR* argv[])
{
Mat input = imread("test.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Plate mplate;
//Read file storage.
FileStorage fs;
fs.open("OCR.xml", FileStorage::READ);
Mat TrainingData;
Mat Classes;
fs["TrainingDataF15"] >> TrainingData;
fs["classes"] >> Classes;
//train the neural network
train(TrainingData, Classes, 10);
//process the image and save each character image
//Threshold input image
Mat img_threshold;
threshold(input, img_threshold, 60, 255, CV_THRESH_BINARY_INV);
Mat img_contours;
img_threshold.copyTo(img_contours);
//Find contours of possibles characters
vector< vector< Point> > contours;
findContours(img_contours,
contours, // a vector of contours
CV_RETR_EXTERNAL, // retrieve the external contours
CV_CHAIN_APPROX_NONE); // all pixels of each contours
//Start iterating over each contour found
vector<vector<Point> >::iterator itc= contours.begin();
//Remove patches that are not inside the limits of aspect ratio and area.
while (itc!=contours.end()) {
//Create bounding rect of object
Rect mr= boundingRect(Mat(*itc));
//rectangle(result, mr, Scalar(255,0,0),2);
//Crop image
Mat auxRoi(img_threshold, mr);
if(verifySizes(auxRoi)){
auxRoi=preprocessChar(auxRoi);
//extract histogram features from each character patch
Mat f=features(auxRoi,15);
//For each segment feature Classify
int character=classify(f);
mplate.chars.push_back(strCharacters[character]);
mplate.charsPos.push_back(mr);
//printf("%c ",strCharacters[character]);
}
++itc;
}
string licensePlate=mplate.str();
cout<<licensePlate<<endl;
return 0;
}
Harris corners
#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#define PI 3.1415926
using namespace std;
using namespace cv;
class harris
{
private:
cv::Mat cornerStrength; //result of cv::cornerHarris, i.e. the corner response value of every pixel
cv::Mat cornerTh; //cornerStrength after thresholding
cv::Mat localMax; //local-maximum map
int neighbourhood; //neighbourhood window size
int aperture;//Sobel aperture size (Sobel gives each pixel's x and y intensity derivatives)
double k;
double maxStrength;//maximum value of the corner response
double threshold;//threshold that removes weak responses
int nonMaxSize;//neighbourhood size for non-maximum suppression (default 3 here)
cv::Mat kernel;//kernel used for non-maximum suppression, i.e. the dilation kernel
public:
harris():neighbourhood(3),aperture(3),k(0.01),maxStrength(0.0),threshold(0.01),nonMaxSize(3){
};
void setLocalMaxWindowsize(int nonMaxSize){
this->nonMaxSize = nonMaxSize;
};
//compute the corner response and prepare non-maximum suppression
void detect(const cv::Mat &image){
//OpenCV's built-in corner response computation
cv::cornerHarris (image,cornerStrength,neighbourhood,aperture,k);
double minStrength;
//find the minimum and maximum response values
cv::minMaxLoc (cornerStrength,&minStrength,&maxStrength);
cv::Mat dilated;
//dilate with the default 3x3 kernel: after dilation only local maxima keep their original value,
//every other pixel is replaced by the maximum of its 3x3 neighbourhood
cv::dilate (cornerStrength,dilated,cv::Mat());
//comparing with the original keeps exactly the pixels whose value did not change, i.e. the local maxima, stored in localMax
cv::compare(cornerStrength,dilated,localMax,cv::CMP_EQ);
}
//build the corner map
cv::Mat getCornerMap(double qualityLevel) {
cv::Mat cornerMap;
// derive the threshold from the maximum corner response
threshold= qualityLevel*maxStrength;
cv::threshold(cornerStrength,cornerTh,
threshold,255,cv::THRESH_BINARY);
// convert to an 8-bit image
cornerTh.convertTo(cornerMap,CV_8U);
// AND with the local-maximum map so only local maxima remain, i.e. non-maximum suppression is completed
cv::bitwise_and(cornerMap,localMax,cornerMap);
return cornerMap;
}
void getCorners(std::vector<cv::Point> &points,
double qualityLevel) {
//build the corner map
cv::Mat cornerMap= getCornerMap(qualityLevel);
// collect the corners
getCorners(points, cornerMap);
}
// scan the whole map and collect the corner points
void getCorners(std::vector<cv::Point> &points,
const cv::Mat& cornerMap) {
for( int y = 0; y < cornerMap.rows; y++ ) {
const uchar* rowPtr = cornerMap.ptr<uchar>(y);
for( int x = 0; x < cornerMap.cols; x++ ) {
// any non-zero pixel is a corner
if (rowPtr[x]) {
points.push_back(cv::Point(x,y));
}
}
}
}
//mark the corners with circles
void drawOnImage(cv::Mat &image,
const std::vector<cv::Point> &points,
cv::Scalar color= cv::Scalar(255,255,255),
int radius=3, int thickness=2) {
std::vector<cv::Point>::const_iterator it=points.begin();
while (it!=points.end()) {
// draw a circle at each corner
cv::circle(image,*it,radius,color,thickness);
++it;
}
}
};
int main()
{
cv::Mat image, image1 = cv::imread ("c:\\img\\lft.jpg");
//convert to grayscale
cv::cvtColor (image1,image,CV_BGR2GRAY);
// the classic Harris corner detector
harris Harris;
// compute the corner responses
Harris.detect(image);
//collect the corners
std::vector<cv::Point> pts;
Harris.getCorners(pts,0.01);
// mark the corners
Harris.drawOnImage(image,pts);
cv::namedWindow ("harris");
cv::imshow ("harris",image);
cv::waitKey (0);
return 0;
}
Strong corners (Shi-Tomasi)
#include "opencv2/core/core.hpp"
#include "opencv2/flann/miniflann.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/video/video.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/ml/ml.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
//#include <vector>
using namespace std;
using namespace cv;
Mat src,src_gray;
int maxCorners=1000;
int maxTrackbar=100;
RNG rng(12345);
const char* source_window="Image";
void goodFeaturesToTrack_demo(int,void*);
int main()
{
src=imread("c:\\img\\grid.jpg",1);
cvtColor(src,src_gray,CV_BGR2GRAY);
namedWindow(source_window,CV_WINDOW_AUTOSIZE);
createTrackbar("Max corners:",source_window,&maxCorners,maxTrackbar,goodFeaturesToTrack_demo);
imshow(source_window,src);
goodFeaturesToTrack_demo(0,0);
waitKey(0);
return 0;
}
void goodFeaturesToTrack_demo(int,void*)
{
if (maxCorners<1)
{
maxCorners=1;
}
//parameters of the Shi-Tomasi corner detector
vector<Point2f> corners;
double qualityLevel=0.01;//fraction of the best corner's quality measure (minimal eigenvalue) used as the acceptance threshold
double minDistance=10;//minimum distance between returned corners
int blockSize=3;
bool useHarrisDetector=false;
double k=0.04;
Mat copy;
copy=src.clone();
goodFeaturesToTrack(src_gray,corners,maxCorners,qualityLevel,minDistance,Mat(),blockSize,useHarrisDetector,k);
cout<<"Number of corners detected: "<<corners.size()<<endl;
int r=1;
for (int i=0;i<corners.size();i++)
{
circle(copy,corners[i],r,Scalar(rng.uniform(0,255),rng.uniform(0,255),rng.uniform(0,255)),2,8,0);
}
namedWindow(source_window,CV_WINDOW_AUTOSIZE);
imshow(source_window,copy);
}
FAST corners
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>
using namespace cv;
int main()
{
Mat image;
image = imread("c:\\img\\grid.jpg");
// vector of keyPoints
std::vector<KeyPoint> keyPoints;
// construction of the fast feature detector object
FastFeatureDetector fast(40); // detection threshold of 40
// feature point detection
fast.detect(image,keyPoints);
drawKeypoints(image, keyPoints, image, Scalar::all(255), DrawMatchesFlags::DRAW_OVER_OUTIMG);
imshow("FAST feature", image);
cvWaitKey(0);
return 0;
}
SIFT
SIFT features are invariant to rotation, scale and illumination changes, which makes them very stable local features, and they are now widely used. The SIFT implementation combines blob detection, descriptor generation and feature matching into one optimized pipeline.
- SIFT::SIFT(int nfeatures=0, int nOctaveLayers=3, double contrastThreshold=0.04, double edgeThreshold=10, double sigma=1.6)
nfeatures: number of features to retain (the detected keypoints are ranked by score and the best nfeatures are returned).
nOctaveLayers: number of layers in each octave of the pyramid (the number of octaves is computed automatically from the image resolution).
contrastThreshold: threshold used to filter out weak, low-contrast keypoints. The larger contrastThreshold, the fewer keypoints are returned.
edgeThreshold: threshold used to filter out edge-like responses. The larger edgeThreshold, the more keypoints are kept (fewer are filtered out).
sigma: Gaussian smoothing coefficient applied to the level-0 image of the pyramid, i.e. σ.
- void SIFT::operator()(InputArray img, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false)
img: 8-bit grayscale image
mask: optional mask selecting the detection region
keypoints: the keypoint vector
descriptors: output descriptors (pass cv::noArray() if they are not needed)
useProvidedKeypoints: if false, keypoints are detected first; if true, detection is skipped and descriptors are computed for the keypoints passed in.
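Before the full matching example below, a minimal sketch of calling this interface directly (OpenCV 2.4.x nonfree module assumed), keeping only the 400 strongest keypoints; the input path is reused from the example:
#include <opencv2/opencv.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <iostream>
#include <vector>
int main()
{
    cv::Mat img = cv::imread("c:\\img\\cv.jpg", 0);            // 8-bit grayscale input
    if (img.empty()) return -1;
    // keep the 400 best keypoints, other parameters at their defaults
    cv::SIFT sift(400, 3, 0.04, 10, 1.6);
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;
    sift(img, cv::noArray(), keypoints, descriptors, false);   // detect, then compute descriptors
    std::cout << keypoints.size() << " keypoints, descriptor length " << descriptors.cols << std::endl;
    return 0;
}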
#include "stdafx.h"
#include <opencv2/opencv.hpp>
#include <opencv2/features2d/features2d.hpp>
#include<opencv2/nonfree/nonfree.hpp>
#include<opencv2/legacy/legacy.hpp>
#include<vector>
using namespace std;
using namespace cv;
int _tmain(int argc, _TCHAR* argv[])
{
const char* imagename = "c:\\img\\cv.jpg";
//read the first image from file
Mat img = imread(imagename);
const char* imagename2 = "c:\\img\\cv2.jpg";
Mat img2=imread(imagename2);
//if loading failed
if(img.empty())
{
fprintf(stderr, "Can not load image %s\n", imagename);
return -1;
}
if(img2.empty())
{
fprintf(stderr, "Can not load image %s\n", imagename2);
return -1;
}
}
//display the images
imshow("image before", img);
imshow("image2 before",img2);
//SIFT feature detection
SiftFeatureDetector siftdtc;
vector<KeyPoint>kp1,kp2;
siftdtc.detect(img,kp1);
Mat outimg1;
drawKeypoints(img,kp1,outimg1);
imshow("image1 keypoints",outimg1);
KeyPoint kp;
vector<KeyPoint>::iterator itvc;
for(itvc=kp1.begin();itvc!=kp1.end();itvc++)
{
cout<<"angle:"<<itvc->angle<<"\t"<<itvc->class_id<<"\t"<<itvc->octave<<"\t"<<itvc->pt<<"\t"<<itvc->response<<endl;
}
siftdtc.detect(img2,kp2);
Mat outimg2;
drawKeypoints(img2,kp2,outimg2);
imshow("image2 keypoints",outimg2);
SiftDescriptorExtractor extractor;
Mat descriptor1,descriptor2;
BruteForceMatcher<L2<float>> matcher;
vector<DMatch> matches;
Mat img_matches;
extractor.compute(img,kp1,descriptor1);
extractor.compute(img2,kp2,descriptor2);
imshow("desc",descriptor1);
cout<<endl<<descriptor1<<endl;
matcher.match(descriptor1,descriptor2,matches);
drawMatches(img,kp1,img2,kp2,matches,img_matches);
imshow("matches",img_matches);
//wait for a key press; any key returns
waitKey();
return 0;
}
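Matching every descriptor as above keeps many poor correspondences; a common refinement is Lowe's ratio test on the two nearest neighbours. A minimal sketch, reusing matcher, descriptor1/descriptor2 and the keypoints from the program above (the 0.8 ratio is an assumed, commonly used threshold):
// keep a match only if its best distance is clearly smaller than the second-best distance
vector<vector<DMatch> > knnMatches;
matcher.knnMatch(descriptor1, descriptor2, knnMatches, 2);
vector<DMatch> goodMatches;
for (size_t i = 0; i < knnMatches.size(); i++)
{
    if (knnMatches[i].size() == 2 && knnMatches[i][0].distance < 0.8f * knnMatches[i][1].distance)
        goodMatches.push_back(knnMatches[i][0]);
}
drawMatches(img, kp1, img2, kp2, goodMatches, img_matches);
imshow("matches (ratio test)", img_matches);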
SURF
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/features2d/features2d.hpp>
#include<opencv2/nonfree/nonfree.hpp>
#include<opencv2/legacy/legacy.hpp>
using namespace cv;
void readme();
/**
* @function main
* @brief Main function
*/
int main( int argc, char** argv )
{
Mat img_1 = imread( "c:\\img\\cv.jpg", CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( "c:\\img\\cv2.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors with a brute force matcher
BruteForceMatcher< L2<float> > matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
//-- Draw matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2, matches, img_matches );
//-- Show detected matches
imshow("Matches", img_matches );
waitKey(0);
return 0;
}
/**
* @function readme
*/
void readme()
{ std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl; }
Image stitching
#include <iostream>
#include <fstream>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/stitcher.hpp"
using namespace std;
using namespace cv;
bool try_use_gpu = false;
vector<Mat> imgs;
string result_name = "result.jpg";
//void printUsage();
//int parseCmdArgs(int argc, char** argv);
int main(int argc, char* argv[])
{
Mat img=imread("c:\\img\\1.jpg");
imgs.push_back(img);
img=imread("c:\\img\\2.jpg");
imgs.push_back(img);
img=imread("c:\\img\\3.jpg");
imgs.push_back(img);
Mat pano;
Stitcher stitcher = Stitcher::createDefault(try_use_gpu);
Stitcher::Status status = stitcher.stitch(imgs, pano);
if (status != Stitcher::OK)
{
cout << "Can't stitch images, error code = " << int(status) << endl;
return -1;
}
imwrite(result_name, pano);
imshow(result_name, pano);
waitKey(0);
return 0;
}