The meanshift tracking algorithm

When the meanshift algorithm is used for visual tracking, it builds a confidence map over the new image from the color histogram of the object in the previous image, and then uses mean shift to find the peak of that confidence map near the object's previous position. The confidence map is a probability density function on the new image: every pixel is assigned the probability that its color occurs in the object of the previous image.
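
As a minimal formulation (the notation is mine, not from the original text): if h is the normalized color histogram of the object in the previous frame and b(I(x, y)) is the histogram bin that the pixel value I(x, y) falls into, the confidence map is

P(x, y) = h(b(I(x, y)))

i.e. every pixel is replaced by the histogram value of its own color, which is exactly what the back projection described below computes.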

Steps of the meanshift tracking algorithm (a code sketch of a single iteration follows the list):
① Choose a search window: its initial position, size, shape (symmetric or skewed, rectangular or circular) and kernel type (uniform, polynomial, exponential or Gaussian);
② Compute the centroid (center of mass) of the window;
③ Move the center of the window to the computed centroid;
④ Go back to step ② and repeat until the window position no longer changes.
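
A minimal sketch of steps ② to ④ in code (the function name, the precomputed single-channel confidence map backproj and the initial window are illustrative assumptions, not taken from the original code):

#include <opencv2/opencv.hpp>
#include <algorithm>
#include <limits>
using namespace cv;

// A hand-rolled mean-shift loop over a precomputed back-projection image.
// cv::meanShift() does exactly this internally; the loop is only written out
// here to make steps 2-4 above concrete.
Rect meanShiftIterate(const Mat &backproj, Rect window, int maxIter = 100)
{
	for (int i = 0; i < maxIter; i++)
	{
		// Step 2: centroid (center of mass) of the probability inside the window
		Moments m = moments(backproj(window), false);
		if (m.m00 <= std::numeric_limits<double>::epsilon())
			break; // no probability mass under the window, give up
		int cx = cvRound(m.m10 / m.m00);
		int cy = cvRound(m.m01 / m.m00);

		// Step 3: shift the window so that its center lands on the centroid
		int dx = cx - window.width / 2;
		int dy = cy - window.height / 2;
		window.x = std::min(std::max(window.x + dx, 0), backproj.cols - window.width);
		window.y = std::min(std::max(window.y + dy, 0), backproj.rows - window.height);

		// Step 4: stop once the window no longer moves
		if (dx == 0 && dy == 0)
			break;
	}
	return window;
}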
 

Computing the color projection image (back projection)

(1) To reduce the influence of illumination changes on target tracking, the image is first converted from the RGB color space to the HSV color space;

(2) A histogram of the H (hue) channel is computed. This histogram represents the probability of each hue value occurring, i.e. it can be used to look up the probability (or the pixel count) for a given hue value x; this gives the color-probability lookup table;

(3) The value of every pixel in the image is then replaced by the probability of its color, which yields the color-probability distribution image.

These three steps together are called back projection. Note that the color-probability distribution image is a single-channel (grayscale) image; a code sketch of the three steps follows.
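
A minimal sketch of steps (1) to (3) with OpenCV, assuming the target region of the previous frame is available as targetBGR and the current frame as frameBGR (the function and variable names are mine, not from the original post):

#include <opencv2/opencv.hpp>
using namespace cv;

// Build the hue histogram of the target once, then back-project it onto a frame.
Mat hueBackProject(const Mat &targetBGR, const Mat &frameBGR)
{
	// (1) BGR (OpenCV's default channel order) -> HSV, to be less sensitive to lighting
	Mat targetHSV, frameHSV;
	cvtColor(targetBGR, targetHSV, COLOR_BGR2HSV);
	cvtColor(frameBGR, frameHSV, COLOR_BGR2HSV);

	// (2) histogram of the H channel only: the color-probability lookup table
	int channels[] = { 0 };
	int histSize = 32;             // number of hue bins
	float hueRange[] = { 0, 180 }; // hue spans 0..179 in OpenCV
	const float *ranges[] = { hueRange };
	Mat hist;
	calcHist(&targetHSV, 1, channels, Mat(), hist, 1, &histSize, ranges);
	normalize(hist, hist, 0, 255, NORM_MINMAX);

	// (3) replace every pixel by the histogram value of its hue:
	// the result is a single-channel "probability" image
	Mat backproj;
	calcBackProject(&frameHSV, 1, channels, hist, backproj, ranges);
	return backproj;
}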

Meanshift optimization

As mentioned earlier, meanShift is a non-parametric probability-density estimation method: it iterates until it converges on the optimal position of the search window (the window size itself stays fixed; adapting the size is what camshift adds on top).
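
In OpenCV the whole iteration is a single call to cv::meanShift(). A minimal sketch, assuming a back-projection image backproj and an initial window rect already exist (names are illustrative):

#include <opencv2/opencv.hpp>
using namespace cv;

// Run OpenCV's built-in mean shift on a back-projection image.
// 'window' is both the initial guess and the result (it is updated in place).
int trackOnce(const Mat &backproj, Rect &window)
{
	// stop after at most 10 iterations, or when the shift drops below 1 pixel
	TermCriteria criteria(TermCriteria::COUNT | TermCriteria::EPS, 10, 1);
	return meanShift(backproj, window, criteria); // returns the number of iterations used
}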

The camshift tracking algorithm

As mentioned above, camshift (Continuously Adaptive Mean Shift) simply runs meanShift on every frame of the video sequence, using the meanshift result of the previous frame as the initial window for the next frame, and additionally adapts the size and orientation of the search window to the back-projection result. Iterating like this over the whole sequence tracks the target.
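
The per-frame call in OpenCV is cv::CamShift(). A minimal sketch, assuming the back projection of the current frame (backproj) and the window carried over from the previous frame (trackWindow) are already available:

#include <opencv2/opencv.hpp>
using namespace cv;

// One CamShift step: returns an oriented box for drawing and updates trackWindow
// in place, so the next frame starts from this frame's result.
RotatedRect camShiftOnce(const Mat &backproj, Rect &trackWindow)
{
	TermCriteria criteria(TermCriteria::COUNT | TermCriteria::EPS, 10, 1);
	RotatedRect box = CamShift(backproj, trackWindow, criteria);
	return box; // box.size and box.angle reflect the adapted window
}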

meanshift:

#include "core/core.hpp"    
#include "highgui/highgui.hpp"    
#include "imgproc/imgproc.hpp"
#include "video/tracking.hpp"
#include<iostream>    

using namespace cv;
using namespace std;

Mat image;
Mat rectImage;
Mat imageCopy; // copy of the current frame, used while drawing the selection rectangle
bool leftButtonDownFlag = false; // true while the left button is held down (video paused)
Point originalPoint; // starting corner of the selection rectangle
Point processPoint;  // current / final corner of the selection rectangle

Mat targetImageHSV;
int histSize = 200;
float histR[] = { 0, 255 }; // hue actually spans 0..179 in OpenCV; the upper bins simply stay empty
const float *histRange = histR;
int channels[] = { 0 }; // only the hue channel is used (the histogram is 1-D)
Mat dstHist;
Rect rect;
vector<Point> pt; // track of the target's center positions
void onMouse(int event, int x, int y, int flags, void* ustc); // mouse callback

int main(int argc, char *argv[])
{
	VideoCapture video;
	video.open("D:/testimage/CXK.avi"); // open() returns a bool, not an image
	//VideoCapture video(0);            // alternatively: capture from the camera
	//double fps = video.get(CAP_PROP_FPS); // video frame rate
	//double pauseTime = 1000 / fps;        // delay between two frames
	if (!video.isOpened())
	{
		return -1;
	}
	namedWindow("目标跟踪", 0);
	setMouseCallback("目标跟踪", onMouse);
	while (video.read(image)) // exits the loop when the video ends
	{
		if (leftButtonDownFlag) // left button held down: pause playback while the rectangle is drawn
		{
			waitKey(0); // resume on the next key press
		}

		if (originalPoint != processPoint && !leftButtonDownFlag)
		{
			Mat imageHSV;
			Mat calcBackImage;
			cvtColor(image, imageHSV, COLOR_BGR2HSV); // OpenCV frames are BGR, not RGB
			calcBackProject(&imageHSV, 1, channels, dstHist, calcBackImage, &histRange); // back projection
			TermCriteria criteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.001);
			meanShift(calcBackImage, rect, criteria); // updates rect in place
			Mat imageROI = imageHSV(rect); // update the histogram template from the new position
			targetImageHSV = imageHSV(rect);
			calcHist(&imageROI, 1, channels, Mat(), dstHist, 1, &histSize, &histRange);
			normalize(dstHist, dstHist, 0, 255, NORM_MINMAX); // keep the same scale as the initial histogram
			rectangle(image, rect, Scalar(0, 0, 255), 3); // draw the tracked target
			pt.push_back(Point(rect.x + rect.width / 2, rect.y + rect.height / 2));
			for (int i = 0; i + 1 < (int)pt.size(); i++) // draw the trajectory of the target center
			{
				line(image, pt[i], pt[i + 1], Scalar(0, 255, 0), 2);
			}
		}
		imshow("目标跟踪", image);
		waitKey(100);
	}
	return 0;
}


//*******************************************************************//
// mouse callback: select the target rectangle with the left mouse button

void onMouse(int event, int x, int y, int flags, void *ustc)
{
	if (event == EVENT_LBUTTONDOWN)
	{
		leftButtonDownFlag = true; // pause playback while the selection is being made
		originalPoint = Point(x, y); // starting corner of the rectangle
		processPoint = originalPoint;
	}
	if (event == EVENT_MOUSEMOVE && leftButtonDownFlag)
	{
		imageCopy = image.clone();
		processPoint = Point(x, y);
		if (originalPoint != processPoint)
		{
			// draw the rectangle on a copy so the original frame stays clean
			rectangle(imageCopy, originalPoint, processPoint, Scalar(0, 0, 255), 2);
		}
		imshow("目标跟踪", imageCopy);
	}
	if (event == EVENT_LBUTTONUP)
	{
		leftButtonDownFlag = false;
		rect = Rect(originalPoint, processPoint);
		rectImage = image(rect); // show the selected sub-image
		imshow("Sub Image", rectImage);
		cvtColor(rectImage, targetImageHSV, COLOR_BGR2HSV); // frames are BGR
		imshow("targetImageHSV", targetImageHSV);
		// hue histogram of the selected target: the model used for back projection
		calcHist(&targetImageHSV, 1, channels, Mat(), dstHist, 1, &histSize, &histRange, true, false);
		normalize(dstHist, dstHist, 0, 255, NORM_MINMAX);
		imshow("dstHist", dstHist);
	}
}

 


camshift:

#include <opencv2/opencv.hpp>
#include <cstdio>
#include <vector>
using namespace cv;

int main()
{
	VideoCapture capture;
	Mat frame;
	std::vector<Point> pt; // track of the target's center positions
	//capture.open(0);     // alternatively: capture from the camera
	capture.open("D:/testimage/video/fish.mp4"); // open() returns a bool, not a frame
	if (!capture.isOpened())
	{
		printf("can not open camera \n");
		return -1;
	}
	namedWindow("input", WINDOW_AUTOSIZE);
	namedWindow("output", WINDOW_AUTOSIZE);
	capture.read(frame);
	if (frame.empty())
		return -1;
	Rect2d first = selectROI("output", frame); // let the user draw the initial target box
	Rect selectionROI;
	selectionROI.width = first.width;
	selectionROI.height = first.height;
	selectionROI.x = first.x;
	selectionROI.y = first.y;
	printf("x=%d, y=%d, width=%d, height=%d\n", selectionROI.x, selectionROI.y, selectionROI.width, selectionROI.height);

	Mat mask, hist, backproject;
	int bins = 120;
	Mat drawImg = Mat::zeros(300, 300, CV_8UC3);

	Mat hsvimage, hue;
	int channels[] = { 0, 0 };   // copy channel 0 (hue) of hsvimage into channel 0 of hue
	float hrange[] = { 0, 180 }; // hue spans 0..179 in OpenCV
	const float *hranges = hrange;

	// Compute the hue histogram of the selected ROI once, on the first frame;
	// this histogram is the target model that gets back-projected onto every frame.
	cvtColor(frame, hsvimage, COLOR_BGR2HSV);
	inRange(hsvimage, Scalar(25, 43, 46), Scalar(35, 256, 256), mask); // keep only roughly yellow pixels
	hue.create(hsvimage.size(), hsvimage.depth());
	mixChannels(&hsvimage, 1, &hue, 1, channels, 1);
	Mat roi(hue, selectionROI);
	Mat maskroi(mask, selectionROI);
	calcHist(&roi, 1, 0, maskroi, hist, 1, &bins, &hranges);
	normalize(hist, hist, 0, 255, NORM_MINMAX);

	while (capture.read(frame))
	{
		// Per-frame preprocessing: HSV conversion, color mask and hue channel.
		cvtColor(frame, hsvimage, COLOR_BGR2HSV);
		inRange(hsvimage, Scalar(25, 43, 46), Scalar(35, 256, 256), mask);
		mixChannels(&hsvimage, 1, &hue, 1, channels, 1);


		// Draw the hue histogram, each bar colored with the hue it represents.
		int binw = drawImg.cols / bins;
		Mat colorIndex = Mat(1, bins, CV_8UC3);
		for (int i = 0; i < bins; i++)
		{
			colorIndex.at<Vec3b>(0, i) = Vec3b(saturate_cast<uchar>(i * 180 / bins), 255, 255);
		}
		cvtColor(colorIndex, colorIndex, COLOR_HSV2BGR);
		for (int i = 0; i < bins; i++)
		{
			int val = saturate_cast<int>(hist.at<float>(i) * drawImg.rows / 255);
			rectangle(drawImg, Point(i * binw, drawImg.rows), Point((i + 1) * binw, drawImg.rows - val), Scalar(colorIndex.at<Vec3b>(0, i)), -1, 8, 0);
		}

		// Back-project the target histogram onto the current frame.
		calcBackProject(&hue, 1, 0, hist, backproject, &hranges);
		backproject &= mask; // suppress pixels outside the color mask
		// CamShift updates selectionROI in place and returns an oriented box.
		RotatedRect trackBox = CamShift(backproject, selectionROI, TermCriteria(TermCriteria::COUNT | TermCriteria::EPS, 10, 1));
		Rect rect;
		rect.x = trackBox.center.x - trackBox.size.width / 2.0;
		rect.y = trackBox.center.y - trackBox.size.height / 2.0;
		rect.width = trackBox.size.width;
		rect.height = trackBox.size.height;

		rectangle(frame, rect, Scalar(255, 255, 0), 3); // draw the tracked target

		pt.push_back(Point(rect.x + rect.width / 2, rect.y + rect.height / 2));
		for (int i = 0; i + 1 < (int)pt.size(); i++) // draw the trajectory of the target center
		{
			line(frame, pt[i], pt[i + 1], Scalar(0, 255, 0), 2);
		}
		imshow("input", frame);
		imshow("output", drawImg);
		waitKey(10);
	}

	capture.release();
	return 0;
}