We will carry out the digital image processing below from the following three aspects:

I. Image noise estimation

II. Image segmentation

III. Image compression and decompression

 

I. Image noise estimation

• Common noise models (a small synthesis sketch follows the figure caption below):

• 1. Gaussian noise;

• 2. Rayleigh noise;

• 3. Gamma noise;

• 4. Exponentially distributed noise;

• 5. Uniformly distributed noise;

• 6. Impulse (salt-and-pepper) noise;


The noise distributions shown in the figure are:

    • a. Gaussian noise distribution;
    • b. Rayleigh noise distribution;
    • c. gamma noise distribution;
    • d. exponential noise distribution;
    • e. uniform noise distribution;
    • f. impulse noise distribution;
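For experimenting with these models it is convenient to synthesize noisy test images. Below is a minimal sketch (not part of the original program; addTestNoise is an illustrative name) that adds Gaussian and impulse (salt-and-pepper) noise to a grayscale Mat using OpenCV's randn and theRNG:

/*Sketch: synthesize Gaussian + impulse noise on a grayscale image (assumed CV_8UC1)*/
Mat addTestNoise(const Mat &gray)
{
	Mat noisy;
	gray.convertTo(noisy, CV_32F);
	/*Gaussian noise: zero mean, standard deviation 15*/
	Mat gauss(gray.size(), CV_32F);
	randn(gauss, Scalar::all(0), Scalar::all(15));
	noisy += gauss;
	noisy.convertTo(noisy, CV_8U);	/*saturates back to [0,255]*/
	/*Impulse noise: corrupt roughly 2% of the pixels with 0 or 255*/
	RNG &rng = theRNG();
	int n = (int)(0.02 * gray.total());
	for (int k = 0; k < n; k++)
	{
		int r = rng.uniform(0, gray.rows);
		int c = rng.uniform(0, gray.cols);
		noisy.at<uchar>(r, c) = (k % 2) ? 255 : 0;
	}
	return noisy;
}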
  • The library functions we will use are:
CV_EXPORTS_W void blur( InputArray src, OutputArray dst, Size ksize, Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT );(mean/box filter)
CV_EXPORTS_W void GaussianBlur( InputArray src, OutputArray dst, Size ksize,double sigmaX, double sigmaY = 0,int borderType = BORDER_DEFAULT );(Gaussian filter)
CV_EXPORTS_W void medianBlur( InputArray src, OutputArray dst, int ksize );(median filter)
CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d,double sigmaColor, double sigmaSpace,int borderType = BORDER_DEFAULT );(bilateral filter)
CV_EXPORTS_W int getOptimalDFTSize(int vecsize);(returns the optimal size for the DFT)
CV_EXPORTS_W void dft(InputArray src, OutputArray dst, int flags = 0, int nonzeroRows = 0);(DFT)
CV_EXPORTS void calcHist( const Mat* images, int nimages,const int* channels, InputArray mask,OutputArray hist, int dims, const int* histSize,const float** ranges, bool uniform = true, bool accumulate = false );(histogram computation)

Here we prepare five filters: a mean filter, a Gaussian filter, a median filter, a bilateral filter, and a frequency-domain Gaussian filter (mainly for removing periodic noise).

  • The workflow for estimating image noise is as follows:
  • The corresponding code is as follows:
/*Mean (box) filter*/
Mat Blurefilterfunction(Mat &img)
{
	namedWindow("均值滤波【效果图】", CV_WINDOW_AUTOSIZE);

	Mat out;
	blur(img, out, Size(7, 7)); /*mean filtering with a 7x7 kernel*/
	imshow("均值滤波【效果图】", out);

	return out;
}
/*Gaussian filter*/
Mat GaussianBlurfilterfunction(Mat &img)
{
	namedWindow("高斯滤波【效果图】", CV_WINDOW_AUTOSIZE);

	Mat out;
	GaussianBlur(img, out, Size(3, 3), 0, 0); /*Gaussian filtering with a 3x3 kernel; sigma = 0 lets OpenCV derive it from the kernel size*/
	imshow("高斯滤波【效果图】", out);

	return out;
}
/*Median filter*/
Mat MedianBlurfilterfunction(Mat &img)
{
	namedWindow("中值滤波【效果图】", CV_WINDOW_AUTOSIZE);

	Mat out;
	medianBlur(img, out, 7); 
	imshow("中值滤波【效果图】", out);

	return out;
}
/*Bilateral filter*/
Mat BilateralBlurfilterfunction(Mat &img)
{
	namedWindow("双边滤波【效果图】", CV_WINDOW_AUTOSIZE);

	Mat out;
	bilateralFilter(img, out, 25, 25 * 2, 25 / 2);	/*d = 25, sigmaColor = 50, sigmaSpace = 12*/
	imshow("双边滤波【效果图】", out);

	return out;
}
/*Gaussian low-pass filtering in the frequency domain*/
//**************************************
//Frequency-domain filtering, using a Gaussian low-pass filter as the example
//**************************************
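//The kernel built by gaussianlbrf below is the standard Gaussian low-pass transfer function
//    H(u,v) = exp( -D^2(u,v) / (2*sigma^2) ),
//where D(u,v) is the distance of (u,v) from the centre of the (shifted) spectrum;
//in the code, d0 = 2*sigma*sigma and d = D^2.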
Mat gaussianlbrf(Mat scr, float sigma);//builds the Gaussian low-pass filter kernel
Mat freqfilt(Mat scr, Mat blur);//frequency-domain filtering
Mat FrequencyDomainGaussFiltering(Mat &input)
{
	//Mat input = imread("p3-00-01.tif", CV_LOAD_IMAGE_GRAYSCALE);
	int w = getOptimalDFTSize(input.cols);  //optimal width for the DFT
	int h = getOptimalDFTSize(input.rows); //optimal height for the DFT
	Mat padded;
	copyMakeBorder(input, padded, 0, h - input.rows, 0, w - input.cols, BORDER_CONSTANT, Scalar::all(0));  //pad the borders
	padded.convertTo(padded, CV_32FC1); //convert the image to float
	Mat gaussianKernel = gaussianlbrf(padded, 30);//高斯低通滤波器
	Mat out = freqfilt(padded, gaussianKernel);//频率域滤波
	imshow("结果图 sigma=30", out);
	return out;
}
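//Note: the Mat returned above keeps the padded optimal-DFT size, which may be slightly larger
//than the input image. If the caller needs the original geometry, it can crop the result,
//for example (illustrative, not part of the original code):
//	Mat cropped = out(Rect(0, 0, input.cols, input.rows)).clone();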
//*****************高斯低通滤波器***********************
Mat gaussianlbrf(Mat scr, float sigma)
{
	Mat gaussianBlur(scr.size(), CV_32FC1); //,CV_32FC1
	float d0 = 2 * sigma*sigma;//高斯函数参数,越小,频率高斯滤波器越窄,滤除高频成分越多,图像就越平滑
	for (int i = 0; i < scr.rows; i++)
	{
		for (int j = 0; j < scr.cols; j++)
		{
			float d = pow(float(i - scr.rows / 2), 2) + pow(float(j - scr.cols / 2), 2);//分子,计算pow必须为float型
			gaussianBlur.at<float>(i, j) = expf(-d / d0);//expf为以e为底求幂(必须为float型)
		}
	}
	imshow("高斯低通滤波器", gaussianBlur);
	return gaussianBlur;
}
//*****************频率域滤波*******************
Mat freqfilt(Mat scr, Mat blur)
{
	//***********************DFT*******************
	Mat plane[] = { scr, Mat::zeros(scr.size() , CV_32FC1) }; //创建通道,存储dft后的实部与虚部(CV_32F,必须为单通道数)
	Mat complexIm;
	merge(plane, 2, complexIm);//合并通道 (把两个矩阵合并为一个2通道的Mat类容器)
	dft(complexIm, complexIm);//进行傅立叶变换,结果保存在自身  
	//***************中心化********************
	split(complexIm, plane);//分离通道(数组分离)
	int cx = plane[0].cols / 2; int cy = plane[0].rows / 2;//以下的操作是移动图像  (零频移到中心)
	Mat part1_r(plane[0], Rect(0, 0, cx, cy));  //元素坐标表示为(cx,cy)
	Mat part2_r(plane[0], Rect(cx, 0, cx, cy));
	Mat part3_r(plane[0], Rect(0, cy, cx, cy));
	Mat part4_r(plane[0], Rect(cx, cy, cx, cy));
	Mat temp;
	part1_r.copyTo(temp);  //左上与右下交换位置(实部)
	part4_r.copyTo(part1_r);
	temp.copyTo(part4_r);
	part2_r.copyTo(temp);  //右上与左下交换位置(实部)
	part3_r.copyTo(part2_r);
	temp.copyTo(part3_r);
	Mat part1_i(plane[1], Rect(0, 0, cx, cy));  //元素坐标(cx,cy)
	Mat part2_i(plane[1], Rect(cx, 0, cx, cy));
	Mat part3_i(plane[1], Rect(0, cy, cx, cy));
	Mat part4_i(plane[1], Rect(cx, cy, cx, cy));
	part1_i.copyTo(temp);  //左上与右下交换位置(虚部)
	part4_i.copyTo(part1_i);
	temp.copyTo(part4_i);
	part2_i.copyTo(temp);  //右上与左下交换位置(虚部)
	part3_i.copyTo(part2_i);
	temp.copyTo(part3_i);
	//*****************滤波器函数与DFT结果的乘积****************
	Mat blur_r, blur_i, BLUR;
	multiply(plane[0], blur, blur_r); //滤波(实部与滤波器模板对应元素相乘)
	multiply(plane[1], blur, blur_i);//滤波(虚部与滤波器模板对应元素相乘)
	Mat plane1[] = { blur_r, blur_i };
	merge(plane1, 2, BLUR);//实部与虚部合并
	  //*********************得到原图频谱图***********************************
	magnitude(plane[0], plane[1], plane[0]);//获取幅度图像,0通道为实部通道,1为虚部,因为二维傅立叶变换结果是复数  
	plane[0] += Scalar::all(1);  //the raw Fourier spectrum is hard to inspect, so take log(1 + |F|)
	log(plane[0], plane[0]);    // float型的灰度空间为[0,1])
	normalize(plane[0], plane[0], 1, 0, CV_MINMAX);  //归一化便于显示
	imshow("原图像频谱图", plane[0]);
	idft(BLUR, BLUR);	//idft结果也为复数
	split(BLUR, plane);//分离通道,主要获取通道
	magnitude(plane[0], plane[1], plane[0]);  //求幅值(模)
	normalize(plane[0], plane[0], 1, 0, CV_MINMAX);  //归一化便于显示
	return plane[0];//返回参数
}

/*Draw a one-dimensional histogram*/
void Drawing_one_dimensionalhistogram(Mat &img)
{
	/*输入图像转化为灰度图像*/
	//cvtColor(img, img, CV_BGR2GRAY);
	/*定义变量*/
	MatND dstHist;
	int dims = 1;
	float hranges[] = { 0,255 };
	const float *ranges[] = { hranges };   /*这里需要为const类型*/
	int size = 256;
	int channels = 0;

	/*计算图像的直方图*/
	calcHist(&img, 1, &channels, Mat(), dstHist, dims, &size, ranges);
	int scale = 1;

	Mat dstImage(size * scale, size, CV_8U, Scalar(0));
	/*获取最大值和最小值*/
	double minValue = 0;
	double maxValue = 0;
	minMaxLoc(dstHist, &minValue, &maxValue, 0, 0);
	/*绘制出直方图*/
	int hpt = saturate_cast<int>(0.9 * size);
	for (int i = 0; i < 256; i++)
	{
		float binValue = dstHist.at<float>(i);
		int realValue = saturate_cast<int>(binValue * hpt / maxValue);
		rectangle(dstImage, Point(i*scale, size - 1), Point((i + 1)*scale - 1, size - realValue), Scalar(255));
	}
	imshow("图像的一维直方图", dstImage);
}

For example, the histograms of the following images:

  • From top to bottom: Rayleigh noise (periodic noise), Gaussian noise, uniform noise (periodic noise)
  • First we filter the Rayleigh noise in the spatial domain; the result is shown below:
  • Filtering it in the frequency domain gives the following result:
  • For the Gaussian noise, Gaussian filtering is enough, although the parameters need some tuning; the result is shown below:
  • The last one, uniform noise, also behaves as periodic noise here, so we filter it in the frequency domain; the result is shown below (see the small dispatch sketch right after this list):
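To tie these choices together, the selection described above can be written as a small dispatch helper. This is only a sketch: it assumes the noise type has already been judged from the histogram, it reuses the filter functions defined earlier in this post, and a single-channel grayscale image is assumed for the frequency-domain branch.

Mat denoise(Mat &img, const string &noiseType)
{
	if (noiseType == "gaussian")
		return GaussianBlurfilterfunction(img);		//spatial Gaussian smoothing
	if (noiseType == "impulse")
		return MedianBlurfilterfunction(img);		//median filtering handles salt-and-pepper noise well
	if (noiseType == "periodic")
		return FrequencyDomainGaussFiltering(img);	//Rayleigh / uniform periodic noise: frequency-domain filtering
	return Blurefilterfunction(img);				//fall back to mean filtering
}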
II. Image segmentation

  • The flow chart of Otsu segmentation:
  • The library function we use is:
CV_EXPORTS_W double threshold( InputArray src, OutputArray dst,double thresh, double maxval, int type );(thresholding function)
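For reference, Otsu's method picks the threshold T that maximizes the between-class variance

σ_B²(T) = w0(T) · w1(T) · (μ0(T) − μ1(T))²

where w0 and w1 are the proportions of pixels in the two classes split at T, and μ0 and μ1 are their mean gray levels. This is exactly the quantity accumulated in varValueI inside OtsuAlgThreshold below.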

The corresponding code is as follows:

/*Compute the Otsu segmentation threshold*/
int OtsuAlgThreshold(const Mat image)
{
	if (image.channels() != 1)
	{
		cout << "Please input Gray-image!" << endl;
		return 0;
	}
	int T = 0; //Otsu threshold
	double varValue = 0; //running maximum of the between-class variance
	double w0 = 0; //proportion of foreground pixels
	double w1 = 0; //proportion of background pixels
	double u0 = 0; //mean gray level of the foreground
	double u1 = 0; //mean gray level of the background
	double Histogram[256] = { 0 }; //gray-level histogram: the index is the gray value, the entry is the number of pixels with that value
	int Histogram1[256] = { 0 };
	uchar *data = image.data;
	double totalNum = image.rows*image.cols; //total number of pixels
	//Build the gray-level histogram
	for (int i = 0; i < image.rows; i++)   //for clarity, rows and cols are not hoisted out of the loop
	{
		for (int j = 0; j < image.cols; j++)
		{
			Histogram[data[i*image.step + j]]++;
			Histogram1[data[i*image.step + j]]++;
		}

	}
	//***********画出图像直方图********************************
	Mat image1(255, 255, CV_8UC3);
	for (int i = 0; i < 255; i++)
	{
		Histogram1[i] = Histogram1[i] % 200;
		line(image1, Point(i, 235), Point(i, 235 - Histogram1[i]), Scalar(255, 0, 0), 1, 8, 0);
		if (i % 50 == 0)
		{
			char ch[255];
			sprintf_s(ch, "%d", i);
			string str = ch;
			putText(image1, str, Point(i, 250), 1, 1, Scalar(0, 0, 255));
		}
	}

	//***********画出图像直方图********************************
	for (int i = 0; i < 255; i++)
	{
		//每次遍历之前初始化各变量
		w1 = 0;	
		u1 = 0;	
		w0 = 0;
		u0 = 0;
		//***********背景各分量值计算**************************
		for (int j = 0; j <= i; j++) //背景部分各值计算
		{
			w1 += Histogram[j];  //背景部分像素点总数
			u1 += j * Histogram[j]; //背景部分像素总灰度和
		}
		if (w1 == 0) //背景部分像素点数为0时退出
		{
			break;
		}
		u1 = u1 / w1; //背景像素平均灰度
		w1 = w1 / totalNum; // 背景部分像素点数所占比例
		//***********背景各分量值计算**************************
		//***********前景各分量值计算**************************
		for (int k = i + 1; k < 255; k++)
		{
			w0 += Histogram[k];  //前景部分像素点总数
			u0 += k * Histogram[k]; //前景部分像素总灰度和
		}
		if (w0 == 0) //前景部分像素点数为0时退出
		{
			break;
		}
		u0 = u0 / w0; //前景像素平均灰度
		w0 = w0 / totalNum; // 前景部分像素点数所占比例
		//***********前景各分量值计算**************************
		//***********类间方差计算******************************
		double varValueI = w0 * w1*(u1 - u0)*(u1 - u0); //当前类间方差计算
		if (varValue < varValueI)
		{
			varValue = varValueI;
			T = i;
		}
	}
	//画出以T为阈值的分割线
	line(image1, Point(T, 235), Point(T, 0), Scalar(0, 0, 255), 2, 8);
	imshow("直方图", image1);
	return T;
}

//***************Otsu algorithm: adaptive threshold by maximizing the between-class variance*****************
Mat Otsusegmentation(Mat &img)
{
	cvtColor(img, img, CV_BGR2GRAY);	//imread loads BGR, so convert with CV_BGR2GRAY
	Mat imageOutput;
	Mat imageOtsu;
	int thresholdValue = OtsuAlgThreshold(img);
	cout << "类间方差为: " << thresholdValue << endl;
	threshold(img, imageOutput, thresholdValue, 255, CV_THRESH_BINARY);
	threshold(img, imageOtsu, 0, 255, CV_THRESH_OTSU); //Opencv Otsu算法;
	imshow("大津分割【效果图】", imageOutput);
	imshow("Opencv Otsu【效果图】", imageOtsu);
	return imageOutput;
}

First we compute the threshold for an image by finding the maximum between-class variance:

  • Then we apply the corresponding thresholding operation:
  • Another kind of image segmentation is interactive segmentation; here we directly use the library function:
CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect, InputOutputArray bgdModel, InputOutputArray fgdModel,  int iterCount, int mode = GC_EVAL );
img: the source image to be segmented; it must be an 8-bit, 3-channel (CV_8UC3) image and is not modified during processing;
mask: mask image of the same size as the source. It can take the following values: GC_BGD(=0) background; GC_FGD(=1) foreground; GC_PR_BGD(=2) probable background; GC_PR_FGD(=3) probable foreground.
rect: limits the region of the image to be segmented; only the part inside this rectangle is processed;
bgdModel: background model; if empty, the function creates one internally;
fgdModel: foreground model; if empty, the function creates one internally;
iterCount: number of iterations; must be greater than 0;
mode: tells grabCut which operation to perform. Options: GC_INIT_WITH_RECT(=0) initialize GrabCut with the rectangle; GC_INIT_WITH_MASK(=1) initialize GrabCut with the mask image; GC_EVAL(=2) run the segmentation.

The code is as follows:

/*grabCut (interactive segmentation)*/
void grathcutfunctiong(Mat &img)
{
	Mat bgModel, fgModel, mask;
	Rect rect;
	rect.x = 50;
	rect.y = 45;
	rect.width = img.cols - (rect.x << 1);
	rect.height = img.rows - (rect.y << 1);
	rectangle(img, rect, Scalar(0, 0, 255), 3, 8, 0);	//draw the rectangle window on the image

	//run 3 GrabCut iterations; this count can be tuned
	grabCut(img, mask, rect, bgModel, fgModel, 3, GC_INIT_WITH_RECT);
	compare(mask, GC_PR_FGD, mask, CMP_EQ);
	Mat foreground(img.size(), CV_8UC3, Scalar(255, 255, 255));
	img.copyTo(foreground, mask);
	imshow("foreground", foreground);
}
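One remark on the mask handling above: compare(mask, GC_PR_FGD, mask, CMP_EQ) keeps only the pixels labeled "probable foreground" (GC_PR_FGD = 3). If the pixels grabCut marks as definite foreground (GC_FGD = 1) should also be kept, a common alternative (an addition here, not in the original code) exploits the fact that both foreground labels are odd:

	mask = mask & 1;	//keeps GC_FGD(=1) and GC_PR_FGD(=3), drops GC_BGD(=0) and GC_PR_BGD(=2)
	img.copyTo(foreground, mask);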

The corresponding result is shown below:

  • Here is additionally the code of a watershed algorithm, for anyone who wants to try it:
Mat g_maskImage, g_scrImage;
Point prevPt(-1, -1);
static void ShowHelpText();
static void on_Mouse(int event, int x, int y, int flags, void*);
/*Watershed algorithm*/
void Watershedfunction(Mat &g_scrImage)
{
	/*Initialize the working copy, the mask and the gray image*/
	Mat scrImage, grayImage;
	g_scrImage.copyTo(scrImage);	//keep a copy of the source so it can be restored with key '2'
	cvtColor(g_scrImage, g_maskImage, COLOR_BGR2GRAY);
	cvtColor(g_maskImage, grayImage, COLOR_GRAY2BGR);
	g_maskImage = Scalar::all(0);
	/*设置鼠标回调函数*/
	setMouseCallback(WINDOW_NAME, on_Mouse, 0);
	/*轮询按键,进行处理*/
	while (1)
	{
		/*获取键值*/
		int c = waitKey(0);
		/*若按键值为ESC时,退出*/
		if ((char)c == 27)
		{
			break;
		}
		/*若按键值为2时,恢复原图*/
		if ((char)c == '2')
		{
			g_maskImage = Scalar::all(0);
			scrImage.copyTo(g_scrImage);
			imshow("image",g_scrImage);
		}
		/*若按键值为1或者空格,则进行处理*/
		if ((char)c == '1' || (char)c == ' ')
		{
			/*定义一些参数*/
			int i, j, compCount = 0;
			vector<vector<Point>> contours;
			vector<Vec4i> hierarchy;

			/*寻找轮廓*/
			findContours(g_maskImage, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
			/*轮廓为空时的处理*/
			if (contours.empty())
				continue;
			/*复制掩膜*/
			Mat maskImage(g_maskImage.size(), CV_32S);
			maskImage = Scalar::all(0);
			/*Draw each top-level contour with its own marker label*/
			for (int index = 0; index >= 0; index = hierarchy[index][0], compCount++)
				drawContours(maskImage, contours, index, Scalar::all(compCount + 1), -1, 8, hierarchy, INT_MAX);
			/*compCount为0时的处理*/
			if (compCount == 0)
				continue;
			/*生成随机颜色*/
			vector<Vec3b> colorTab;
			for (i = 0; i < compCount; i++)
			{
				int b = theRNG().uniform(0, 255);
				int g = theRNG().uniform(0, 255);
				int r = theRNG().uniform(0, 255);

				colorTab.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
			}
			/*计算处理时间并输出到窗口中*/
			double dTime = (double)getTickCount();
			watershed(scrImage, maskImage);
			dTime = (double)getTickCount() - dTime;
			printf("\t处理时间 = %gms\n",dTime*1000./getTickFrequency());

			/*双层循环,将分水岭图像遍历存入watershedImage中*/
			Mat watershedImage(maskImage.size(), CV_8UC3);
			for(i=0;i<maskImage.rows;i++)
				for (j = 0; j < maskImage.cols; j++)
				{
					int index = maskImage.at<int>(i, j);
					if (index == -1)
						watershedImage.at<Vec3b>(i, j) = Vec3b(255, 255, 255);
					else if (index <= 0 || index > compCount)
						watershedImage.at<Vec3b>(i, j) = Vec3b(0, 0, 0);
					else
						watershedImage.at<Vec3b>(i, j) = colorTab[index - 1];
				}
			watershedImage = watershedImage * 0.5 + grayImage * 0.5;
			imshow("watershed transform",watershedImage);
		}
	}
}
static void on_Mouse(int event, int x, int y, int flags, void *)
{
	/*处理鼠标不在窗口中的情况*/
	if (x < 0 || x >= g_scrImage.cols || y < 0 || y >= g_maskImage.rows)
		return;
	/*Handle mouse left-button events*/
	if (event == EVENT_LBUTTONUP || !(flags & EVENT_FLAG_LBUTTON))
		prevPt = Point(-1, -1);
	else if (event == EVENT_LBUTTONDOWN)
		prevPt = Point(x, y);
	/*鼠标左键按下并移动,绘制出白色线条*/
	else if (event == EVENT_MOUSEMOVE && (flags & EVENT_FLAG_LBUTTON))
	{
		Point pt(x, y);
		if (prevPt.x < 0)
			prevPt = pt;
		line(g_maskImage, prevPt, pt, Scalar::all(255), 5, 8, 0);
		line(g_scrImage, prevPt, pt, Scalar::all(255), 5, 8, 0);
		prevPt = pt;
		imshow(WINDOW_NAME,g_scrImage);
	}
}
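A note on calling the watershed routine: the parameter of Watershedfunction shadows the global g_scrImage that the mouse callback draws into, and setMouseCallback requires the window named WINDOW_NAME to exist beforehand. A minimal calling sketch under those assumptions (the image file name is illustrative):

	g_scrImage = imread("3.jpg");	//load into the global so on_Mouse and Watershedfunction see the same image
	namedWindow(WINDOW_NAME, CV_WINDOW_AUTOSIZE);
	imshow(WINDOW_NAME, g_scrImage);
	Watershedfunction(g_scrImage);	//draw markers with the left mouse button, then press '1' or space to run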

III. Image compression and decompression

  • (1) Huffman coding for image compression:
  • The flow chart is as follows:
  • The code is as follows:
/*Huffman coding*/
//Global variables holding the bitmap data, width, height, color table and bits per pixel
//of the image that was read in; they are global to simplify the later data access and saving
unsigned char *pBmpBuf;//pointer to the image data that was read in
int bmpWidth;//image width
int bmpHeight;//image height
int imgSpace;//space required by the image
RGBQUAD *pColorTable;//pointer to the color table
int biBitCount;//bits per pixel
char str[100];//file name
int Num[300];//occurrence count of each gray value
float Feq[300];//occurrence frequency of each gray value
unsigned char *lpBuf;//pointer to the current pixel
unsigned char *m_pDib;//DIB of the opened file
int NodeNum;	//total number of nodes in the Huffman tree
int NodeStart;	//root (start) node of the Huffman tree
struct Node 
{		//Huffman tree node
	int color;		//gray value stored in a leaf node (-1 for internal nodes)
	int lson, rson;	//left and right children of the node (-1 if absent)
	int num;		//weight of the node (basis of the coding)
	int mark;		//whether the node has already been used (1 = used, 0 = unused)
}node[600];
char CodeStr[300][300];	//code strings
int CodeLen[300];		//code lengths
bool ImgInf[8000000];	//encoded image bit stream
int InfLen;				//length of the encoded bit stream
/***********************************************************************
* 函数名称:
* readBmp()
*
*函数参数:
*  char *bmpName -文件名字及路径
*
*返回值:
*   0为失败,1为成功
*
*说明:给定一个图像文件名及其路径,读图像的位图数据、宽、高、颜色表及每像素
*      位数等数据进内存,存放在相应的全局变量中
***********************************************************************/
bool readBmp(char *bmpName)
{
	//二进制读方式打开指定的图像文件
	FILE *fp = fopen(bmpName, "rb");
	if (fp == 0)
	{
		printf("未找到指定文件!\n");
		return 0;
	}
	//跳过位图文件头结构BITMAPFILEHEADER
	fseek(fp, sizeof(BITMAPFILEHEADER), 0);
	//定义位图信息头结构变量,读取位图信息头进内存,存放在变量head中
	BITMAPINFOHEADER head;
	fread(&head, sizeof(BITMAPINFOHEADER), 1, fp);
	//获取图像宽、高、每像素所占位数等信息
	bmpWidth = head.biWidth;
	bmpHeight = head.biHeight;
	biBitCount = head.biBitCount;
	//定义变量,计算图像每行像素所占的字节数(必须是4的倍数)
	int lineByte = (bmpWidth * biBitCount / 8 + 3) / 4 * 4;
	//灰度图像有颜色表,且颜色表表项为256
	if (biBitCount == 8)
	{
		//申请颜色表所需要的空间,读颜色表进内存
		pColorTable = new RGBQUAD[256];
		fread(pColorTable, sizeof(RGBQUAD), 256, fp);
	}
	//申请位图数据所需要的空间,读位图数据进内存
	pBmpBuf = new unsigned char[lineByte * bmpHeight];
	fread(pBmpBuf, 1, lineByte * bmpHeight, fp);
	//关闭文件
	fclose(fp);
	return 1;
}
/***********************************************************************
Saving the encoded information
***********************************************************************/
//Pack the 8 bits of ImgInf starting at pos into one byte (binary to decimal)
int Change2to10(int pos) 
{
	int i, j, two = 1;
	j = 0;
	for (i = pos + 7; i >= pos; i--) 
	{
		j += two * ImgInf[i];
		two *= 2;
	}
	return j;
}
//Save the Huffman tree and header information
int saveInfo(char *writePath, int lineByte) 
{
	int i, j ;
	FILE *fout;
	fout = fopen(writePath, "w");
	fprintf(fout, "%d %d %d\n", NodeStart, NodeNum, InfLen);//输出起始节点、节点总数、图像所占空间
	for (i = 0; i < NodeNum; i++) 
	{		//输出Huffman树
		fprintf(fout, "%d %d %d\n", node[i].color, node[i].lson, node[i].rson);
	}
	fclose(fout);
	return 0;
}
//保存文件
bool saveBmp(char *bmpName, unsigned char *imgBuf, int width, int height,
	int biBitCount, RGBQUAD *pColorTable)
{

	//如果位图数据指针为0,则没有数据传入,函数返回
	if (!imgBuf)
		return 0;
	//颜色表大小,以字节为单位,灰度图像颜色表为1024字节,彩色图像颜色表大小为0
	int colorTablesize = 0;
	if (biBitCount == 8)
		colorTablesize = 1024;
	//待存储图像数据每行字节数为4的倍数
	int lineByte = (width * biBitCount / 8 + 3) / 4 * 4;
	//以二进制写的方式打开文件
	FILE *fp = fopen(bmpName, "wb");
	if (fp == 0) return 0;
	//申请位图文件头结构变量,填写文件头信息
	BITMAPFILEHEADER fileHead;
	fileHead.bfType = 0x4D42;//bmp类型
	//bfSize是图像文件4个组成部分之和
	fileHead.bfSize = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) + colorTablesize + lineByte * height;
	fileHead.bfReserved1 = 0;
	fileHead.bfReserved2 = 0;
	//bfOffBits是图像文件前三个部分所需空间之和
	fileHead.bfOffBits = 54 + colorTablesize;
	//写文件头进文件
	fwrite(&fileHead, sizeof(BITMAPFILEHEADER), 1, fp);
	//申请位图信息头结构变量,填写信息头信息
	BITMAPINFOHEADER head;
	head.biBitCount = biBitCount;
	head.biClrImportant = 0;
	head.biClrUsed = 0;
	head.biCompression = 0;
	head.biHeight = height;
	head.biPlanes = 1;
	head.biSize = 40;
	head.biSizeImage = lineByte * height;
	head.biWidth = width;
	head.biXPelsPerMeter = 0;
	head.biYPelsPerMeter = 0;
	//写位图信息头进内存
	fwrite(&head, sizeof(BITMAPINFOHEADER), 1, fp);
	//如果灰度图像,有颜色表,写入文件 
	if (biBitCount == 8)
		fwrite(pColorTable, sizeof(RGBQUAD), 256, fp);
	//写位图数据进文件
	fwrite(imgBuf, InfLen / 8, 1, fp);
	//关闭文件
	fclose(fp);
	return 1;
}
/*********************
Huffman编码图像解码
*********************/
//读入编码图像
bool readHuffman(char *Name)
{
	int i;
	char NameStr[100];
	//读取Huffman编码信息和编码树
	strcpy(NameStr, Name);
	strcat(NameStr, ".bpt");
	FILE *fin = fopen(NameStr, "r");
	if (fin == 0) 
	{
		printf("未找到指定文件!\n");
		return 0;
	}
	fscanf(fin, "%d %d %d", &NodeStart, &NodeNum, &InfLen);
	//printf("%d %d %d\n",NodeStart,NodeNum,InfLen);
	for (i = 0; i < NodeNum; i++)
	{
		fscanf(fin, "%d %d %d", &node[i].color, &node[i].lson, &node[i].rson);
	}
	//二进制读方式打开指定的图像文件
	strcpy(NameStr, Name);
	strcat(NameStr, ".bhd");
	FILE *fp = fopen(NameStr, "rb");
	if (fp == 0)
	{
		printf("未找到指定文件!\n");
		return 0;
	}
	//跳过位图文件头结构BITMAPFILEHEADER
	fseek(fp, sizeof(BITMAPFILEHEADER), 0);
	//定义位图信息头结构变量,读取位图信息头进内存,存放在变量head中
	BITMAPINFOHEADER head;
	fread(&head, sizeof(BITMAPINFOHEADER), 1, fp);
	//获取图像宽、高、每像素所占位数等信息
	bmpWidth = head.biWidth;
	bmpHeight = head.biHeight;
	biBitCount = head.biBitCount;
	//定义变量,计算图像每行像素所占的字节数(必须是4的倍数)
	int lineByte = (bmpWidth * biBitCount / 8 + 3) / 4 * 4;
	//灰度图像有颜色表,且颜色表表项为256
	if (biBitCount == 8) {
		//申请颜色表所需要的空间,读颜色表进内存
		pColorTable = new RGBQUAD[256];
		fread(pColorTable, sizeof(RGBQUAD), 256, fp);
	}
	//申请位图数据所需要的空间,读位图数据进内存
	pBmpBuf = new unsigned char[lineByte * bmpHeight];
	fread(pBmpBuf, 1, InfLen / 8, fp);
	//关闭文件
	fclose(fp);
	return 1;
}
void HuffmanDecode()
{
	//获取编码信息
	int i, j, tmp;
	int lineByte = (bmpWidth * biBitCount / 8 + 3) / 4 * 4;
	for (i = 0; i < InfLen / 8; i++)
	{
		j = i * 8 + 7;
		tmp = *(pBmpBuf + i);
		while (tmp > 0)
		{
			ImgInf[j] = tmp % 2;
			tmp /= 2;
			j--;
		}
	}
	//解码
	int p = NodeStart;	//遍历指针位置
	j = 0;
	i = 0;
	do
	{
		if (node[p].color >= 0)
		{
			*(pBmpBuf + j) = node[p].color;
			j++;
			p = NodeStart;
		}
		if (ImgInf[i] == 1)
			p = node[p].lson;
		else if (ImgInf[i] == 0)
			p = node[p].rson;
		i++;
	} while (i <= InfLen);
}
/*********************
Huffman编码
*********************/
//Huffman编码初始化
void HuffmanCodeInit()
{
	int i;
	for (i = 0; i < 256; i++)//灰度值记录清零
		Num[i] = 0;
	//初始化哈夫曼树
	for (i = 0; i < 600; i++)
	{
		node[i].color = -1;
		node[i].lson = node[i].rson = -1;
		node[i].num = -1;
		node[i].mark = 0;
	}
	NodeNum = 0;
}
//深搜遍历Huffman树获取编码值
char CodeTmp[300];
void dfs(int pos, int len)
{
	//Traverse the left child
	if (node[pos].lson != -1)
	{
		CodeTmp[len] = '1';
		dfs(node[pos].lson, len + 1);
	}
	else
	{
		if (node[pos].color != -1)
		{
			CodeLen[node[pos].color] = len;
			CodeTmp[len] = '\0';
			strcpy(CodeStr[node[pos].color], CodeTmp);
		}
	}
	//Traverse the right child
	if (node[pos].rson != -1)
	{
		CodeTmp[len] = '0';
		dfs(node[pos].rson, len + 1);
	}
	else {
		if (node[pos].color != -1)
		{
			CodeLen[node[pos].color] = len;
			CodeTmp[len] = '\0';
			strcpy(CodeStr[node[pos].color], CodeTmp);
		}
	}
}
//寻找值最小的节点
int MinNode()
{
	int i, j = -1;
	for (i = 0; i < NodeNum; i++)
		if (!node[i].mark)
			if (j == -1 || node[i].num < node[j].num)
				j = i;
	if (j != -1)
	{
		NodeStart = j;
		node[j].mark = 1;
	}
	return j;
}
//编码主函数
void HuffmanCode()
{
	int i, j, k, a, b;
	for (i = 0; i < 256; i++)
	{//创建初始节点
		Feq[i] = (float)Num[i] / (float)(bmpHeight * bmpWidth);//计算灰度值频率
		if (Num[i] > 0)
		{
			node[NodeNum].color = i;
			node[NodeNum].num = Num[i];
			node[NodeNum].lson = node[NodeNum].rson = -1;	//叶子节点无左右儿子
			NodeNum++;
		}
	}
	while (1)
	{	//找到两个值最小的节点,合并成为新的节点
		a = MinNode();
		if (a == -1)
			break;
		b = MinNode();
		if (b == -1)
			break;
		//构建新节点
		node[NodeNum].color = -1;
		node[NodeNum].num = node[a].num + node[b].num;
		node[NodeNum].lson = a;
		node[NodeNum].rson = b;
		NodeNum++;
	}
	//根据建好的Huffman树编码(深搜实现)
	dfs(NodeStart, 0);
	//屏幕输出编码
	int sum = 0;
	printf("Huffman编码信息如下:\n");
	for (i = 0; i < 256; i++)
		if (Num[i] > 0)
		{
			sum += CodeLen[i] * Num[i];
			printf("灰度值:%3d  频率: %f  码长: %2d  编码: %s\n",i,Feq[i],CodeLen[i],CodeStr[i]);
		}
			printf("原始总码长:%d\n",bmpWidth * bmpHeight * 8);
	 		printf("Huffman编码总码长:%d\n",sum);
			printf("压缩比:%.3f : 1\n",(float)(bmpWidth * bmpHeight * 8) / (float)sum);
	for (i = 0; i < 256; i++)
		if (Num[i] > 0)
		{
			sum += CodeLen[i] * Num[i];
		}
			//记录图像信息
	InfLen = 0;
	int lineByte = (bmpWidth * biBitCount / 8 + 3) / 4 * 4;
	for (i = 0; i < bmpHeight; i++)
		for (j = 0; j < bmpWidth; j++)
		{
			lpBuf = (unsigned char *)pBmpBuf + lineByte * i + j;
			for (k = 0; k < CodeLen[*(lpBuf)]; k++)
			{
				ImgInf[InfLen++] = (int)(CodeStr[*(lpBuf)][k] - '0');
			}
		}
	//再编码数据
	j = 0;
	for (i = 0; i < InfLen;)
	{
		*(pBmpBuf + j) = Change2to10(i);
		i += 8;
		j++;
	}
}
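//Note on the numbers printed above: the compression ratio is
//    ratio = (bmpWidth * bmpHeight * 8) / sum,    sum = Σ Num[i] * CodeLen[i],
//i.e. the original 8 bits per pixel divided by the total length of the Huffman bit stream.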
/******************************
主函数
******************************/
void Huffmanfunction()
{
	int ord;//命令 
	char c;
	int i, j;
	clock_t start, finish;
	int total_time;
	//	CString str;
	while (1)
	{
		printf("本程序提供以下功能\n\n\t1.256色灰度BMP图像Huffman编码\n\t2.Huffman编码BMP文件解码\n\t3.退出\n\n请选择需要执行的命令:");
		scanf("%d%c", &ord, &c);
		if (ord == 1)
		{
			printf("\n---256色灰度BMP图像Huffman编码---\n");
			printf("\n请输入要编码图像名称:");
			scanf("%s", str);
			//读入指定BMP文件进内存
			char readPath[100];
			strcpy(readPath, str);
			strcat(readPath, ".bmp");
			if (readBmp(readPath))
			{
				int lineByte = (bmpWidth * biBitCount / 8 + 3) / 4 * 4;
				if (biBitCount == 8)
				{
					//编码初始化
					HuffmanCodeInit();
					//计算每个灰度值出现的次数 
					for (i = 0; i < bmpHeight; i++)
						for (j = 0; j < bmpWidth; j++)
						{
							lpBuf = (unsigned char *)pBmpBuf + lineByte * i + j;
							Num[*(lpBuf)] += 1;
						}
					//调用编码
					start = clock();
					HuffmanCode();
					finish = clock();
					total_time = (finish - start);
					printf("识别一张耗时: %d毫秒", total_time);
					//将图像数据存盘
					char writePath[100];
					//保存编码后的bmp
					strcpy(writePath, str);
					strcat(writePath, "_Huffman.bhd");
					saveBmp(writePath, pBmpBuf, bmpWidth, bmpHeight, biBitCount, pColorTable);
					//保存Huffman编码信息和编码树
					strcpy(writePath, str);
					strcat(writePath, "_Huffman.bpt");
					saveInfo(writePath, lineByte);
					printf("\n编码完成!编码信息保存在 %s_Huffman 文件中\n\n", str);
				}
				else
				{
					printf("本程序只支持256色BMP编码!\n");
				}
				//清除缓冲区,pBmpBuf和pColorTable是全局变量,在文件读入时申请的空间
				delete[]pBmpBuf;
				if (biBitCount == 8)
					delete[]pColorTable;
			}
			printf("\n-----------------------------------------------\n\n\n");
		}
		else if (ord == 2)
		{
			printf("\n---Huffman编码BMP文件解码---\n");
			printf("\n请输入要解码文件名称:");
			scanf("%s", str);
			//编码解码初始化
			HuffmanCodeInit();
			if (readHuffman(str))
			{	//读取文件
				HuffmanDecode();	//Huffman解码
				//将图像数据存盘
				char writePath[100];
				//保存解码后的bmp
				strcpy(writePath, str);
				strcat(writePath, "_Decode.bmp");
				InfLen = bmpWidth * bmpHeight * 8;
				saveBmp(writePath, pBmpBuf, bmpWidth, bmpHeight, biBitCount, pColorTable);
				system(writePath);
				printf("\n解码完成!保存为 %s_Decode.bmp\n\n", str);
			}
			printf("\n-----------------------------------------------\n\n\n");
		}
		else if (ord == 3)
			break;
	}
}
  • The result is shown below:

  • (2) Image compression using the DCT:
  • The flow chart is as follows:
  • The code is as follows:
/*DCT compression*/
void blkproc_DCT(Mat);	//equivalent to Matlab's blkproc(I,[8 8],'P1*x*P2',g,g'): 8x8 block DCT of the image
Mat blkproc_IDCT(Mat);	//equivalent to Matlab's blkproc(I,[8 8],'P1*x*P2',g',g): 8x8 block inverse DCT that restores the image
void regionalCoding(Mat);	//zonal coding, equivalent to Matlab's blkproc(I1,[8 8],'P1.*x',a)
void thresholdCoding(Mat);	//threshold coding, equivalent to Matlab's blkproc(I1,[8 8],'P1.*x',a)
double get_medianNum(Mat &);	//median of a block's coefficients, used by the threshold coding

void ImageCompression_DCT(Mat &ucharImg)
{
	//Mat ucharImg = imread("3.jpg", 0);	//以灰度图的形式读入原始的图像
	//imshow("srcImg", ucharImg);
	Mat doubleImg,OrignalImg;
	OrignalImg = ucharImg.clone();
	ucharImg.convertTo(doubleImg, CV_64F);	//将原始图像转换成double类型的图像,方便后面的8*8分块DCT变换
	blkproc_DCT(doubleImg);	//对原图片做8*8分块DCT变换
	//分别进行区域编码和阈值编码
	Mat doubleImgRegion, doubleImgThreshold;
	doubleImgRegion = doubleImg.clone();
	doubleImgThreshold = doubleImg.clone();
	regionalCoding(doubleImgRegion);	//对DCT变换后的系数进行区域编码
	thresholdCoding(doubleImgThreshold);	//对DCT变换后的系数进行阈值编码
	//进行逆DCT变换
	Mat ucharImgRegion, ucharImgThreshold, differenceimage;
	ucharImgRegion = blkproc_IDCT(doubleImgRegion);
	//namedWindow("RegionalCoding", CV_WINDOW_AUTOSIZE);
	imshow("RegionalCoding", ucharImgRegion);
	ucharImgThreshold = blkproc_IDCT(doubleImgThreshold);
	//namedWindow("ThresholdCoding", CV_WINDOW_AUTOSIZE);
	imshow("ThresholdCoding", ucharImgThreshold);
	absdiff(ucharImgRegion, OrignalImg, differenceimage);
	imshow("两幅图像的差值",differenceimage);

}



void blkproc_DCT(Mat doubleImgTmp)
{
	Mat ucharImgTmp;
	Mat DCTMat = Mat(8, 8, CV_64FC1);	//用于DCT变换的8*8的矩阵
	Mat DCTMatT;	//DCTMat矩阵的转置
	Mat ROIMat = Mat(8, 8, CV_64FC1);	//用于分块处理的时候在原图像上面移动
	double a = 0, q;	//DCT变换的系数
	for (int i = 0; i < DCTMat.rows; i++)
	{
		for (int j = 0; j < DCTMat.cols; j++)
		{
			if (i == 0)
			{
				a = pow(1.0 / DCTMat.rows, 0.5);
			}
			else
			{
				a = pow(2.0 / DCTMat.rows, 0.5);
			}
			q = ((2 * j + 1)*i*M_PI) / (2 * DCTMat.rows);
			DCTMat.at<double>(i, j) = a * cos(q);
		}
	}
	DCTMatT = DCTMat.t();
	//ROIMat在doubleImgTmp以8为步长移动,达到与Matlab中的分块处理函数blkproc相同的效果
	//此程序中,若图片的高或者宽不是8的整数倍的话,最后的不足8的部分不进行处理
	int rNum = doubleImgTmp.rows / 8;
	int cNum = doubleImgTmp.cols / 8;
	for (int i = 0; i < rNum; i++)
	{
		for (int j = 0; j < cNum; j++)
		{
			ROIMat = doubleImgTmp(Rect(j * 8, i * 8, 8, 8));
			ROIMat = DCTMat * ROIMat*DCTMatT;
		}
	}
	doubleImgTmp.convertTo(ucharImgTmp, CV_8U);
	imshow("DCTImg", ucharImgTmp);
}
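//Note: the loop above builds the orthonormal DCT-II basis matrix C with
//    C(i,j) = a(i) * cos( (2*j+1)*i*pi / (2*N) ),  N = 8,
//    a(0) = sqrt(1/N),  a(i) = sqrt(2/N) for i > 0,
//so every 8x8 block X is transformed as Y = C * X * C^T and restored as X = C^T * Y * C.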
Mat blkproc_IDCT(Mat doubleImgTmp)

{
	//与blkproc_DCT几乎一样,唯一的差别在于:ROIMat = DCTMatT*ROIMat*DCTMat(转置矩阵DCTMatT和DCTMat交换了位置)
	Mat ucharImgTmp;
	Mat DCTMat = Mat(8, 8, CV_64FC1);
	Mat DCTMatT;
	Mat ROIMat = Mat(8, 8, CV_64FC1);
	double a = 0, q;
	for (int i = 0; i < DCTMat.rows; i++)
	{
		for (int j = 0; j < DCTMat.cols; j++)
		{
			if (i == 0)
			{
				a = pow(1.0 / DCTMat.rows, 0.5);
			}
			else
			{
				a = pow(2.0 / DCTMat.rows, 0.5);
			}
			q = ((2 * j + 1)*i*M_PI) / (2 * DCTMat.rows);
			DCTMat.at<double>(i, j) = a * cos(q);
		}
	}
	DCTMatT = DCTMat.t();
	int rNum = doubleImgTmp.rows / 8;
	int cNum = doubleImgTmp.cols / 8;
	for (int i = 0; i < rNum; i++)
	{
		for (int j = 0; j < cNum; j++)
		{
			ROIMat = doubleImgTmp(Rect(j * 8, i * 8, 8, 8));
			ROIMat = DCTMatT * ROIMat*DCTMat;
		}
	}
	doubleImgTmp.convertTo(ucharImgTmp, CV_8U);
	return ucharImgTmp;
}
void regionalCoding(Mat doubleImgTmp)
{
	int rNum = doubleImgTmp.rows / 8;
	int cNum = doubleImgTmp.cols / 8;
	Mat ucharImgTmp;
	Mat ROIMat = Mat(8, 8, CV_64FC1);	//用于分块处理的时候在原图像上面移动
	for (int i = 0; i < rNum; i++)
	{
		for (int j = 0; j < cNum; j++)
		{
			ROIMat = doubleImgTmp(Rect(j * 8, i * 8, 8, 8));
			for (int r = 0; r < ROIMat.rows; r++)
			{
				for (int c = 0; c < ROIMat.cols; c++)
				{
					//in each 8x8 block, zero the high-frequency rows (rows with index r > 4)
					if (r > 4)
					{
						ROIMat.at<double>(r, c) = 0.0;
					}
				}
			}
		}
	}
	doubleImgTmp.convertTo(ucharImgTmp, CV_8U);
	imshow("regionalCodingImg", ucharImgTmp);
}
void thresholdCoding(Mat doubleImgTmp)
{
	int rNum = doubleImgTmp.rows / 8;
	int cNum = doubleImgTmp.cols / 8;
	double medianNumTmp = 0;
	Mat ucharImgTmp;
	Mat ROIMat = Mat(8, 8, CV_64FC1);	//用于分块处理的时候在原图像上面移动
	for (int i = 0; i < rNum; i++)
	{
		for (int j = 0; j < cNum; j++)
		{
			ROIMat = doubleImgTmp(Rect(j * 8, i * 8, 8, 8));
			medianNumTmp = get_medianNum(ROIMat);
			for (int r = 0; r < ROIMat.rows; r++)
			{
				for (int c = 0; c < ROIMat.cols; c++)
				{
					if (abs(ROIMat.at<double>(r, c)) < medianNumTmp)	//keep only coefficients at or above the block median
					{
						ROIMat.at<double>(r, c) = 0;
					}
				}
			}
		}
	}
	doubleImgTmp.convertTo(ucharImgTmp, CV_8U);
	imshow("thresholdCodingImg", ucharImgTmp);
}
double get_medianNum(Mat & imageROI)	//获取矩阵的中值
{
	vector<double> vectorTemp;
	double tmpPixelValue = 0;
	for (int i = 0; i < imageROI.rows; i++)	//将感兴趣区域矩阵拉成一个向量
	{
		for (int j = 0; j < imageROI.cols; j++)
		{
			vectorTemp.push_back(abs(imageROI.at<double>(i, j)));
		}
	}
	for (int i = 0; i < vectorTemp.size() / 2; i++)	//进行排序
	{
		for (int j = i + 1; j < vectorTemp.size(); j++)
		{
			if (vectorTemp.at(i) > vectorTemp.at(j))
			{
				double temp;
				temp = vectorTemp.at(i);
				vectorTemp.at(i) = vectorTemp.at(j);
				vectorTemp.at(j) = temp;
			}
		}
	}
	return vectorTemp.at(vectorTemp.size() / 2 - 1);	//返回中值
}
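//Note: get_medianNum only sorts enough of the coefficients to reach the middle element.
//An equivalent, more idiomatic alternative (a suggestion, not the author's code) is
//    std::nth_element(vectorTemp.begin(), vectorTemp.begin() + vectorTemp.size()/2 - 1, vectorTemp.end());
//    return vectorTemp.at(vectorTemp.size()/2 - 1);	//requires <algorithm>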

The results are shown below:

  • (3) JPEG compression of the image:
  • The library function used:
CV_EXPORTS_W Mat imdecode( InputArray buf, int flags );

The code is as follows:

/*JPEG image compression*/
double getPSNR(Mat& src1, Mat& src2, int bb = 0);
void ImageCompression_JPEG(Mat &src)
{
	//Mat src = imread("lena.png");
	cout << "origin image size: " << src.dataend - src.datastart << endl;
	cout << "height: " << src.rows << endl << "width: " << src.cols << endl << "depth: " << src.channels() << endl;
	cout << "height*width*depth: " << src.rows*src.cols*src.channels() << endl << endl;
	//(1) jpeg compression
	vector<uchar> buff;//buffer for coding
	vector<int> param = vector<int>(2);
	param[0] = CV_IMWRITE_JPEG_QUALITY;
	param[1] = 1;//default(95) 0-100
	imencode(".jpg", src, buff, param);
	cout << "coded file size(jpg): " << buff.size() << endl;//fit buff size automatically.
	Mat jpegimage = imdecode(Mat(buff), CV_LOAD_IMAGE_COLOR);
	//(2) png compression
	param[0] = CV_IMWRITE_PNG_COMPRESSION;
	param[1] = 8;//default(3)  0-9.
	imencode(".png", src, buff, param);
	cout << "coded file size(png): " << buff.size() << endl;
	Mat pngimage = imdecode(Mat(buff), CV_LOAD_IMAGE_COLOR);
	//(3) interactive jpeg compression
	char name[64];
	namedWindow("jpg");
	int q = 95;
	createTrackbar("quality", "jpg", &q, 100);
	int key = 0;
	while (key != 'q')
	{
		param[0] = CV_IMWRITE_JPEG_QUALITY;
		param[1] = q;
		imencode(".jpg", src, buff, param);
		Mat show = imdecode(Mat(buff), CV_LOAD_IMAGE_COLOR);
		double psnr = getPSNR(src, show);//get PSNR
		double bpp = 8.0*buff.size() / (show.size().area());//bits per pixel
		sprintf_s(name, "quality:%03d, %.1fdB, %.2fbpp", q, psnr, bpp);
		putText(show, name, Point(15, 50), FONT_HERSHEY_SIMPLEX, 1, CV_RGB(255, 255, 255), 2);
		imshow("jpg", show);
		key = waitKey(33);
		if (key == 's')
		{
			//(4) data writing 
			sprintf_s(name, "q%03d_%.2fbpp.png", q, bpp);
			imwrite(name, show);
			sprintf_s(name, "q%03d_%.2fbpp.jpg", q, bpp);
			param[0] = CV_IMWRITE_JPEG_QUALITY;
			param[1] = q;
			imwrite(name, src, param);
		}
	}
}
double getPSNR(Mat& src1, Mat& src2, int bb)
{
	int i, j;
	double sse, mse, psnr;
	sse = 0.0;
	Mat s1, s2;
	cvtColor(src1, s1, CV_BGR2GRAY);
	cvtColor(src2, s2, CV_BGR2GRAY);
	int count = 0;
	for (j = bb; j < s1.rows - bb; j++)
	{
		uchar* d = s1.ptr(j);
		uchar* s = s2.ptr(j);
		for (i = bb; i < s1.cols - bb; i++)
		{
			sse += ((d[i] - s[i])*(d[i] - s[i]));
			count++;
		}
	}
	if (sse == 0.0 || count == 0)
	{
		return 0;
	}
	else
	{
		mse = sse / (double)(count);
		psnr = 10.0*log10((255 * 255) / mse);
		return psnr;
	}
}
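getPSNR above computes the usual peak signal-to-noise ratio on the gray versions of the two images: MSE = SSE / N and PSNR = 10 · log10(255² / MSE) dB, so a higher value means the compressed image is closer to the original. Note that when the two images are identical (SSE = 0) the function returns 0, although the PSNR would conventionally be reported as infinite in that case.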

The result is shown below:

  • The main function is as follows:
#pragma warning(disable:4996)
#include "opencv2/opencv.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
#include "fstream"
#include "regex"
#include "string.h"
#include "time.h"
#include "Windows.h"
#include "math.h"
#include "stdio.h"
#define WINDOW_NAME  "(【程序窗口1】)"
#define MAX_CHARS 256//最大字符数
#define MAX_SIZE 1000000
#define M_PI 3.141592653
using namespace cv;
using namespace std;
const double EPS = 1e-12;
const double PI = 3.141592653;

class tmpNode
{
public:
	unsigned char uch;//取值范围是0~255,用来表示一个8位的字符
	unsigned long freq = 0;//字符出现的频率
};
int main()
{
	//Mat img = imread("3.jpg", CV_LOAD_IMAGE_GRAYSCALE);
	//Mat img = imread("3.jpg", 1);
	//Mat img = imread("3.jpg",0);
	//Mat img = imread("1.jpg");
    //
	//Mat img_1 = imread("p3-01-00_Huffman_Decode.bmp");
	//Mat differenceimage;
	//absdiff(img, img_1, differenceimage);
	/*if (img.empty())
	{
		std::cout << "图片读取失败!" << "\n";
		return -1;
	}*/

	//namedWindow("【原始图像】", CV_WINDOW_AUTOSIZE);
	//imshow("【原始图像】", img);

	//namedWindow("【压缩后解码图像】", CV_WINDOW_AUTOSIZE);
	//imshow("【压缩后解码图像】", img_1);
	
	/*Compute the histogram to judge the noise type and choose the matching filter*/
	//Mat dstImage = GaussianBlurfilterfunction(img);
	//Mat dstImage = Blurefilterfunction(img);
	//Mat dstImage = MedianBlurfilterfunction(img);
	//Mat dstImage = BilateralBlurfilterfunction(img);
	//Mat dstImage = Inversefiltering(img);
	//namedWindow("【逆滤波图像】", CV_WINDOW_AUTOSIZE);
	//imshow("【逆滤波图像】", dstImage);
	//Mat img1 = img.clone();
	//Drawing_one_dimensionalhistogram(img1);
	//Otsusegmentation(img);
	//Graphcutfunction(img);
	//Watershedfunction(img);
	//ImageCompression();
	//ImageCompression_DCT(img);
	//ImageCompression_JPEG(img);
	//Huffmanfunction();
	//Mat dstImg = FrequencyDomainGaussFiltering(img);
	//Drawing_one_dimensionalhistogram(dstImg);
	//grathcutfunctiong(img);
	//namedWindow("【原始图像】", CV_WINDOW_AUTOSIZE);
	//imshow("【原始图像】", img);
	
	//namedWindow("【差值图像】", CV_WINDOW_AUTOSIZE);
	//imshow("【差值图像】", differenceimage);
	waitKey(0);
	//destroyAllWindows();
	return 0;
}

End.