航天宏图杯比赛心得:特征提取的一些算法以及PIE与OpenCV结合编程说明

本算法基于之前利用最小生成树算法生成的面向对象图斑来进行特征提取。

也即:特征提取仅仅针对一个对象(图斑)

影像分割是我们把影像分割成了一个一个的小图斑,然后我们的特征提取是对每个图斑进行光谱特征、形状特征、纹理特征的提取。

遥感影像中的地物对象的特征主要包括光谱特征、形状特征以及纹理特征等。面向对象分类主要根据这些特征来识别和分辨地物对象,在影像分类的过程中起着至关重要的作用。在面向对象的整个分类过程中,首先是对遥感影像进行多尺度分割得到影像对象,其次计算这些影像对象的特征,利用这些特征来对影像对象进行分类。

①光谱特征:

光谱特征提取深度学习 什么是光谱特征提取_光谱特征提取深度学习

②形状特征:
形状特征指的是对象区域的几何特征。在高分辨率影像中,我们分割成一个一个的对象,对象的边界明显,通过利用对象的几何特征可以较好的识别和提取地物的信息。

光谱特征提取深度学习 什么是光谱特征提取_特征提取_02

③纹理特征:

纹理指的是遥感影像局部的某些性质,代表着局部间像元与像元之间一种关系的量化,是纹理基元根据一定的规律重新统计排列组合而成的。一般来说,纹理主要有三个要素:一是纹理内部的任何一块区域都有相似的结构特征;二是纹理的内部存在着某种序列性;三是指其序列都是按照一定的规律排列所成。纹理一般分为人工纹理和自然纹理。人工纹理是由点、线、数字等自然背景符号排列组合而成的;自然纹理就是指草地、森林等自然景物所具有的规则排列的现象。纹理特征是指对纹理性质定量描述,常用的纹理特征的描述方法有统计方法和结构方法。目前,使用较多且比较成熟的是统计方法,即通过利用图像统计的特征值描述纹理特征。统计方法主要包含有灰度共生矩阵、分行模型、自相关函数等方法,其中,灰度共生矩阵是现阶段较为成熟有效的提取纹理特征的方法,该方法主要基于估计二阶组合条件概率密度函数。

光谱特征提取深度学习 什么是光谱特征提取_光谱特征提取深度学习_03


光谱特征提取深度学习 什么是光谱特征提取_光谱特征提取深度学习_04

主要特征提取函数如下:

// Mean spectral value ("brightness") of one object: averages the pixel values
// of every band in `buffer` over all pixels of the object.
// pointsVecI : pixel coordinates of the object, in full-image coordinates.
// buffer     : one raster block per band, each covering the object's bounding
//              box (nXSize wide), read with offset (nXOff, nYOff).
// nXSize     : width of the buffered block, used to linearize 2D coordinates.
// Returns 0 when the object or the band list is empty (avoids division by zero).
float AttributeGenerator::brightness(vector<Point> pointsVecI, vector<uchar*> buffer, int nXSize,int nXOff,int nYOff) const
{
	if (pointsVecI.empty() || buffer.empty())
		return 0.0f;

	float refValue = 0;
	for (size_t i = 0; i < pointsVecI.size(); ++i)
	{
		// Translate the full-image coordinate to an offset inside the block
		// once per pixel instead of once per (pixel, band) pair.
		const int idx = (pointsVecI[i].y - nYOff) * nXSize + (pointsVecI[i].x - nXOff);
		for (size_t k = 0; k < buffer.size(); ++k)
		{
			refValue += SRCVAL(buffer[k], _dataType, idx);
		}
	}

	return refValue / (buffer.size() * pointsVecI.size());
}

float AttributeGenerator::layer_mean(int k, vector<Point> pointsVecI, vector<uchar*> buffer, int nXSize, int nXOff, int nYOff) const
{
	float refValue = 0;
	for (int i = 0; i<pointsVecI.size(); ++i)
	{
		refValue += SRCVAL(buffer[k], _dataType, (pointsVecI[i].y-nYOff) * nXSize + (pointsVecI[i].x-nXOff));
	}
	return    refValue / pointsVecI.size();
}

// Standard deviation of the k-th band over the object's pixels
// (population form: divides by N, not N-1).
// Returns 0 for an empty object (avoids division by zero).
float AttributeGenerator::layer_stddev(int k, vector<Point> pointsVecI, vector<uchar*> buffer, int nXSize, int nXOff, int nYOff) const
{
	if (pointsVecI.empty())
		return 0.0f;

	float Mean = 0;
	float refValue = 0;
	for (size_t i = 0; i < pointsVecI.size(); ++i)
	{
		Mean += SRCVAL(buffer[k], _dataType, (pointsVecI[i].y - nYOff) * nXSize + (pointsVecI[i].x - nXOff));
	}
	Mean /= pointsVecI.size();
	for (size_t i = 0; i < pointsVecI.size(); ++i)
	{
		// Read each pixel once and square the deviation (the original
		// evaluated SRCVAL twice per pixel here).
		const float diff = Mean - SRCVAL(buffer[k], _dataType, (pointsVecI[i].y - nYOff) * nXSize + (pointsVecI[i].x - nXOff));
		refValue += diff * diff;
	}
	return sqrt(refValue / pointsVecI.size());
}

// Skewness of the k-th band over the object's pixels.
// Computed as sum((mean - v)^3) / (sum((mean - v)^2))^(3/2); note that both
// sums are raw (not divided by N), matching the original implementation, so
// this is a scaled variant of the textbook skewness coefficient.
// Returns 0 for an empty object or a constant (zero-variance) region.
float AttributeGenerator::skewness(int k, vector<Point> pointsVecI, vector<uchar*> buffer, int nXSize, int nXOff, int nYOff) const
{
	if (pointsVecI.empty())
		return 0.0f;

	float mean = 0, var = 0;
	float refValue = 0;
	for (size_t i = 0; i < pointsVecI.size(); ++i)
	{
		mean += SRCVAL(buffer[k], _dataType, (pointsVecI[i].y - nYOff) * nXSize + (pointsVecI[i].x - nXOff));
	}
	mean /= pointsVecI.size();
	// Accumulate squared and cubed deviations in a single pass, reading each
	// pixel value once (the original re-evaluated SRCVAL up to five times
	// per pixel across two extra loops).
	for (size_t i = 0; i < pointsVecI.size(); ++i)
	{
		const float diff = mean - SRCVAL(buffer[k], _dataType, (pointsVecI[i].y - nYOff) * nXSize + (pointsVecI[i].x - nXOff));
		var += diff * diff;
		refValue += diff * diff * diff;
	}
	if (var != 0) {
		// Divide by var^(3/2), exactly as the original (sqrt(var))^3.
		refValue /= ((sqrt(var))*(sqrt(var))*(sqrt(var)));
	}
	else
		refValue = 0;
	return refValue;
}

// Gray-Level Co-occurrence Matrix (GLCM) texture features for one object.
//
// NOTE(review): this body does NOT match its declared signature — it reads a
// `param` struct (param.buffer, param.pointsVecI, param.nXSize, param.nYSize,
// param.dataType) that is not among the parameters, calls `.maxVal`/`.minVal`
// on `buffer[k]` which is a plain uchar*, and returns a QVector<qreal> from a
// function declared to return float. It appears pasted from a different
// (Qt-based) revision of the class and will not compile as-is. TODO: reconcile
// with the vector<Point>/uchar* interface used by the functions above.
float AttributeGenerator::GLCM(int k, vector<Point> pointsVecI, vector<uchar*> buffer, int nXSize) const
{

	// Quantize the band's value range into 16 gray levels.
	const int SIXTEEN = 16;
	const int blockCount = (buffer[k].maxVal - buffer[k].minVal) / SIXTEEN + 1;
	const double blockOffset = param.buffer.minVal;

	// Pixel-pair offset derived from the requested co-occurrence angle.
	QPoint _offset = estimateOffset(param.buffer.angle);

	// One slot per Haralick feature name registered under "GLCM".
	QVector<qreal> refValue;

	refValue.clear();
	refValue.resize(_mapHaralickNames["GLCM"].size());

	// For every object pixel, compute its co-occurrence partner pixel.
	QVector<QPoint> pointsAnother(param.pointsVecI.size());
	std::transform(param.pointsVecI.begin(), param.pointsVecI.end(), pointsAnother.begin(), [&](const QPoint &pt)->QPoint
	{ return QPoint(pt.x() + _offset.x(), pt.y() + _offset.y()); });

	// Build the 16x16 co-occurrence histogram over valid pixel pairs.
	QVector<double> glcm(SIXTEEN*SIXTEEN, 0);
	int count = 0;
	for (int i = 0; i<param.pointsVecI.size(); ++i)
	{
		// Skip pairs whose partner falls outside the image block.
		if (pointsAnother[i].x() >= 0 && pointsAnother[i].x() < param.nXSize && pointsAnother[i].y() >= 0 && pointsAnother[i].y() < param.nYSize)
		{
			int level_1 = (SRCVAL(param.buffer.buf, param.dataType, param.pointsVecI[i].y() * param.nXSize
				+ param.pointsVecI[i].x()) - blockOffset) / blockCount;
			int level_2 = (SRCVAL(param.buffer.buf, param.dataType, pointsAnother[i].y() * param.nXSize
				+ pointsAnother[i].x()) - blockOffset) / blockCount;
			++glcm[level_1*SIXTEEN + level_2];
			++count;
		}
	}

	// No valid pixel pair: return the zero-initialized feature vector.
	if (count == 0)
	{
		return refValue;
	}

	// Normalize counts into joint probabilities.
	for (int i = 0; i<glcm.size(); ++i)
		glcm[i] /= count;

	// First pass over the matrix: accumulate the features that only need
	// the raw probabilities. By the formulas used these look like the
	// standard Haralick statistics:
	//   [0] homogeneity (inverse difference moment), [1] contrast,
	//   [2] dissimilarity, [5] entropy, [6] energy (angular second moment);
	// mean1/mean2 are the row/column marginal means.
	double mean1 = 0.0;
	double mean2 = 0.0;
	double var1 = 0.0;
	double var2 = 0.0;
	for (int j = 0; j < SIXTEEN; ++j)
	{
		for (int k = 0; k < SIXTEEN; ++k)
		{
			refValue[0] += glcm[j*SIXTEEN + k] / (1 + (j - k)*(j - k));
			refValue[1] += glcm[j*SIXTEEN + k] * (j - k)*(j - k);
			refValue[2] += glcm[j*SIXTEEN + k] * sqrt(static_cast<double>((j - k)*(j - k)));
			mean1 += glcm[j*SIXTEEN + k] * j;
			mean2 += glcm[j*SIXTEEN + k] * k;
			// Guard against log(0): near-zero cells contribute no entropy.
			refValue[5] += fabs(glcm[j*SIXTEEN + k])<0.00001 ? 0 : (-glcm[j*SIXTEEN + k] * log(glcm[j*SIXTEEN + k]));
			refValue[6] += glcm[j*SIXTEEN + k] * glcm[j*SIXTEEN + k];
		}
	}

	// Second pass: row/column marginal variances (needed for correlation).
	for (int j = 0; j < SIXTEEN; ++j)
	{
		for (int k = 0; k < SIXTEEN; ++k)
		{
			var1 += glcm[j*SIXTEEN + k] * (j - mean1)*(j - mean1);
			var2 += glcm[j*SIXTEEN + k] * (k - mean2)*(k - mean2);
		}
	}

	// [3] GLCM mean, [4] GLCM standard deviation (row marginal).
	refValue[3] = mean1;
	refValue[4] = sqrt(var1);

	// Third pass: [7] correlation. NOTE(review): divides by sqrt(var1*var2)
	// without a zero-variance guard — a constant region would divide by zero.
	for (int j = 0; j < SIXTEEN; ++j)
	{
		for (int k = 0; k < SIXTEEN; ++k)
		{
			refValue[7] += glcm[j*SIXTEEN + k] * (j - mean1)*(k - mean2) / sqrt(var1*var2);
		}
	}

	return refValue;
}

计算过程如下:

// Computes per-object attributes and persists them.
// For every feature in the shapefile layer:
//   - reads the object's bounding-box pixel block (one buffer per band),
//   - computes spectral attributes (brightness, per-band mean/stddev/skewness,
//     all scaled by 100) and geometric attributes (area, perimeter),
//   - writes the values into the feature's fields and appends one
//     tab-separated line to the text file at `output`.
// Returns false when the output file or the shapefile layer cannot be opened.
bool AttributeGenerator::computeAttributes(CString output)
{
	string str(output.GetBuffer(output.GetLength()));
	output.ReleaseBuffer();  // every GetBuffer() must be paired with ReleaseBuffer()
	const char*p = str.c_str();

	FILE* fpattr;
	fpattr = fopen(p, "w");
	if (fpattr == NULL)  // fail fast instead of crashing on fprintf() below
	{
		cout << ("Open attribute output file failed!");
		return false;
	}
	//fprintf(fpattr, "序号\t像素总数\t亮度\t第一波段均值\t第二波段均值\t第三波段均值\t第一波段标准差\t第二波段标准差\t第三波段标准差\t第一波段倾斜度\t第二波段倾斜度\t第三波段倾斜度\n");
	double adfGeoTransform[6];
	_poImageDS->GetGeoTransform(adfGeoTransform);

	int index = 0;

	// Per-band global min/max of the whole image (kept for parity with the
	// original; not consumed below).
	vector<double> minBandVal(_bandCount), maxBandVal(_bandCount);
	for (int k = 0; k<_bandCount; ++k)
	{
		double dMinMax[2];
		_poImageDS->GetRasterBand(k + 1)->ComputeRasterMinMax(FALSE, dMinMax);
		minBandVal[k] = dMinMax[0];
		maxBandVal[k] = dMinMax[1];
	}

	int NObjs = _objectCount;
	OGRLayer *poLayer = _poGeometryDS->GetLayer(0);
	if (poLayer == NULL)
	{
		cout << ("Open Shapefile Layer failed!");
		fclose(fpattr);  // don't leak the FILE* on the error path
		return false;
	}
	OGRFeature *poFeature = NULL;
	poLayer->ResetReading();
	while (NObjs>0 && (poFeature = poLayer->GetNextFeature()) != NULL )
	{
		// The GridCode field links the vector feature to its raster object id.
		int ind = poFeature->GetFieldIndex("GridCode");
		index = poFeature->GetFieldAsInteger(ind);

		// Bounding box of the object inside the image, in pixel coordinates.
		int nXOff = _ObjetsBoxs[index].xmin;
		int nYOff = _ObjetsBoxs[index].ymin;
		int nXSize = _ObjetsBoxs[index].xmax - nXOff + 1;
		int nYSize = _ObjetsBoxs[index].ymax - nYOff + 1;
		int objectID = index;
		vector<Point> points;
		points = _Objects[index];
		int pixelCount = points.size();

		// Compute Area and Border_length from the polygon's exterior ring.
		OGRPolygon*     polygon = _geometryObjects[objectID];
		OGRLinearRing*  linearRing = polygon->getExteriorRing();
		double          area = polygon->get_Area();
		double          border_lenghth = linearRing->get_Length();
		int             ringPointCount = linearRing->getNumPoints();
		OGRRawPoint*    ringPoints = new OGRRawPoint[ringPointCount];
		linearRing->getPoints(ringPoints);
		// NOTE(review): ringPointsVec is filled but never used afterwards, and
		// the lambda returns Point into a CvPoint2D32f vector — verify the
		// intended element type before reusing this data.
		vector<CvPoint2D32f> ringPointsVec(ringPointCount);
		std::transform(ringPoints, ringPoints + ringPointCount, ringPointsVec.begin(), [&](const OGRRawPoint& ptRaw)->Point
		{ return Point(ptRaw.x, ptRaw.y); });
		delete[]ringPoints;

		// Read the object's bounding-box block for every band.
		vector<uchar*> buffer(_bandCount);
		for (int k = 0; k<_bandCount; ++k)
		{
			buffer[k] = new uchar[nXSize*nYSize*_dataSize];
			GDALRasterBand *poBand = _poImageDS->GetRasterBand(k + 1);
			poBand->RasterIO(GF_Read, nXOff, nYOff, nXSize, nYSize, buffer[k], nXSize, nYSize, _dataType, 0, 0);
		}

		// Spectral attributes, scaled by 100 before storage.
		float bright = brightness(points, buffer, nXSize,nXOff,nYOff)*100;

		float mean1 = layer_mean(0, points, buffer, nXSize,nXOff,nYOff)*100;
		float mean2 = layer_mean(1, points, buffer, nXSize, nXOff, nYOff)*100;
		float mean3 = layer_mean(2, points, buffer, nXSize, nXOff, nYOff)*100;
		float mean4 = 0.0;
		if (_bandCount > 3) {
			mean4 = layer_mean(3, points,buffer, nXSize, nXOff, nYOff) * 100;
		}
		float stddev1 = layer_stddev(0, points, buffer, nXSize, nXOff, nYOff)*100;
		float stddev2 = layer_stddev(1, points, buffer, nXSize, nXOff, nYOff)*100;
		float stddev3 = layer_stddev(2, points, buffer, nXSize, nXOff, nYOff)*100;
		float stddev4 = 0.0;
		if (_bandCount > 3) {
			stddev4 = layer_stddev(3, points, buffer, nXSize, nXOff, nYOff) * 100;
		}
		float skewness1 = skewness(0, points, buffer, nXSize, nXOff, nYOff)*100;
		float skewness2 = skewness(1, points, buffer, nXSize, nXOff, nYOff)*100;
		float skewness3 = skewness(2, points, buffer, nXSize, nXOff, nYOff)*100;
		float skewness4 = 0.0;
		if (_bandCount > 3) {
			skewness4 = skewness(3, points, buffer, nXSize, nXOff, nYOff) * 100;
		}

		// Write all attribute fields on the in-memory feature, then persist it
		// with ONE SetFeature() call (the original rewrote the feature after
		// every single field, causing 15 redundant layer writes per object).
		int nFeature;

		nFeature = poFeature->GetFieldIndex("PixelCount");
		poFeature->SetField(nFeature, pixelCount);
		nFeature = poFeature->GetFieldIndex("Area");
		poFeature->SetField(nFeature, area);
		nFeature = poFeature->GetFieldIndex("Perimeter");
		poFeature->SetField(nFeature, border_lenghth);
		nFeature = poFeature->GetFieldIndex("brightness");
		poFeature->SetField(nFeature, bright);
		nFeature = poFeature->GetFieldIndex("MeanR");
		poFeature->SetField(nFeature, mean1);
		nFeature = poFeature->GetFieldIndex("MeanG");
		poFeature->SetField(nFeature, mean2);
		nFeature = poFeature->GetFieldIndex("MeanB");
		poFeature->SetField(nFeature, mean3);
		nFeature = poFeature->GetFieldIndex("StddevR");
		poFeature->SetField(nFeature, stddev1);
		nFeature = poFeature->GetFieldIndex("StddevG");
		poFeature->SetField(nFeature, stddev2);
		nFeature = poFeature->GetFieldIndex("StddevB");
		poFeature->SetField(nFeature, stddev3);
		nFeature = poFeature->GetFieldIndex("SkewR");
		poFeature->SetField(nFeature, skewness1);
		nFeature = poFeature->GetFieldIndex("SkewG");
		poFeature->SetField(nFeature, skewness2);
		nFeature = poFeature->GetFieldIndex("SkewB");
		poFeature->SetField(nFeature, skewness3);
		if (_bandCount > 3) {
			// Fourth band (NIR) attributes only exist on 4+-band imagery.
			nFeature = poFeature->GetFieldIndex("SkewNIR");
			poFeature->SetField(nFeature, skewness4);
			nFeature = poFeature->GetFieldIndex("StddevNIR");
			poFeature->SetField(nFeature, stddev4);
			nFeature = poFeature->GetFieldIndex("MeanNIR");
			poFeature->SetField(nFeature, mean4);
		}
		nFeature = poFeature->GetFieldIndex("Label");
		poFeature->SetField(nFeature, 0);
		poLayer->SetFeature(poFeature);

		// Mirror the record into the tab-separated text file.
		fprintf(fpattr, "%d\t%d\t%.10f\t%.10f\t%.10f\t%.10f\t%.10f\t%.10f\t%.15f\t%.15f\t%.15f\t%.10f\t%.10f\t%.10f\n", index, pixelCount,area, border_lenghth,bright,mean1,mean2,mean3,stddev1,stddev2,stddev3,skewness1,skewness2,skewness3);

		//compute texture attributes


		//compute geom attributes


		for (int k = 0; k<_bandCount; ++k)
			delete[](buffer[k]);
		// GetNextFeature() transfers ownership to the caller; the original
		// leaked one OGRFeature per object.
		OGRFeature::DestroyFeature(poFeature);
		NObjs--;
	}
	fclose(fpattr);
	OGRErr err = poLayer->SyncToDisk();
	return true;
}

通过计算并写入shp以及txt文件来保存我们的特征提取结果。

与OpenCV结合:

首先在Release模式下对工程执行"生成"(Build),得到一个exe

光谱特征提取深度学习 什么是光谱特征提取_面向对象_05

然后添加继承类:

class OpenCVTestCommand : DesktopCommand
    {
        /// <summary>
        /// Constructor: sets the command's caption and name shown in PIE.
        /// </summary>
        public OpenCVTestCommand()
        {
            this.Caption = "OpenCVTestCommand";
            this.Name = "OpenCVTestCommand";
        }

        /// <summary>
        /// Click handler: shows the segmentation dialog and, when confirmed,
        /// launches the external feature-extraction executable with the
        /// four text-box values as command-line arguments.
        /// </summary>
        public override void OnClick()
        {
            Form2 Segmentation = new Form2();

            if (Segmentation.ShowDialog() == DialogResult.OK)
            {
                MessageBox.Show("Success");

                // Path of the console exe to launch; replace with your own
                // Release build output path.
                string exe_path = "ConsoleApplication2.exe";
                string[] the_args = { Segmentation.textBox1.Text, Segmentation.textBox2.Text, Segmentation.textBox3.Text, Segmentation.textBox4.Text };

                bool result = StartProcess(exe_path, the_args);
                if (result == false)
                {
                    System.Windows.Forms.MessageBox.Show("执行失败!");
                }

                Segmentation.Close();
            }
        }

        /// <summary>
        /// Starts an external process with the given arguments.
        /// Arguments containing spaces are quoted so paths survive
        /// command-line splitting, and launch failures are reported
        /// (the original always returned true, making the caller's
        /// failure branch dead code).
        /// </summary>
        public bool StartProcess(string exePath, params string[] args)
        {
            string s = "";
            foreach (string arg in args)
            {
                // Quote each argument so paths with spaces are passed intact.
                string a = arg ?? "";
                s = s + (a.Contains(" ") ? "\"" + a + "\"" : a) + " ";
            }
            s = s.Trim();
            try
            {
                Process process = new Process();
                ProcessStartInfo startInfo = new ProcessStartInfo(exePath, s);
                process.StartInfo = startInfo;
                // Process.Start() returns false when no new process started.
                return process.Start();
            }
            catch (Exception)
            {
                // e.g. executable not found or access denied.
                return false;
            }
        }
    }

在exe_path中填入自己生成exe的路径,并且在the_args中填入输入的参数
接着在PIE主界面中添加按钮,在按钮点击事件的响应函数中填入:

private void toolStripButton_Segmentation_Click(object sender, EventArgs e)
        {
            // Build the command, bind it to the main map control, then run it.
            ICommand segmentationCommand = new OpenCVTestCommand();
            segmentationCommand.OnCreate(mapControlMain);
            segmentationCommand.OnClick();
        }

即可成功调用!