GPU version (requires OpenCV built with the CUDA modules):
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/cudaimgproc.hpp>
int main() {
cv::Mat img = cv::imread("image.png", cv::IMREAD_GRAYSCALE);
cv::cuda::GpuMat dst, src;
src.upload(img);//copy the host image into GPU memory
cv::Ptr<cv::cuda::CLAHE> ptr_clahe = cv::cuda::createCLAHE(5.0, cv::Size(8, 8));//clip limit 5.0, 8x8 tile grid
ptr_clahe->apply(src, dst);
cv::Mat result;
dst.download(result);//copy the result back to the host
cv::imshow("result", result);
cv::waitKey();
return 0;
}
Reading an image, creating a matrix, and processing pixels one by one:
#include <opencv2/opencv.hpp>
using namespace cv;
int main() {
// cv::Mat img = cv::imread("C:\\Users\\86135\\Desktop\\1.png");
cv::Mat img = cv::Mat(618,618,CV_8UC3,cv::Scalar(255,255,255));
for(int i=0;i<img.rows;i++)
{
for(int j=0;j<img.cols;j++)
{
img.at<cv::Vec3b>(i,j)[0] = 0;//set channel 0 (blue) of every pixel to 0
img.at<cv::Vec3b>(i,j)[1] = 0;//channel 1 (green)
img.at<cv::Vec3b>(i,j)[2] = 255;//channel 2 (red): the image becomes pure red
}
}
cv::imshow("1",img);
cv::waitKey(0);
return 0;
}
Cropping an image:
cv::Mat img = image(cv::Range(16,872), cv::Range(4359,5209));//row (y) range and column (x) range; cv::Range is half-open [start, end)
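A complete minimal sketch (the file path is a placeholder, and the ranges assume the source image is large enough). Note that the returned Mat shares memory with the source, so clone() it if an independent copy is needed:
#include <opencv2/opencv.hpp>
int main() {
cv::Mat image = cv::imread("big_image.png");
if (image.empty()) return -1;
//rows (y) 16..871, cols (x) 4359..5208
cv::Mat roi = image(cv::Range(16, 872), cv::Range(4359, 5209));
cv::Mat cropped = roi.clone(); //detach from the source buffer
cv::imshow("cropped", cropped);
cv::waitKey(0);
return 0;
}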
Assigning values by splitting channels:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main() {
// cv::Mat img = cv::imread("C:\\Users\\86135\\Desktop\\1.png");
cv::Mat img = cv::Mat(618,618,CV_8UC3,cv::Scalar(255,255,255));
vector<cv::Mat> ms;
//split the channels
cv::split(img,ms);//split img into per-channel Mats stored in ms
ms[0] = cv::Scalar(0);
ms[1] = cv::Scalar(255);
ms[2] = cv::Scalar(0);
//merge the channels back into img
cv::merge(ms,img);
cv::imshow("1",img);
cv::waitKey(0);
return 0;
}
Reading a video:
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
VideoCapture cap("D:\\test.mp4");
if (!cap.isOpened()) return -1;
while (true)
{
Mat frame;
cap >> frame;
if (frame.empty()) break;//stop at the end of the video
imshow("frame", frame);
waitKey(1);
}
cap.release();
destroyAllWindows();
return 0;
}
Converting to grayscale and brightening:
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
Mat img = imread("C:\\Users\\86135\\Desktop\\1.png");
Mat dst;
// cvtColor(img,dst,COLOR_BGR2GRAY);
convertScaleAbs(img,dst,2,0);//brighten: dst = saturate(2*img + 0)
imshow("",dst);
waitKey(0);
destroyAllWindows();
return 0;
}
Drawing shapes:
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
Mat img = imread("C:\\Users\\86135\\Desktop\\1.png");
//line
line(img,Point(100,30),Point(210,180),Scalar(0,0,255),3);
//circle
circle(img,Point(100,100),20,Scalar(255,0,0),3);
//ellipse: center, axes, rotation angle, start/end angle
ellipse(img,Point(200,200),Size(100,50),0,0,360,Scalar(0,255,0),3);
//rectangle
rectangle(img,Point(0,0),Point(100,100),Scalar(0,0,200),3);//image, top-left corner, bottom-right corner, color, thickness
imshow("",img);
waitKey(0);
destroyAllWindows();
return 0;
}
Drawing a polygon:
#include <vector>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
Mat img = imread("C:\\Users\\86135\\Desktop\\1.png");
vector<Point> pts;
pts.push_back(Point(10,50));
pts.push_back(Point(20,50));
pts.push_back(Point(70,130));
pts.push_back(Point(130,150));
polylines(img,pts, true,Scalar(0,255,0),2,LINE_AA);
imshow("",img);
waitKey(0);
return 0;
}
Drawing text:
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
Mat img = imread("C:\\Users\\86135\\Desktop\\1.png");
putText(img,"hello,world",Point (500,100),FONT_HERSHEY_SIMPLEX,2,Scalar (255,0,0),3);
imshow("",img);
waitKey(0);
return 0;
}
Binarization:
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
Mat img = imread("C:\\Users\\86135\\Desktop\\1.png",0);
Mat bi_img;
threshold(img,bi_img,0,255,THRESH_BINARY|THRESH_OTSU);
//adaptive thresholding (each call overwrites bi_img, so only the last result is displayed)
adaptiveThreshold(img,bi_img,255,ADAPTIVE_THRESH_MEAN_C,THRESH_BINARY,11,2);
adaptiveThreshold(img,bi_img,255,ADAPTIVE_THRESH_GAUSSIAN_C,THRESH_BINARY,11,2);
imshow("",bi_img);
waitKey(0);
return 0;
}
Iterative optimal threshold:
#include<iostream>
#include<opencv2/opencv.hpp>
using namespace std;
int best_thresh(cv::Mat image)
{
cv::Mat image_re = image.reshape(1);
double zmax,zmin; // max and min gray values
cv::Point minIdx, maxIdx; // their coordinates
cv::minMaxLoc(image_re, &zmin, &zmax, &minIdx, &maxIdx);
float tk = (zmax + zmin) / 2; // initial threshold
//split the image into foreground and background by the threshold, then compute their mean gray levels zo and zb
int b = 0; // 0 = keep iterating, 1 = converged
int m = image.size[0];
int n = image.size[1];
while (b == 0)
{
int ifg = 0;
int ibg = 0;
int fnum = 0;
int bnum = 0;
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n; j++)
{
int tmp = image.at<uchar>(i, j);
if (tmp > tk)
{
ifg = ifg + 1;
fnum = fnum + tmp;
}
else
{
ibg = ibg + 1;
bnum = bnum + tmp;
}
}
}
//mean gray levels of foreground and background
if (ifg == 0 || ibg == 0) break; //degenerate split: stop iterating
int zo = int(fnum / ifg);
int zb = int(bnum / ibg);
if (tk == int((zo + zb) / 2))
{
b = 1; // converged
}
else
{
tk = int((zo + zb) / 2);
}
}
return tk;
}
int main()
{
cv::Mat image = cv::imread("C:\\Users\\10623\\Pictures\\676ee268cdbb4b14af9f41a83ea83384.png");
cv::Mat bi_img;
int thresh = best_thresh(image);
std::cout << thresh;
cv::threshold(image, bi_img, thresh,255, cv::THRESH_BINARY);
cv::imwrite("C:\\Users\\10623\\Pictures\\bi.jpg",bi_img);
return 0;
}
Image arithmetic:
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
// Mat img = imread("C:\\Users\\86135\\Desktop\\1.png",0);
Mat x = (Mat_<uchar>(2,1)<<250,34);
Mat y = (Mat_<uchar>(2,1)<<10,100);
Mat add,sub;
cv::add(x,y,add);
cv::subtract(x,y,sub);
cout<<add<<endl;//[255; 134]: uchar addition saturates at 255
cout<<sub<<endl;//[240; 0]: uchar subtraction saturates at 0
// imshow("",bi_img);
// waitKey(0);
return 0;
}
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
Mat img1 = imread("C:\\Users\\86135\\Desktop\\1.png");
Mat img2 = imread("C:\\Users\\86135\\Desktop\\2.png");
resize(img1, img1, Size(800, 150), 0, 0, INTER_CUBIC);//dsize is given, so fx/fy are ignored
resize(img2, img2, Size(800, 150), 0, 0, INTER_CUBIC);
Mat dst;
addWeighted(img1,0.7,img2,0.3,0,dst);
imshow("",dst);
waitKey(0);
return 0;
}
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
Mat img1 = imread("C:\\Users\\86135\\Desktop\\1.png");
Mat img2 = imread("C:\\Users\\86135\\Desktop\\2.png");
resize(img1, img1, Size(800, 150), 0, 0, INTER_CUBIC);//dsize is given, so fx/fy are ignored
resize(img2, img2, Size(800, 150), 0, 0, INTER_CUBIC);
Mat dst1,dst2,dst3,dst4;
bitwise_and(img1,img2,dst1);
bitwise_not(img1,dst2);//NOT takes a single source: (src, dst)
bitwise_or(img1,img2,dst3);
bitwise_xor(img1,img2,dst4);
imshow("1",dst1);
imshow("2",dst2);
imshow("3",dst3);
imshow("4",dst4);
waitKey(0);
return 0;
}
Image transforms:
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
Mat img1 = imread("C:\\Users\\86135\\Desktop\\1.png");
resize(img1,img1,Size(500,500),0,0,INTER_CUBIC);//dsize is given, so fx/fy are ignored
Mat M = (Mat_<double>(2,3)<<1,0,50,0,1,50);//translate 50 px right and 50 px down
Mat dst;
// transpose(img1,dst);//transpose
// flip(img1,dst,0);//flip vertically
// flip(img1,dst,1);//flip horizontally
warpAffine(img1,dst,M,img1.size());//affine warp
imshow("1",dst);
waitKey(0);
return 0;
}
Affine transform:
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
Mat img1 = imread("C:\\Users\\86135\\Desktop\\1.png");
Mat M = getRotationMatrix2D(Point(img1.cols/2,img1.rows/2),45,0.7);//center, rotation angle in degrees, scale
Mat dst;
warpAffine(img1,dst,M,img1.size());
imshow("1",dst);
waitKey(0);
return 0;
}
Perspective transform:
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
Mat img1 = imread("C:\\Users\\86135\\Desktop\\1.png");
Point2f pts1[] = {Point2f(25,30),Point2f(179,25),
Point2f(12,188),Point2f(189,190)};
Point2f pts2[] = {Point2f(0,0),Point2f(200,0),
Point2f(0,200),Point2f(200,200)};
Mat M = getPerspectiveTransform(pts1,pts2);
Mat dst;
warpPerspective(img1,dst,M,img1.size());
imshow("1",dst);
waitKey(0);
return 0;
}
Morphological operations:
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
Mat img = imread("C:\\Users\\86135\\Desktop\\1.png",0);
adaptiveThreshold(img,img,255,ADAPTIVE_THRESH_MEAN_C,THRESH_BINARY,11,2);
Mat kernel = getStructuringElement(MORPH_RECT,Size(3,3));
Mat dst;
// dilate(img,dst,kernel);//dilation
// erode(img,dst,kernel);//erosion
// morphologyEx(img,dst,MORPH_OPEN,kernel);//opening
// morphologyEx(img,dst,MORPH_CLOSE,kernel);//closing
// morphologyEx(img,dst,MORPH_TOPHAT,kernel);//top-hat
// morphologyEx(img,dst,MORPH_BLACKHAT,kernel);//black-hat
morphologyEx(img,dst,MORPH_GRADIENT,kernel);//morphological gradient
imshow("1",dst);
waitKey(0);
return 0;
}
Filtering:
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
Mat img = imread("C:\\Users\\86135\\Desktop\\1.png",0);
Mat dst;
Mat M = (Mat_<double>(3,3)<<1,1,0,1,0,-1,0,-1,-1);
filter2D(img,dst,-1,M);
imshow("1",dst);
waitKey(0);
return 0;
}
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main()
{
Mat img = imread("C:\\Users\\86135\\Desktop\\1.png");
blur(img,dst,Size(3,3));//低通滤波
GaussianBlur(img,img,Size(3,3),1);
medianBlur(img,img,3);
bilateralFilter(img,img,9,75,75);//双边滤波
/*高通滤波*/
Laplacian(img,img,-1);
Sobel(img,img,-1,1,0);
imshow("1",img);
waitKey(0);
return 0;
}
Sobel operator:
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
Mat img = imread("C:\\Users\\86135\\Desktop\\1.png",0);
// Sobel(img,img,-1,1,0);//x direction
// Sobel(img,img,-1,0,1);//y direction
Scharr(img,img,-1,1,0);//Scharr: a more accurate 3x3 derivative kernel
imshow("1",img);
waitKey(0);
return 0;
}
Global (non-local means) denoising:
#include <iostream>
#include <opencv2/opencv.hpp>
#include "CvUtils.h"
using namespace cv;
using namespace std;
int main(int argc, char** argv) {
Mat src = imread("E:/DCIM/person/song5.jpg");
CvUtils::MatResize(src);
CvUtils::SetShowWindow(src, "src", 50, 50);
imshow("src", src);
//fastNlMeansDenoisingColored
double time = getTickCount();
Mat denoisingdst;
fastNlMeansDenoisingColored(src, denoisingdst);
time = getTickCount() - time;
cout << "运行时间:" << time / getTickFrequency() * 1000 << endl;
CvUtils::SetShowWindow(denoisingdst, "denoising", src.cols+70, 50);
imshow("denoising", denoisingdst);
waitKey(0);
return 0;
}
Canny:
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
Mat img = imread("C:\\Users\\86135\\Desktop\\1.png",0);
Canny(img,img,50,150);
imshow("1",img);
waitKey(0);
return 0;
}
Finding contours:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main() {
Mat img = imread("C:\\Users\\86135\\Desktop\\1.png",0);
Mat bi_img;
threshold(img,bi_img,0,255,THRESH_BINARY|cv::THRESH_OTSU);
vector<vector<Point>> contours;
findContours(bi_img,contours,RETR_TREE,CHAIN_APPROX_SIMPLE);
Mat canvas;
cvtColor(img,canvas,COLOR_GRAY2BGR);//img is single-channel, so draw on a 3-channel copy to see the colors
drawContours(canvas,contours,-1,Scalar(0,0,255),2);
approxPolyDP(contours.at(0),contours.at(0),60, true);//approximate the first contour with a polygon
drawContours(canvas,contours,-1,Scalar(0,255,0),2);
imshow("1",canvas);
waitKey(0);
return 0;
}
Convex hull:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main() {
Mat img = imread("C:\\Users\\86135\\Desktop\\1.png",0);
Mat bi_img;
threshold(img,bi_img,0,255,THRESH_BINARY|cv::THRESH_OTSU);
vector<vector<Point>> contours;
findContours(bi_img,contours,RETR_TREE,CHAIN_APPROX_SIMPLE);
vector<vector<Point>> hull(contours.size());
convexHull(contours.at(0),hull.at(0));//compute the convex hull of the first contour
cout<<isContourConvex(contours.at(0))<<" "<<isContourConvex(hull.at(0));//print whether each is convex
imshow("1",bi_img);
waitKey(0);
return 0;
}
Bounding shapes:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main() {
Mat img = imread("C:\\Users\\86135\\Desktop\\1.png",0);
Mat bi_img;
threshold(img,bi_img,0,255,THRESH_BINARY|cv::THRESH_OTSU);
vector<vector<Point>> contours;
findContours(bi_img,contours,RETR_TREE,CHAIN_APPROX_SIMPLE);
//bounding rectangle
Rect rect = boundingRect(contours.at(0));
rectangle(img,Point(rect.x,rect.y),Point(rect.x+rect.width,rect.y+rect.height),Scalar(0,255,0),2);
//minimum-area (rotated) rectangle
RotatedRect minRect = minAreaRect(contours.at(0));
Point2f vs[4];
minRect.points(vs);//fill vs with the four corners
vector<Point> contour;
contour.push_back(vs[0]);
contour.push_back(vs[1]);
contour.push_back(vs[2]);
contour.push_back(vs[3]);
polylines(img,contour, true,Scalar(0,255,0),2);
//minimum enclosing circle
Point2f center;
float radius;
cv::minEnclosingCircle(contours.at(0),center,radius);
circle(img,center,(int)radius,Scalar(0,255,0),2);
imshow("1",img);
waitKey(0);
return 0;
}
Area, perimeter, and centroid:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main() {
Mat img = imread("C:\\Users\\86135\\Desktop\\1.png",0);
Mat bi_img;
threshold(img,bi_img,0,255,THRESH_BINARY|cv::THRESH_OTSU);
vector<vector<Point>> contours;
findContours(bi_img,contours,RETR_TREE,CHAIN_APPROX_SIMPLE);
Moments M = moments(contours.at(0));
//centroid
int cx = M.m10/M.m00;
int cy = M.m01/M.m00;
cout<<cx<<","<<cy<<endl;
//area
double area = contourArea(contours.at(0));
cout<<"area:"<<area<<endl;
//perimeter
double area_len = arcLength(contours.at(0), true);
cout<<"length:"<<area_len<<endl;
imshow("1",bi_img);
waitKey(0);
return 0;
}
Hough transform:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main() {
Mat img = imread("C:\\Users\\86135\\Desktop\\1.png",0);
Mat bi_img;
threshold(img,bi_img,0,255,THRESH_BINARY|cv::THRESH_OTSU);
vector<Vec4i> plines;//HoughLinesP outputs integer endpoints
HoughLinesP(bi_img,plines,1,CV_PI/180,130);
for(size_t i=0;i<plines.size();i++)
{
Vec4i hline = plines[i];
line(img,Point(hline[0],hline[1]),Point(hline[2],hline[3]),Scalar(255,0,0),3);
}
imshow("1",img);
waitKey(0);
return 0;
}
Gray-level slicing:
The idea is to emphasize the range of gray levels we care about while leaving the other pixels unchanged or attenuated.
void produce_intensity_level_slicing_bmp_file(cv::Mat image, int a, int b, int value)
{
for (int i = 0; i < image.rows; ++i)//height
{
for (int j = 0; j < image.cols; ++j)//width
{
if ((image.at<uchar>(i, j) > a) && (image.at<uchar>(i, j) < b))
{
image.at<uchar>(i, j) = value;//set pixels inside (a, b) to value
}
}
}
}
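A usage sketch for the function above (the path and the (a, b, value) parameters are placeholders): gray levels inside (100, 180) are forced to 255, highlighting that band while leaving everything else untouched:
#include <opencv2/opencv.hpp>
int main() {
cv::Mat img = cv::imread("gray.png", cv::IMREAD_GRAYSCALE);
if (img.empty()) return -1;
produce_intensity_level_slicing_bmp_file(img, 100, 180, 255);//Mat headers share data, so img itself is modified
cv::imshow("sliced", img);
cv::waitKey(0);
return 0;
}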
Increasing contrast:
#include <opencv2/opencv.hpp>
#include <vector>
#include <algorithm>
using namespace std;
using namespace cv;
void imadjust(const Mat1b& src, Mat1b& dst, int tol = 1, Vec2i in = Vec2i(0,255), Vec2i out = Vec2i(0,255))
{
// src : input CV_8UC1 image
// dst : output CV_8UC1 image
// tol : tolerance, from 0 to 100
// in : src image bounds
// out : dst image bounds
dst = src.clone();
tol = max(0, min(100, tol));
if (tol > 0)
{
// Compute in and out limits
// Histogram
vector<int> hist(256, 0);
for (int r = 0; r < src.rows; ++r) {
for (int c = 0; c < src.cols; ++c) {
hist[src(r, c)]++;
}
}
// Cumulative histogram
vector<int> cum = hist;
for (int i = 1; i < hist.size(); ++i) {
cum[i] = cum[i - 1] + hist[i];
}
// Compute bounds
int total = src.rows * src.cols;
int low_bound = total * tol / 100;
int upp_bound = total * (100 - tol) / 100;
in[0] = distance(cum.begin(), lower_bound(cum.begin(), cum.end(), low_bound));
in[1] = distance(cum.begin(), lower_bound(cum.begin(), cum.end(), upp_bound));
}
// Stretching
float scale = float(out[1] - out[0]) / float(in[1] - in[0]);
for (int r = 0; r < dst.rows; ++r)
{
for (int c = 0; c < dst.cols; ++c)
{
int vs = max(src(r, c) - in[0], 0);
int vd = min(int(vs * scale + 0.5f) + out[0], out[1]);
dst(r, c) = saturate_cast<uchar>(vd);
}
}
}
int main()
{
Mat3b img = imread("C:\\Users\\10623\\Pictures\\Xm5oD.png");
Mat1b gray;
cvtColor(img, gray, COLOR_BGR2GRAY);//imread loads BGR, not RGB
Mat1b adjusted;
imadjust(gray, adjusted);
imwrite("C:\\Users\\10623\\Pictures\\2.png",adjusted);
// int low_in, high_in, low_out, high_out
// imadjust(gray, adjusted, 0, Vec2i(low_in, high_in), Vec2i(low_out, high_out));
return 0;
}
Implementing MATLAB's imadjust brightness adjustment:
#include<opencv2/opencv.hpp>
#include<iostream>
#include<cassert>
#include<vector>
void imadjust(cv::Mat& input, cv::Mat& output, double low_in = 0.0, double high_in = 1.0, double low_out = 0.0, double high_out = 1.0, double gamma = 1);//MATLAB-style, pixel range [0,1]
void imadjust(cv::Mat& input, cv::Mat& output, std::vector<double> in = { 0.0, 1.0 }, double low_out = 0.0, double high_out = 1.0, double gamma = 1);
void imadjust2(cv::Mat& input, cv::Mat& output, int low_in, int high_in, int low_out, int high_out, double gamma = 1);//OpenCV-style, pixel range [0,255]
std::vector<uchar> gammaLut(const double gamma, const double c = 1.0);//lookup table of gamma-transformed gray values
bool is0to1(const double var);
int main()
{
cv::Mat src_img = cv::imread("C:\\Users\\10623\\Pictures\\1.png",0);
if (src_img.empty()) return -1;
cv::Mat dst_img;
imadjust(src_img, dst_img, 0,1, 0,1,2);
cv::imwrite("C:\\Users\\10623\\Pictures\\2.png",dst_img);
cv::waitKey(0);
return 0;
}//main
void imadjust(cv::Mat& input, cv::Mat& output, double low_in, double high_in, double low_out, double high_out, double gamma)
{
assert(low_in < high_in && is0to1(low_in) && is0to1(high_in) && is0to1(low_out) && is0to1(high_out));
//convert the MATLAB gray range [0,1] to the OpenCV range [0,255]
high_in *= 255; high_out *= 255; low_in *= 255; low_out *= 255;
imadjust2(input, output, low_in, high_in, low_out, high_out, gamma);
}
void imadjust(cv::Mat& input, cv::Mat& output, std::vector<double> in, double low_out, double high_out, double gamma)
{
assert(2 == in.size());
double low_in = in[0];
double high_in = in[1];
imadjust(input, output, low_in, high_in, low_out, high_out, gamma);
}
void imadjust2(cv::Mat& input, cv::Mat& output, int low_in, int high_in, int low_out, int high_out, double gamma)//OpenCV-style, pixel range [0,255]
{
output = input.clone();
int rows = input.rows;//rows
int cols = input.cols;//columns
double k = (static_cast<double>(high_out) - low_out) / (high_in - low_in);
std::vector<uchar> gamma_lut = gammaLut(gamma);
switch (input.channels())
{
case 1://grayscale image
for (int i = 0; i < rows; ++i)
for (int j = 0; j < cols; ++j)
{
double result = 0;
if (input.at<uchar>(i, j) <= low_in)//pixels at or below low_in
{
result = low_out;//map to low_out
}
else if (low_in < input.at<uchar>(i, j) && input.at<uchar>(i, j) < high_in)//gray value inside (low_in, high_in)
{
result = k * (input.at<uchar>(i, j) - low_in) + low_out;//linear stretch
result = gamma_lut[static_cast<uchar>(result)];//then gamma transform
}
else
{
result = high_out;//pixels at or above high_in map to high_out
}
output.at<uchar>(i, j) = cv::saturate_cast<uchar>(result);
}
break;
//color image
case 3:
for (int i = 0; i < rows; ++i)
for (int j = 0; j < cols; ++j)
for (int ch = 0; ch < 3; ++ch)//channel index
{
double result = 0;
if (input.at<cv::Vec3b>(i, j)[ch] <= low_in)
result = low_out;
else if (low_in < input.at<cv::Vec3b>(i, j)[ch] && input.at<cv::Vec3b>(i, j)[ch] < high_in)
{
result = k * (input.at<cv::Vec3b>(i, j)[ch] - low_in) + low_out;
result = gamma_lut[static_cast<uchar>(result)];
}
else
{
result = high_out;
}
output.at<cv::Vec3b>(i, j)[ch] = cv::saturate_cast<uchar>(result);
}
break;
default:
break;
}
}
bool is0to1(const double var)
{
return 0 <= var && var <= 1;
}
std::vector<uchar> gammaLut(const double gamma, const double c)
{
std::vector<uchar> lut(256);
for (int i = 0; i < 256; ++i)
lut[i] = static_cast<uchar>(c * std::pow((double)(i / 255.0), gamma) * 255.0);
return lut;
}
Image inversion:
/*Image inversion (pixel access via pointers)*/
void Image_inversion(cv::Mat& src, cv::Mat& dst) {
int nr = src.rows;
int nc = src.cols*src.channels();
src.copyTo(dst);
if (src.isContinuous() && dst.isContinuous()) { //if the data is stored contiguously, treat it as a single row
nr = 1;
nc = src.rows*src.cols*src.channels(); //rows * cols * channels = total element count
}
for (int i = 0; i < nr; i++) {
const uchar* srcdata = src.ptr <uchar>(i); //pointer access: address of row i
uchar* dstdata = dst.ptr <uchar>(i);
for (int j = 0; j < nc; j++) {
dstdata[j] = 255 - srcdata[j]; //invert each element
}
}
}
Log transform:
/*Log transform, method 1 (works for both grayscale and color images)*/
void LogTransform1(cv::Mat& src, cv::Mat& dst, double c) {
int nr = src.rows;
int nc = src.cols*src.channels();
src.copyTo(dst);
dst.convertTo(dst, CV_64F);
if (src.isContinuous() && dst.isContinuous()) { //if contiguous, process as a single row
nr = 1;
nc = src.rows*src.cols*src.channels(); //rows * cols * channels = total element count
}
for (int i = 0; i < nr; i++) {
const uchar* srcdata = src.ptr <uchar>(i); //pointer access: address of row i
double* dstdata = dst.ptr <double>(i);
for (int j = 0; j < nc; j++) {
dstdata[j] = c * log(double(1.0 + srcdata[j])); //transform each element
}
}
cv::normalize(dst, dst, 0, 255, cv::NORM_MINMAX); //contrast-stretch the result back to 0-255
dst.convertTo(dst, CV_8U); //back to unsigned 8-bit
}
/*Log transform, method 2 (grayscale images)*/
cv::Mat LogTransform2(cv::Mat& src, double c) {
if (src.channels() > 1)
cv::cvtColor(src, src, cv::COLOR_BGR2GRAY);
cv::Mat dst;
src.copyTo(dst);
dst.convertTo(dst, CV_64F);
dst = dst + 1.0;
cv::log(dst, dst);
dst = c * dst;
cv::normalize(dst, dst, 0, 255, cv::NORM_MINMAX); //contrast-stretch the result back to 0-255
dst.convertTo(dst, CV_8U); //back to unsigned 8-bit
return dst;
}
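A usage sketch for the functions above (the path is a placeholder). Because the result is min-max normalized afterwards, c acts as a pure scale factor here:
#include <opencv2/opencv.hpp>
int main() {
cv::Mat src = cv::imread("dark_image.png");
if (src.empty()) return -1;
cv::Mat dst;
LogTransform1(src, dst, 1.0);//brightens dark regions, compresses bright ones
cv::imshow("log transform", dst);
cv::waitKey(0);
return 0;
}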
Piecewise linear transform (contrast stretching):
/*Piecewise linear transform: contrast stretching*/
/*****************
Three-segment linear transform
a<=b, c<=d
*****************/
void contrast_stretching(cv::Mat& src, cv::Mat& dst, double a, double b, double c, double d) {
src.copyTo(dst);
dst.convertTo(dst, CV_64F);
double min = 0, max = 0;
cv::minMaxLoc(dst, &min, &max, 0, 0);
int nr = dst.rows;
int nc = dst.cols * dst.channels();
if (dst.isContinuous()) {
nr = 1;
nc = dst.cols * dst.rows * dst.channels();
}
for (int i = 0; i < nr; i++) {
double* ptr_dst = dst.ptr<double>(i);
for (int j = 0; j < nc; j++) {
if (min <= ptr_dst[j] && ptr_dst[j] < a)
ptr_dst[j] = (c / a) * ptr_dst[j];
else if (a <= ptr_dst[j] && ptr_dst[j] < b)
ptr_dst[j] = c + ((d - c) / (b - a)) * (ptr_dst[j] - a);
else if (b <= ptr_dst[j] && ptr_dst[j] <= max)
ptr_dst[j] = d + ((max - d) / (max - b)) * (ptr_dst[j] - b);
}
}
dst.convertTo(dst, CV_8U); //back to unsigned 8-bit
}
Bit-plane slicing:
/***************************
num_Bit - which bit plane to extract
num_Bit = 1~8
num_Bit = 1 outputs the 1st bit plane
****************************/
void Bitplane_stratification(cv::Mat& src, cv::Mat& B, int num_Bit) {
int b[8];//the 8 binary bits of one pixel
if (src.channels() > 1)
cv::cvtColor(src, src, cv::COLOR_BGR2GRAY);
B.create(src.size(), src.type());
for (int i = 0; i < src.rows; i++) {
const uchar* ptr_src = src.ptr<uchar>(i);
uchar* ptr_B = B.ptr<uchar>(i);
for (int j = 0; j < src.cols; j++) {
num2Binary(ptr_src[j], b);//decompose the gray value into bits
ptr_B[j] = b[num_Bit - 1] * 255; //a 0/1 image is too dark to see, so scale by 255
}
}
}
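The helper num2Binary is not defined in the original notes; a minimal sketch, assuming b[0] receives the least-significant bit (so num_Bit = 1 selects the lowest bit plane). Declare it before Bitplane_stratification:
void num2Binary(uchar num, int b[8]) {
for (int k = 0; k < 8; ++k)
b[k] = (num >> k) & 1; //bit k of the gray value
}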
Segmenting a specific object by filtering the colors of a specific region:
#include <iostream>
#include <opencv2/opencv.hpp>
int main() {
cv::Mat src = cv::imread("C:\\Users\\10623\\Pictures\\eCKUa.jpg");
cv::Mat hsv;
cv::cvtColor(src,hsv,cv::COLOR_BGR2HSV);
cv::Mat mask;
cv::inRange(hsv, cv::Scalar(0,0,0), cv::Scalar(50,50,100),mask);
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5, 5));
cv::morphologyEx(mask,mask, cv::MORPH_CLOSE, kernel);
cv::imwrite("C:\\Users\\10623\\Pictures\\test.jpg",mask);
}
Connected component analysis:
#include <iostream>
#include <opencv2/opencv.hpp>
#include <vector>
int main() {
cv::Mat src = cv::imread("C:\\Users\\10623\\Desktop\\weibiao\\504003322861042723042606068_UP2_9.jpg",0);
cv::Mat threshold,labels,stats,centroids,img_color;
cv::adaptiveThreshold(src, threshold, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY,11,2);
//cv::threshold(src,threshold, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
int nccomps = cv::connectedComponentsWithStats(
threshold, //binary image
labels, //label map, same size as the input
stats, //nccomps x 5 matrix: bounding box and area (pixels) of each component
centroids //nccomps x 2 matrix: centroid of each component
);
//initialize the color table; components that are too small are removed below
std::vector<cv::Vec3b> colors(nccomps);
colors[0] = cv::Vec3b(0, 0, 0); // background pixels remain black.
for (int i = 1; i < nccomps; i++) {
colors[i] = cv::Vec3b(rand() % 256, rand() % 256, rand() % 256);
//drop components with area below 100 pixels
if (stats.at<int>(i, cv::CC_STAT_AREA) < 100)
colors[i] = cv::Vec3b(0, 0, 0); // small regions are painted with black too.
}
//color each component according to its label
img_color = cv::Mat::zeros(src.size(), CV_8UC3);
for (int y = 0; y < img_color.rows; y++)
for (int x = 0; x < img_color.cols; x++)
{
int label = labels.at<int>(y, x);
CV_Assert(0 <= label && label < nccomps);//labels run from 0 to nccomps-1
img_color.at<cv::Vec3b>(y, x) = colors[label];
}
cv::imwrite("C:\\Users\\10623\\Pictures\\test.jpg", img_color);
}
Line detection with Laplacian kernels:
#include<iostream>
#include<opencv2/opencv.hpp>
using namespace cv;
using namespace std;
int main()
{
Mat image, image_gray, image_bw, image_bw2, image_bw3, image_bw4;
image = imread("C:\\Users\\10623\\Pictures\\9.png"); //读取图像;
if (image.empty())
{
cout << "读取错误" << endl;
return -1;
}
//转换为灰度图像
cvtColor(image, image_gray, COLOR_BGR2GRAY);
//水平
Mat Laplacian_kernel = (cv::Mat_<float>(3, 3) << -1, -1, -1,
2, 2, 2,
-1, -1, -1);
filter2D(image_gray, image_bw, -1, Laplacian_kernel);
cv::imshow("image_bw", image_bw);
//45°
Mat Laplacian_kernel2 = (cv::Mat_<float>(3, 3) << 2, -1, -1,
-1, 2, -1,
-1, -1, 2);
filter2D(image_gray, image_bw2, -1, Laplacian_kernel2);
cv::imshow("image_bw2", image_bw2);
//vertical lines
Mat Laplacian_kernel3 = (cv::Mat_<float>(3, 3) << -1, 2, -1,
-1, 2, -1,
-1, 2, -1);
filter2D(image_gray, image_bw3, -1, Laplacian_kernel3);
cv::imshow("image_bw3", image_bw3);
//-45°
Mat Laplacian_kernel4 = (cv::Mat_<float>(3, 3) << -1, -1, 2,
-1, 2, -1,
2, -1, -1);
filter2D(image_gray, image_bw4, -1, Laplacian_kernel4);
cv::imshow("image_bw4", image_bw4);
cv::waitKey(0); //keep the windows open until a key is pressed
return 0;
}
Segmenting an image with region growing:
#include<iostream>
#include<opencv2/opencv.hpp>
using namespace std;
int getGrayDiff(cv::Mat gray, cv::Point current_seed,int tmpX,int tmpY)
{
return abs(int(gray.at<cv::Vec3b>(current_seed.x, current_seed.y)[0]) - int(gray.at<cv::Vec3b>(tmpX,tmpY)[0]));
}
cv::Mat regional_growth(cv::Mat gray, vector<cv::Point> seedque)
{
vector<cv::Point> connects = { cv::Point(-1, -1),cv::Point(0, -1),
cv::Point(1, -1),cv::Point(1, 0),cv::Point(1, 1),cv::Point(0, 1),cv::Point(-1, 1),cv::Point(-1, 0)};
int width = gray.size[1];
int height = gray.size[0];
int threshold = 6;
int label = 255;
cv::Mat seedMark = cv::Mat::zeros(cv::Size(gray.size[1],gray.size[0]), CV_8UC1);
while (!seedque.empty())
{
cv::Point current_seed = seedque[0];
seedque.erase(seedque.begin());
seedMark.at<uchar>(current_seed.x, current_seed.y) = label;
for (int i = 0; i < 8; i++)
{
int tmpX = current_seed.x + connects[i].x;
int tmpY = current_seed.y + connects[i].y;
if (tmpX < 0 || tmpY < 0 || tmpX >= height || tmpY >= width)
{
continue;
}
int grayDiff = getGrayDiff(gray, current_seed,tmpX, tmpY);
if (grayDiff < threshold && seedMark.at<uchar>(tmpX, tmpY) != label)
{
seedque.push_back(cv::Point(tmpX, tmpY));
seedMark.at<uchar>(tmpX, tmpY) = label;
}
}
}
return seedMark;
}
int main()
{
cv::Mat image = cv::imread("C:\\Users\\10623\\Pictures\\1fcafbf84ec64432a631ae3a448353d8.png");
vector<cv::Point> seedque;
std::cout << int(image.at<cv::Vec3b>(219, 153)[0])<< std::endl;
//initial points used as growing seeds
seedque.push_back(cv::Point(219, 153));
seedque.push_back(cv::Point(235, 357));
cv::Mat dst = regional_growth(image, seedque);
cv::imwrite("C:\\Users\\10623\\Pictures\\test.png",dst);
return 0;
}
Watershed segmentation:
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
Mat watershedSegment(Mat& srcImage, int& noOfSegments)
{
Mat grayMat;
Mat otsuMat;
cvtColor(srcImage, grayMat, COLOR_BGR2GRAY);
imshow("grayMat", grayMat);
// thresholding
threshold(grayMat, otsuMat, 0, 255, THRESH_BINARY_INV | THRESH_OTSU);
Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
// morphological opening to remove noise
morphologyEx(otsuMat, otsuMat, MORPH_OPEN, kernel);
morphologyEx(otsuMat, otsuMat, MORPH_OPEN, kernel);
// dilating the opened mask gives the sure-background region
cv::Mat sure_bg;
dilate(otsuMat, sure_bg, kernel);
dilate(sure_bg, sure_bg, kernel);
dilate(sure_bg, sure_bg, kernel);
// distance transform
Mat disTranMat(otsuMat.rows, otsuMat.cols, CV_32FC1);
distanceTransform(otsuMat, disTranMat, DIST_L2, 3);
// threshold the distance map to get the sure-foreground region
double zmax, zmin; // max and min values
cv::Point minIdx, maxIdx; // their coordinates
cv::minMaxLoc(disTranMat, &zmin, &zmax, &minIdx, &maxIdx);
threshold(disTranMat, disTranMat, 0.7 * zmax, 255, THRESH_BINARY);
disTranMat.convertTo(disTranMat, CV_8UC1);
imwrite("C:\\Users\\10623\\Desktop\\weibiao\\result.jpg",disTranMat);//counting its connected components gives the number of objects
// unknown region = sure background minus sure foreground
cv::Mat unknow;
cv::subtract(sure_bg, disTranMat, unknow);
imwrite("C:\\Users\\10623\\Desktop\\weibiao\\TDisTranMat.jpg", unknow);
return unknow;
}
int main()
{
Mat srcImage = imread("C:\\Users\\10623\\Pictures\\222.png");
int resultall = 0;
Mat result = watershedSegment(srcImage, resultall);
waitKey(0);
return 0;
}
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
cv::Mat displaySegResult(cv::Mat& segments, int numOfSegments) //, cv::Mat& image
{
cv::Mat wshed(segments.size(), CV_8UC3); // output color image
vector<Vec3b> colorTab;
for (int i = 0; i < numOfSegments; i++)
{
int b = theRNG().uniform(0, 255);
int g = theRNG().uniform(0, 255);
int r = theRNG().uniform(0, 255);
colorTab.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
} //assign a random color to each segment
for (int i = 0; i < segments.rows; i++)
{
for (int j = 0; j < segments.cols; j++)
{
int index = segments.at<int>(i, j);
if (index == -1)
wshed.at<Vec3b>(i, j) = Vec3b(255, 255, 255);
else if (index <= 0 || index > numOfSegments)
wshed.at<Vec3b>(i, j) = Vec3b(0, 0, 0);
else
wshed.at<Vec3b>(i, j) = colorTab[index - 1];
}
}
//if (image.dims > 0)
//wshed = wshed * 0.5 + image * 0.5;
return wshed;
}
Mat watershedSegment(Mat& srcImage, int& noOfSegments)
{
Mat grayMat;
Mat otsuMat;
cvtColor(srcImage, grayMat, COLOR_BGR2GRAY);
imshow("grayMat", grayMat);
// thresholding
threshold(grayMat, otsuMat, 0, 255,
THRESH_BINARY_INV | THRESH_OTSU);
imshow("otsuMat", otsuMat);
// morphological opening
Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
morphologyEx(otsuMat, otsuMat, MORPH_OPEN,kernel);
morphologyEx(otsuMat, otsuMat, MORPH_OPEN, kernel);
imshow("Mor-openMat", otsuMat);
// distance transform
Mat disTranMat(otsuMat.rows, otsuMat.cols, CV_32FC1);
distanceTransform(otsuMat, disTranMat, DIST_L2, 3);
// normalize
normalize(disTranMat, disTranMat, 0.0, 1, NORM_MINMAX);
imshow("DisTranMat", disTranMat);
// threshold the distance map; this value is the key tuning parameter
//cv::Mat image_re = disTranMat.reshape(1);
//double zmax, zmin; // max and min values
//cv::Point minIdx, maxIdx; // their coordinates
//cv::minMaxLoc(image_re, &zmin, &zmax, &minIdx, &maxIdx);
//threshold(disTranMat, disTranMat, 0.7 * zmax, 255, 0);
threshold(disTranMat, disTranMat, 0.1, 1, THRESH_BINARY);
//normalize back to 0-255 for display and contour finding
normalize(disTranMat, disTranMat, 0.0, 255.0, NORM_MINMAX);
disTranMat.convertTo(disTranMat, CV_8UC1);
imshow("TDisTranMat", disTranMat);
//compute the marker regions
int compCount = 0;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(disTranMat, contours, hierarchy,
RETR_CCOMP, CHAIN_APPROX_SIMPLE);
if (contours.empty())
return Mat();
Mat markers(disTranMat.size(), CV_32S);
markers = Scalar::all(0);
int idx = 0;
// draw each region as a distinct marker
for (; idx >= 0; idx = hierarchy[idx][0], compCount++)
drawContours(markers, contours, idx,
Scalar::all(compCount + 1), -1, 8,
hierarchy, INT_MAX);
if (compCount == 0)
return Mat();
//time the watershed call
double t = (double)getTickCount();
watershed(srcImage, markers);
t = (double)getTickCount() - t;
printf("execution time = %gms\n", t * 1000. /
getTickFrequency());
Mat wshed = displaySegResult(markers, compCount);
imshow("watershed transform", wshed);
noOfSegments = compCount;
std::cout << compCount;//number of segments
return markers;
}
int main()
{
Mat srcImage = imread("C:\\Users\\10623\\Pictures\\111.png");
imshow("src", srcImage);
int resultall = 0;
Mat result = watershedSegment(srcImage, resultall);
waitKey(0);
}
Histogram specification:
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
using namespace std;
using namespace cv;
Mat gray_hist; //histogram computed by CalHistogram
void CalHistogram(Mat& img);
void HistMap(Mat& img_src, Mat& img_obj);
int main()
{
//note: without a flag, imread loads a 3-channel BGR image
Mat img_src = imread("C:\\Users\\10623\\Desktop\\weibiao\\504003322861042723042606068_UP1_25.jpg");//image to be processed
Mat img_obj = imread("D:\\pictures\\504003152861042723010200005002_UP2_26.jpg");//reference image whose histogram we want to match
Mat imgOutput; //output image after specification
//split the source image channels
vector<Mat> src_channels;
Mat src_blue, src_green, src_red;
split(img_src, src_channels);
src_blue = src_channels.at(0);
src_green = src_channels.at(1);
src_red = src_channels.at(2);
//split the reference image channels
vector<Mat> obj_channels;
Mat obj_blue, obj_green, obj_red;
split(img_obj, obj_channels);
obj_blue = obj_channels.at(0);
obj_green = obj_channels.at(1);
obj_red = obj_channels.at(2);
//histogram specification on each BGR channel
HistMap(src_blue, obj_blue);
HistMap(src_green, obj_green);
HistMap(src_red, obj_red);
//merge the channels and write the result
merge(src_channels, imgOutput);
//display (optional)
//imshow("img_src", img_src);
//imshow("img_obj", img_obj);
imwrite("C:\\Users\\10623\\Desktop\\weibiao\\test.jpg", imgOutput);
//waitKey(0);
return 0;
}
void CalHistogram(Mat& img)
{
if (img.empty())
return;
//number of bins
int histsize = 256;
//value range
float range[] = { 0, 256 };
const float* histRange = { range };
//compute the histogram into the global gray_hist
calcHist(&img, 1, 0, Mat(), gray_hist, 1, &histsize, &histRange);
}
void HistMap(Mat& img_src, Mat& img_obj)
{
int i, j; //loop variables
double gray_temp = 0; //accumulator for the cumulative histogram
double totalPixel; //total number of pixels
//compute the source histogram, normalized to (0, 1)
CalHistogram(img_src);
totalPixel = img_src.rows * img_src.cols;
double srcHist[256];
for (i = 0; i < 256; i++)
{
srcHist[i] = gray_hist.at<float>(i) / totalPixel;
}
//cumulative probability of the source histogram, 0 ~ 1
double srcCumHist[256];
for (i = 0; i < 256; i++)
{
gray_temp = 0;
for (j = 0; j <= i; j++)
{
gray_temp += srcHist[j];
}
srcCumHist[i] = gray_temp;
}
//histogram of the reference image
CalHistogram(img_obj);
totalPixel = img_obj.rows * img_obj.cols;
double objHist[256];
for (i = 0; i < 256; i++)
{
objHist[i] = gray_hist.at<float>(i) / totalPixel;
}
//cumulative probability of the reference histogram, 0 ~ 1
double objCumHist[256];
for (i = 0; i < 256; i++)
{
gray_temp = 0;
for (j = 0; j <= i; j++)
{
gray_temp += objHist[j];
}
objCumHist[i] = gray_temp;
}
//GML (group mapping law)
double min = 1; //any value >= 1 works as the initial minimum
uchar map[256]; //input -> output gray-level mapping
int group[256]; //group boundary per reference gray level
for (i = 0; i < 256; i++)
{
group[i] = -1; //initialize
}
for (i = 0; i < 256; i++) //walk the reference histogram
{
if (objHist[i] == 0) //no pixel will ever map to an empty bin, so skip it
{
if (i > 0)
group[i] = group[i - 1];
continue;
}
min = 1;
for (j = 0; j < 256; j++) //find the source bin whose cumulative value is closest
{
if (fabs(objCumHist[i] - srcCumHist[j]) < min)
{
min = fabs(objCumHist[i] - srcCumHist[j]);
group[i] = j; //closest source gray level: record the group boundary
}
}
if (i == 0) //gray level 0 is a special case
{
for (int pos = 0; pos <= group[i]; pos++)
map[pos] = 0;
}
else
{
for (int pos = group[i - 1] + 1; pos <= group[i]; pos++) //levels in the same group map to the same output level
map[pos] = i; //build the mapping
}
}
//apply the mapping to the source image in place
uchar* p = NULL; //pixel-traversal pointer
int width = img_src.cols;
int height = img_src.rows;
for (i = 0; i < height; i++)
{
p = img_src.ptr<uchar>(i); //address of row i
for (j = 0; j < width; j++)
{
p[j] = map[p[j]];
}
}
}
Histogram equalization:
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main() {
Mat img = imread("C:\\Users\\86135\\Desktop\\1.png",0);
Mat dst;
equalizeHist(img,dst);
imshow("",dst);
waitKey(0);
return 0;
}
Adaptive histogram equalization (CLAHE):
#include <opencv2/opencv.hpp>
int main() {
cv::Mat image = cv::imread("image.jpg", cv::IMREAD_GRAYSCALE);
cv::Mat clahe;
cv::Ptr<cv::CLAHE> clahePtr = cv::createCLAHE();
clahePtr->setClipLimit(4.0);
clahePtr->apply(image, clahe);
cv::imshow("CLAHE Image", clahe);
cv::waitKey(0);
cv::destroyAllWindows();
return 0;
}
cv::Mat TestMat01;
cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(8, cv::Size(8, 8));//clip limit 8, 8x8 tile grid
clahe->apply(srcRoiMat, TestMat01);//srcRoiMat: a pre-existing ROI Mat from the surrounding code
This code likewise starts from a grayscale image: cv::createCLAHE() creates the adaptive histogram equalization object with the chosen parameters, and apply() runs it on the image.
Compared with traditional histogram equalization, adaptive histogram equalization adjusts the amount of equalization according to the local contrast of each region, which preserves more image detail.
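To see the difference, a side-by-side sketch (the path is a placeholder) that applies both global equalization and CLAHE to the same grayscale image:
#include <opencv2/opencv.hpp>
int main() {
cv::Mat gray = cv::imread("image.jpg", cv::IMREAD_GRAYSCALE);
if (gray.empty()) return -1;
cv::Mat he, adaptive;
cv::equalizeHist(gray, he); //global: one transfer curve for the whole image
cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(4.0, cv::Size(8, 8)); //per-tile equalization with clip limit 4.0
clahe->apply(gray, adaptive);
cv::imshow("global HE", he);
cv::imshow("CLAHE", adaptive);
cv::waitKey(0);
return 0;
}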