先上效果图:
然后理一下用到的opencv知识:
1. 带人脸的图像预处理,灰度化就够了,缩小一下增加帧率;opencv的haar特征分类器;
face_cascade.detectMultiScale(image_face, faceRect, 1.2, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));
2. 同1得出眼部区域,opencv的神经网络api来训练出眼部区域(为了增加帧率);这篇文章介绍了它的神经网络及实现脸部区域和其器官的位置的神经网络训练过程。
预处理的代码如下:
// 返回 int[6] 目的:得出眼部和面部的坐标,通过神经网络训练出关系。(眼部识别更快速,用来推测出面部上边界和宽度)
// int[0]代表识别到的区域的类型,2代表都识别到了
// int[1] int[2]眼部的左上 int[3]眼部宽度
// int[4] int[5]面部的左上 int[6]面部宽度
int *facedeal::data_save(cv::Mat image)
{
int *data = new int[7];
cv::Mat image_gray,image_face;
cv::cvtColor(image,image_gray,CV_BGR2GRAY);
cv::equalizeHist(image_gray,image_gray); // little effect
cv::resize(image_gray,image_face,cv::Size(),reduce_radio,reduce_radio,cv::INTER_LINEAR);
data[0] = 0;
//检测关于face位置 !注意:这里是缩放后的位置
face_cascade.detectMultiScale(image_face, faceRect, 1.2, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));
for(size_t i = 0;i < faceRect.size();i ++){
data[0]++;
if (i >= 1)
continue;
faceRect[i] = faceRect[i] + cv::Point((radio-1) *faceRect[i].x, (radio-1) *faceRect[i].y); //平移左上顶点Point
faceRect[i] = faceRect[i] + cv::Size((radio-1) *faceRect[i].width, (radio-1) *faceRect[i].height); //缩放,左上顶点不变,宽高
rectangle(image, faceRect[i], cv::Scalar(0, 0, 255), 1); //画出识别的face
data[4] = faceRect[i].x;
data[5] = faceRect[i].y;
data[6] = faceRect[i].width;
}
eye_cascade.detectMultiScale(image_gray, eyeRect, 1.2, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));
for(size_t i = 0;i < eyeRect.size();i ++){
data[0]++;
if (i >= 1)
continue;
rectangle(image, eyeRect[i], cv::Scalar(0, 0, 255), 2); //画出识别的eyes
data[1] = eyeRect[i].x;
data[2] = eyeRect[i].y;
data[3] = eyeRect[i].width;
}
return data;
}
//检测脸部,输入图像作为处理
cv::Mat facedeal::face_detection(cv::Mat image)
{
cv::Mat image_gray,image_face,img;
img = image.clone();
cv::Mat facedata = (cv::Mat_<float>(1,3) << 0,0,0);
cv::cvtColor(img,image_gray,CV_BGR2GRAY);
cv::resize(image_gray,image_gray,cv::Size(),reduce_radio,reduce_radio,cv::INTER_LINEAR);
cv::equalizeHist(image_gray,image_face); // little effect
//检测关于face位置 !注意:这里是缩放后的位置
face_cascade.detectMultiScale(image_face, faceRect, 1.2, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));
for(size_t i = 0;i < faceRect.size();i ++){
//cout << "0: " << faceRect[i].width <<","<< faceRect[i].height <<"("<<faceRect[i].x <<","<<faceRect[i].y<<endl;
faceRect[i] = faceRect[i] + cv::Point((radio-1) *faceRect[i].x, (radio-1) *faceRect[i].y); //平移左上顶点Point
faceRect[i] = faceRect[i] + cv::Size((radio-1) *faceRect[i].width, (radio-1) *faceRect[i].height); //缩放,左上顶点不变,宽高
// faceRect[i] = faceRect[i] + cv::Point(radio *faceRect[i].x, radio *faceRect[i].y); //平移左上顶点Point
// cout << "1: " << faceRect[i].width <<","<< faceRect[i].height <<"("<<faceRect[i].x <<","<<faceRect[i].y<<endl;
//rectangle(image, faceRect[i], cv::Scalar(0, 0, 255), 1); //画出识别的face
facedata = (cv::Mat_<float>(1,3) << faceRect[0].x,faceRect[0].y,faceRect[0].width);
break;
}
return facedata;
}
//检测眼部,输入图像进行处理
cv::Mat facedeal::eyes_detection(cv::Mat image)
{
cv::Mat image_gray,image_eyes;
cv::Mat eyesdata(1, 3, CV_32FC1);
eyesdata = (cv::Mat_<float>(1,3) << 0,0,0);
cv::cvtColor(image,image_gray,CV_BGR2GRAY);
//cv::resize(image_gray,image_gray,cv::Size(),1,1,cv::INTER_LINEAR);
cv::equalizeHist(image_gray,image_eyes);
eye_cascade.detectMultiScale(image_eyes, eyeRect, 1.2, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));
for(size_t i = 0;i < eyeRect.size();i ++){
rectangle(image, eyeRect[i], cv::Scalar(0, 0, 255), 2); //画出识别的eyes
eyesdata = (cv::Mat_<float>(1,3) << eyeRect[0].x,eyeRect[0].y,eyeRect[0].width);
break;
}
return eyesdata;
}
3. 通过眼部区域,二值化得到两眼的较准确相对位置,与水平的角度;主要需要二值化,像素点比较。
4. 通过肤色检测,脸部识别矩形框与皮肤的位置,得到脸部旋转角度。
//从脸部得到x透视角度,从eye得出旋转角度
bool facedeal::get_faceErr(cv::Mat image,cv::Mat eyes) //eyes <float>类型,其他类型会错误
{
if(!faceRect.data()){
err_y = 0;
arr_x = 0;
return 0;
}
cv::Mat img = image.clone();
cv::Mat eyes0 = img(cv::Rect(eyes.at<float>(0,0), eyes.at<float>(0,1) , eyes.at<float>(0,2), eyes.at<float>(0,3)));
cv::Mat eyes1;
cv::Point p1(0,0),p2(0,0);
int num1=0,num2=0;
cv::cvtColor(eyes0,eyes1,CV_BGR2GRAY);
cv::threshold(eyes1,eyes1,70, 255, CV_THRESH_BINARY);
// cv::imshow("eyemask",eyes0);
// cv::imshow("mask",eyes1);
for(int ix = 0;ix < eyes1.cols/2;ix++)
for(int iy = 0;iy <eyes1.rows;iy++){
if(!eyes1.at<uchar>(iy,ix)) {
p1.x += ix;
p1.y +=iy;
num1 ++;
}
}
if(p1.x>0&&p1.y>0&&num1>0){ //防止 0/
p1.x = p1.x/num1;
p1.y = p1.y/num1;
}
for(int ix = eyes1.cols/2;ix < eyes1.cols;ix++)
for(int iy = 0;iy <eyes1.rows;iy++){
if(!eyes1.at<uchar>(iy,ix)) {
p2.x += ix;
p2.y +=iy;
num2 ++;
}
}
if(p2.x>0&&p2.y>0&&num2>0){ //防止 0/
p2.x = p2.x/num2;
p2.y = p2.y/num2;
}
err_y = p2.y - p1.y; // > 0 : 左高右低
int sum_err = 0;
int start_y,end_y;
cv::Mat faceroi = img(faceRect[0]);
start_y = 0.5*faceroi.rows - 10;
end_y = 0.5*faceroi.rows + 10;
vector<int> face_left(end_y - start_y),face_right(end_y - start_y),face_err(end_y - start_y);
cv::cvtColor(faceroi,faceroi,CV_BGR2YCrCb);
for(int i = start_y;i < end_y; i ++)
{
for(int j = 0;j < faceroi.cols; j++){
if((faceroi.at<cv::Vec3b>(i,j)[1] > crmax)||(faceroi.at<cv::Vec3b>(i,j)[1] < crmin)
||(faceroi.at<cv::Vec3b>(i,j)[2] > cbmax)||(faceroi.at<cv::Vec3b>(i,j)[2] < cbmin))
;
else {
face_left[i- start_y] = j;
break;
}
}
for(int j = faceroi.cols;j > 0 ;j--){
if((faceroi.at<cv::Vec3b>(i,j)[1] > crmax)||(faceroi.at<cv::Vec3b>(i,j)[1] < crmin)
||(faceroi.at<cv::Vec3b>(i,j)[2] > cbmax)||(faceroi.at<cv::Vec3b>(i,j)[2] < cbmin))
;
else {
face_right[i - start_y] = faceroi.cols - j;
break;
}
}
face_err[i - start_y] = face_left[i - start_y] - face_right[ i- start_y];
sum_err += face_err[i - start_y];
}
arr_x = sum_err/(end_y - start_y); // > 0 : 左多右少
//cout << "x "<<arr_x<<" y "<<err_y<<endl;
return 1;
}
5. 通过角度进行透视变换:输入角度,用opencv的四点法求出透视变换矩阵,再用透视变换函数进行变换。
getPerspectiveTransform(const Point2f src[], const Point2f dst[])
6. 贴图:roi范围提取、贴图处理、mask贴图、与roi比例相加
glass1(cv::Rect(-Dx[1],-Dy[1], glass1.cols + Dx[0] +Dx[1], glass1.rows + Dy[0] +Dy[1])).copyTo(glass1);
imageROI = image(cv::Rect(ROI_x - Dx[1],ROI_y - Dy[1],glass1.cols, glass1.rows)); //显示不完整
}
cv::cvtColor(glass1,glass_g,CV_BGR2GRAY);
cv::threshold(glass_g,glass_mask1, 100, 255, cv::THRESH_BINARY);
cv::bitwise_not(glass_mask1,glass_mask2);
cv::Mat i1(cv::Size(glass1.cols,glass0.rows),CV_8UC3);
cv::addWeighted(glass1,1.0,imageROI,0.2,0.,i1); //与原图比例加
//贴mask
i1.copyTo(imageROI,glass_mask2);
//cv::medianBlur(imageROI,imageROI,5);
7. 解决贴图闪烁问题(偶尔一帧无法检测到脸部区域),一个简单的追踪问题。----下一步,追踪问题的更多思考!
8. 解决贴图抖动问题,消除识别区域的不稳定性。
两者的处理方式大同小异,通过和前一帧(的脸部区域)进行比较,通过求两次矩阵差的平均值,和阈值比较,来判断贴图位置是否需要变化。
#if 1 //消除闪烁
if(facedata.at<float>(0,2) == 0){ //这一帧没检测到
cv::cvtColor(image,mask_image,CV_BGR2GRAY);
cv::cvtColor(last_image,mask_lastimage,CV_BGR2GRAY);
absdiff(mask_image,mask_lastimage,err_image);
cv::imshow("ds",err_image);
result_err = mean(err_image);
std::cout << "err: "<<result_err[0] <<std::endl;
if(result_err[0] < 1.5){
facedata = last_facedata.clone();
}
}
last_image = image.clone();
#endif
#if 1 //解决贴图抖动问题
absdiff(facedata,last_facedata,err_face); //两个矩阵差的绝对值
mean_err = cv::sum(err_face); //mean求平均,<10 不显著
//std::cout <<"last:"<<last_facedata<<"new:"<<facedata<< "err: "<<mean_err <<std::endl;
if(mean_err[0] < 100)
facedata = last_facedata.clone();
last_facedata = facedata.clone();
#endif