In the previous posts we covered how to use OpenCV to open the local camera and register face data, and how to compare face data so as to recognize the faces in a photo.

In this post we put that logic together to do real-time, dynamic face recognition. The steps are listed below, followed by a short sketch of the loop and then the full script.

Implementation steps

  1. Load the registered face data
  2. Read frames from the camera
  3. Convert the image encoding format (BGR to RGB)
  4. Compare each frame against the known face data
  5. Draw the results and display the frame
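
Before diving into the full script, here is a minimal sketch (nothing more) of how the five steps map onto the face_recognition and OpenCV calls used later. The lists known_encodings / known_names are placeholders for the data loaded in step 1; they are left empty here, so every detected face would show as Unknown.

import cv2 as cv
import face_recognition

known_encodings = []   # step 1: filled from the registered face images in the full script
known_names = []       # one name per encoding

cap = cv.VideoCapture(0)
while True:
    ret, frame = cap.read()                      # step 2: read a camera frame
    if not ret:
        break
    rgb = cv.cvtColor(frame, cv.COLOR_BGR2RGB)   # step 3: BGR -> RGB for face_recognition
    locs = face_recognition.face_locations(rgb)  # step 4: locate faces ...
    encs = face_recognition.face_encodings(rgb, locs)
    for (top, right, bottom, left), enc in zip(locs, encs):
        matches = face_recognition.compare_faces(known_encodings, enc)   # ... and compare
        name = known_names[matches.index(True)] if True in matches else 'Unknown'
        cv.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)   # step 5: draw
        cv.putText(frame, name, (left + 10, bottom - 10),
                   cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv.imshow('video', frame)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv.destroyAllWindows()

The complete script, with the known data loaded from disk and the steps split into functions, is below.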


import datetime
import time
import os.path
import random
import threading

import cv2 as cv
import dlib
import numpy as np
import face_recognition
from concurrent.futures import ThreadPoolExecutor

# Load the dlib face detector and the 68-point landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")


# Global variable holding the most recent face-detection results
g_face_data = []



# Recognize faces against the known face data
def recognize_face():
    known_face_encodings = []
    labels = []
    known_face_names = []
    global g_face_data

    # Load the registered face data
    load_known_data(known_face_encodings, known_face_names, labels)

    # Open the camera
    cap = cv.VideoCapture(0)
    fps = cap.get(cv.CAP_PROP_FPS)
    print("fps:", fps)

    while True:
        # Read a frame from the camera
        ret, frame = cap.read()
        if not ret:
            # Stop if the camera frame could not be read
            break

        # Convert the frame from BGR to RGB
        rgb_frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)

        # Compare against the known face data
        g_face_data = compare_face(rgb_frame, known_face_encodings, known_face_names)

        # Draw the results
        paint_frame(frame)

        # Check the keyboard; press q to exit the loop
        if cv.waitKey(int(1000 / 30)) & 0xFF == ord('q'):
            break

    cap.release()
    cv.destroyAllWindows()


# Load the registered face data (faces_new/<person name>/<image files>)
def load_known_data(known_face_encodings, known_face_names, labels):
    for root, dirs, files in os.walk("faces_new"):
        for i, dr in enumerate(dirs):
            for rt, drs, f in os.walk("faces_new/" + dr):
                for j, file in enumerate(f):
                    # img = cv.imdecode(np.fromfile('faces/' + dr + '/' + file, dtype=np.uint8), cv.IMREAD_COLOR)
                    img = face_recognition.load_image_file('faces_new/' + dr + '/' + file)
                    # img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
                    face_encoding = face_recognition.face_encodings(img)

                    for e, fe in enumerate(face_encoding):
                        known_face_encodings.append(fe)
                        labels.append(i)
                        known_face_names.append(dr)


# Draw the detection results onto the frame and display it
def paint_frame(frame):
    global g_face_data
    if len(g_face_data) > 0:
        for k, value in enumerate(g_face_data):
            top, right, bottom, left = value['location']
            name = value['name']
            # Draw the bounding box and the name
            cv.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
            cv.putText(frame, name, (left + 10, bottom - 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

    cv.imshow('video', frame)



# Compare the faces in a frame against the known face data
def compare_face(rgb_frame, known_face_encodings, known_face_names):

    res = []
    # Locate the faces in the frame
    locations = face_recognition.face_locations(rgb_frame)
    # Compute the encoding of each face ('large' = 68-point landmark model)
    face_encodings = face_recognition.face_encodings(rgb_frame, locations, model='large')

    for (top, right, bottom, left), face_encoding in zip(locations, face_encodings):
        # Compare each face with the known encodings
        matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.5)
        if True in matches:
            index = matches.index(True)
            name = known_face_names[index]
            res.append({'location': (top, right, bottom, left), 'name': name})
        else:
            res.append({'location': (top, right, bottom, left), 'name': 'Unknown'})

    return res


if __name__ == '__main__':
    print('Starting recognition...')
    recognize_face()

Although the code above can already recognize faces dynamically, the video is very laggy. face_recognition.face_locations and face_recognition.face_encodings are expensive, so the image needs to be optimized at the pixel level (fewer pixels to process) or the detection has to be done asynchronously. (I did try the asynchronous approach, but for some reason it did not help much and the video was just as laggy; if anyone knows why, please point it out.)
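
A common form of the pixel optimisation mentioned above (only a sketch here, following the pattern of face_recognition's own webcam examples) is to run detection on a shrunken copy of the frame, and only on every other frame, then scale the returned coordinates back up before drawing. The names scale, process_this_frame and face_data are illustrative, the 0.25 factor is just a starting point, and cap, known_face_encodings and known_face_names are assumed to be set up exactly as in the script above.

scale = 0.25                 # detect on a copy shrunk to 25% per side
process_this_frame = True    # flip-flop: run detection only on every other frame
face_data = []               # last detection results, reused on skipped frames

while True:
    ret, frame = cap.read()
    if not ret:
        break

    if process_this_frame:
        # Shrink the frame first; detection cost grows with the pixel count.
        small = cv.resize(frame, (0, 0), fx=scale, fy=scale)
        rgb_small = cv.cvtColor(small, cv.COLOR_BGR2RGB)

        locations = face_recognition.face_locations(rgb_small)
        encodings = face_recognition.face_encodings(rgb_small, locations)

        face_data = []
        for (top, right, bottom, left), enc in zip(locations, encodings):
            matches = face_recognition.compare_faces(known_face_encodings, enc, tolerance=0.5)
            name = known_face_names[matches.index(True)] if True in matches else 'Unknown'
            # Scale the box back up to full-frame coordinates.
            face_data.append({'location': (int(top / scale), int(right / scale),
                                           int(bottom / scale), int(left / scale)),
                              'name': name})
    process_this_frame = not process_this_frame

    # Draw the most recent results, even on the frames that skipped detection.
    for item in face_data:
        top, right, bottom, left = item['location']
        cv.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
        cv.putText(frame, item['name'], (left + 10, bottom - 10),
                   cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv.imshow('video', frame)

    if cv.waitKey(1) & 0xFF == ord('q'):
        break

Since the skipped frames just redraw the previous result, the box trails the face by at most one frame, which is rarely noticeable; shrinking below 0.25 speeds things up further but can start to miss smaller faces.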