Table of Contents

  • I. Setup
  • 1. OpenCV
  • 2. dlib (GPU build)
  • II. Connecting the Kinect v1
  • 1. Install OpenNI2
  • 2. Install libfreenect
  • III. Running the Code
  • 1. Modify CMakeLists.txt
  • 2. Download the data and models
  • 3. Run the code


I. Setup

1. OpenCV

# Install dependencies

sudo apt-get install libvtk5-dev
sudo apt-get install libgtk2.0-dev

# Build and install OpenCV 2.4.13.3

git clone https://github.com/opencv/opencv
cd opencv/
git checkout 2.4.13.3
mkdir -p build && cd build
cmake -DWITH_VTK=ON -DWITH_GTK=ON -DBUILD_opencv_calib3d=ON -DBUILD_opencv_imgproc=ON -DWITH_CUDA=OFF ..
make -j4
sudo make install

Note: cloning the OpenCV repository can be very slow.
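
To confirm the install, a minimal sanity check like the following (a hypothetical check_opencv.cpp, not part of the project; build with g++ check_opencv.cpp -o check_opencv `pkg-config --cflags --libs opencv`) prints the installed version:

#include <opencv2/core/version.hpp>
#include <iostream>

int main()
{
    // CV_VERSION is defined by the OpenCV headers; expect 2.4.13.3 here
    std::cout << "OpenCV version: " << CV_VERSION << std::endl;
    return 0;
}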

2. dlib (GPU build)

Install the NVIDIA driver, CUDA, and cuDNN beforehand. Go to the official site http://dlib.net/, click "Download dlib ver.19.17" in the bottom-left corner, and extract the archive.
Then, from the dlib root directory:

mkdir build && cd build
cmake .. 
cmake --build . --config Release
sudo make install

Note: CMake automatically checks whether the prerequisites for the GPU build are satisfied; watch the messages printed on the command line.
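
As a quick sanity check (a minimal sketch, assuming the installed dlib headers expose the DLIB_USE_CUDA configuration macro), the following program reports which build you ended up with:

#include <dlib/dnn.h>
#include <iostream>

int main()
{
#ifdef DLIB_USE_CUDA
    std::cout << "dlib was built with CUDA support (GPU build)" << std::endl;
#else
    std::cout << "dlib is CPU-only; recheck the driver/CUDA/cuDNN setup" << std::endl;
#endif
    return 0;
}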

II. Connecting the Kinect v1

1. Install OpenNI2

git clone https://github.com/occipital/OpenNI2.git
cd OpenNI2
make -j4
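
Once the build finishes, a minimal sketch like the following (compiled against the Include and Bin directories of the OpenNI2 build tree) verifies that OpenNI2 initializes and can see the sensor:

#include <OpenNI.h>
#include <iostream>

int main()
{
    // initialize the OpenNI2 runtime
    if( openni::OpenNI::initialize() != openni::STATUS_OK )
    {
        std::cerr << "OpenNI Initial Error: "
                  << openni::OpenNI::getExtendedError() << std::endl;
        return -1;
    }
    // list every attached device (the Kinect should appear here)
    openni::Array<openni::DeviceInfo> devices;
    openni::OpenNI::enumerateDevices( &devices );
    std::cout << devices.getSize() << " device(s) found" << std::endl;
    openni::OpenNI::shutdown();
    return 0;
}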

2. Install libfreenect

git clone https://github.com/OpenKinect/libfreenect
cd libfreenect

Open libfreenect/CMakeLists.txt and add the following on the line after cmake_minimum_required(VERSION 2.8.12) (around line 33):

add_definitions(-std=c++11)

Save and close the file, then continue on the command line:

mkdir build && cd build  
cmake .. -DBUILD_OPENNI2_DRIVER=ON  
make -j4
cp -L lib/OpenNI2-FreenectDriver/libFreenectDriver.so ${OPENNI2_DIR}/Bin/x64-Release/OpenNI2/Drivers   # change OPENNI2_DIR to your own path

Note: ${OPENNI2_DIR} is the folder where OpenNI2 was cloned. Mine sits inside an ElasticFusion folder, so the target directory is /home/zhoujie/fusion/ElasticFusion/OpenNI2/Bin/x64-Release/OpenNI2/Drivers.

From the libfreenect folder, run:

sudo cp platform/linux/udev/51-kinect.rules /etc/udev/rules.d

After rebooting, plug in the Kinect and run lsusb; check that the output includes the Xbox camera, Xbox motor, and Xbox audio devices.

III. Running the Code

1. Modify CMakeLists.txt

# CMakeLists.txt

cmake_minimum_required(VERSION 2.8.4) 

project(face_dlib) 


SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2 -DDLIB_JPEG_SUPPORT")

IF(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
  SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Weverything")
ELSEIF(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
  SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra")
ENDIF()

# opencv
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})

# OpenNI2
set(OPENNI2_PATH "/home/zhoujie/fusion/ElasticFusion/OpenNI2") 

set(OPENNI2_INCLUDE_DIR ${OPENNI2_PATH}/Include)
set(OPENNI2_LIBRARY ${OPENNI2_PATH}/Bin/x64-Release)
include_directories (${OPENNI2_INCLUDE_DIR})
link_directories (${OPENNI2_LIBRARY})

# dlib
include(/home/zhoujie/dlib-19.17/dlib/cmake)

add_executable(this_is_who_kinect_one src/this_is_who_kinect_one.cpp)
target_link_libraries(this_is_who_kinect_one dlib ${OpenCV_LIBS} libOpenNI2.so)

macro(add_face_dlib name)
   add_executable(${name} src/${name}.cpp)
   target_link_libraries(${name} dlib )
endmacro()

add_face_dlib(train_candidate)
add_face_dlib(this_is_who)

Change OPENNI2_PATH and the dlib path to your own locations.

2. Download the data and models

Download shape_predictor_68_face_landmarks.dat and dlib_face_recognition_resnet_model_v1.dat and place them in the model folder.

Link: https://pan.baidu.com/s/1jIoW6BSa5nkGWNipL7sxVQ
It contains:

  • candidate-face.zip (face database: 29 frontal-face IR images)
  • allface.zip (test set: IR and depth images of 29 people, 13 facial poses each)
  • shape_predictor_68_face_landmarks.dat (68-point facial landmark detector)
  • dlib_face_recognition_resnet_model_v1.dat (face recognition model)

3. Run the code

Place train_candidate.cpp, this_is_who.cpp, and this_is_who_kinect_one.cpp in the src folder.

#build

mkdir build && cd build
cmake ..
make -j4

When cmake runs, check its output to confirm the GPU build of dlib is picked up (the DLIB_USE_CUDA check program above can also verify this).


The build produces several executables in the build folder.

Rename each face image to the person's name and place it in the data folder.

1. Run train_candidate (e.g. ./train_candidate ../data/candidate-face/) to extract the face database features; they are stored in candidates_descriptors.dat and candidates.dat, and a candidates.txt is generated for inspecting the candidate list. Whenever the face database changes, rerunning train_candidate is all that is needed to refresh the stored face data.
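
The .dat files are plain dlib serialization; reading them back (the same calls this_is_who and this_is_who_kinect_one make at startup) looks like this:

#include <dlib/dnn.h>   // brings in dlib's matrix and serialize/deserialize
#include <vector>
#include <string>

int main()
{
    std::vector<dlib::matrix<float,0,1>> candidates_descriptors; // one 128-D descriptor per face
    std::vector<std::string> candidates;                         // matching candidate names
    dlib::deserialize("../candidates_descriptors.dat") >> candidates_descriptors;
    dlib::deserialize("../candidates.dat") >> candidates;
    return 0;
}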

2. Run this_is_who_kinect_one to grab depth and color frames from the Kinect in real time and perform face recognition and liveness detection.

3. To test on offline data without a live Kinect v1, run this_is_who (e.g. ./this_is_who ../data/allface/0004_IR_allleft.jpg ../data/allface/0004_raw_allleft.raw); it performs face recognition and liveness detection on the depth and IR images in data/allface.

The code:
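
For reference, the liveness check in the code below is a RANSAC-style plane fit over the depth pixels inside the detected face rectangle: each iteration samples three depth points, solves the plane z = ax + by + c through them in closed form, and counts inliers. The coefficients follow from the three-point system:

a = \frac{(z_0-z_1)(y_0-y_2) - (z_0-z_2)(y_0-y_1)}{(x_0-x_1)(y_0-y_2) - (x_0-x_2)(y_0-y_1)}, \quad
b = \frac{(z_0-z_2) - a(x_0-x_2)}{y_0-y_2}, \quad
c = z_0 - a x_0 - b y_0

A point counts as an inlier when |ax + by + c - z| < SIGMA; if the best inlier fraction pretotal/faceno0_num stays below PLANE_OR_NOT = 0.2, the face region is treated as non-planar (real 3D relief) and judged live, otherwise it is flagged as a flat photo.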

#train_candidate.cpp

#include <dlib/dnn.h>
#include <dlib/image_processing/frontal_face_detector.h>
#include <dlib/image_processing.h>
#include <dlib/gui_widgets.h>
#include <dlib/image_io.h>

#include <iostream>
#include <fstream>
#include <time.h>
#include <dirent.h>
#include <string.h>

using namespace dlib;
using namespace std;

/* Function declaration */
/* Build descriptors for the face database */
int candidates_train(const char *facesFile,std::vector<matrix<float,0,1>>&candidates_descriptors,std::vector<string>&candidates);

template <template <int,template<typename>class,int,typename> class block, int N, template<typename>class BN, typename SUBNET>
using residual = add_prev1<block<N,BN,1,tag1<SUBNET>>>;
template <template <int,template<typename>class,int,typename> class block, int N, template<typename>class BN, typename SUBNET>
using residual_down = add_prev2<avg_pool<2,2,2,2,skip1<tag2<block<N,BN,2,tag1<SUBNET>>>>>>;
template <int N, template <typename> class BN, int stride, typename SUBNET> 
using block  = BN<con<N,3,3,1,1,relu<BN<con<N,3,3,stride,stride,SUBNET>>>>>;
template <int N, typename SUBNET> using ares      = relu<residual<block,N,affine,SUBNET>>;
template <int N, typename SUBNET> using ares_down = relu<residual_down<block,N,affine,SUBNET>>;
template <typename SUBNET> using alevel0 = ares_down<256,SUBNET>;
template <typename SUBNET> using alevel1 = ares<256,ares<256,ares_down<256,SUBNET>>>;
template <typename SUBNET> using alevel2 = ares<128,ares<128,ares_down<128,SUBNET>>>;
template <typename SUBNET> using alevel3 = ares<64,ares<64,ares<64,ares_down<64,SUBNET>>>>;
template <typename SUBNET> using alevel4 = ares<32,ares<32,ares<32,SUBNET>>>;
using anet_type = loss_metric<fc_no_bias<128,avg_pool_everything<
    alevel0<
    alevel1<
    alevel2<
    alevel3<
    alevel4<
    max_pool<3,3,2,2,relu<affine<con<32,7,7,2,2,
    input_rgb_image_sized<150>
    >>>>>>>>>>>>;

int main(int argc, char** argv)
{
    if (argc == 1)
        {
            cout << "\nCall this program like this:" << endl;
            cout << "./train_candidate ../data/candidate-face/" << endl;
            return 0;
        }
    
    const char *facesFile = argv[1];

    std::vector<matrix<float,0,1>> candidates_descriptors;
    std::vector<string> candidates;
    
    /* build the face database */
    candidates_train(facesFile,candidates_descriptors,candidates);

}

/* Build descriptors for the face database */
int candidates_train(const char *facesFile,std::vector<matrix<float,0,1>>&candidates_descriptors,std::vector<string>&candidates)
{
    DIR *dir;
    struct dirent *ptr;
    char base[256];            // filename buffer (d_name may be up to 255 chars)
    const char *pick = ".jpg"; // substring that training images must contain
    char IRfile[512];          // full path of the current IR image
    char *name;
    int face_num = 0;
    std::vector<matrix<rgb_pixel>> faces;

    frontal_face_detector detector = get_frontal_face_detector(); // frontal face detector
    shape_predictor sp;  // facial landmark predictor
    anet_type net;       // face recognition network

    deserialize("../model/shape_predictor_68_face_landmarks.dat") >> sp;
    deserialize("../model/dlib_face_recognition_resnet_model_v1.dat") >> net;

    clock_t start,finish;
    double totaltime;
    start=clock();

    ofstream mycout("../candidates.txt");

    cout << "TRAINING START!" << endl;

    if ((dir=opendir(facesFile)) == NULL)
    {
        perror("Open dir error...");
        exit(1);
    }
    while ((ptr=readdir(dir)) != NULL)
    {
        strcpy(base, ptr->d_name);
        if(strstr(base,pick)) 
        {
            cout << "training image:" << base << endl;
            strcpy(IRfile, facesFile);
            strcat(IRfile, base);
            name = strtok(base, "_");
            string candidate = name;
            cout << "candidate: " << candidate << endl;

            matrix<rgb_pixel> img;
            load_image(img, IRfile);
            std::vector<rectangle> dets = detector(img);
            if (dets.empty())  // guard: skip images where no face is detected
            {
                cout << "WARNING: no face detected in " << IRfile << ", skipped." << endl;
                continue;
            }
            mycout << candidate << endl;
            candidates.push_back(candidate);
            full_object_detection shape = sp(img, dets[0]);
            matrix<rgb_pixel> face_chip;
            extract_image_chip(img, get_face_chip_details(shape,150,0.25), face_chip);
            faces.push_back(move(face_chip));
            face_num += 1;
        }  
    }
    candidates_descriptors = net(faces);

    mycout.close();
    cout << "TRAINING END!" << endl;
    cout << "\nTRAIN RESULT:" << endl;
    cout << "Training " << face_num <<  " face(s)!" << endl;

    finish=clock();
    totaltime=(double)(finish-start)/CLOCKS_PER_SEC;
    cout<<"TRAINING TIME: " << totaltime << " S!"<<endl;

    cout << "\nFace database updating..." << endl;
    serialize("../candidates_descriptors.dat") << candidates_descriptors;
    serialize("../candidates.dat") << candidates;
    cout << "Face has been updated!" << endl;
    
    closedir(dir);
    return 0;
}

#this_is_who_kinect_one.cpp

#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/opencv.hpp>

#include <OpenNI.h>

#include <dlib/dnn.h>
#include <dlib/image_processing/frontal_face_detector.h>
#include <dlib/image_processing/render_face_detections.h>
#include <dlib/image_processing.h>
#include <dlib/gui_widgets.h>
#include <dlib/image_io.h>
#include <dlib/opencv.h>

#include <time.h>
#include <dirent.h>
#include <string.h>
#include <math.h>
#include <sstream> 

using namespace std;
using namespace openni;
using namespace dlib;
using namespace cv;

template <template <int,template<typename>class,int,typename> class block, int N, template<typename>class BN, typename SUBNET>
using residual = add_prev1<block<N,BN,1,tag1<SUBNET>>>;
template <template <int,template<typename>class,int,typename> class block, int N, template<typename>class BN, typename SUBNET>
using residual_down = add_prev2<avg_pool<2,2,2,2,skip1<tag2<block<N,BN,2,tag1<SUBNET>>>>>>;
template <int N, template <typename> class BN, int stride, typename SUBNET> 
using block  = BN<con<N,3,3,1,1,relu<BN<con<N,3,3,stride,stride,SUBNET>>>>>;
template <int N, typename SUBNET> using ares      = relu<residual<block,N,affine,SUBNET>>;
template <int N, typename SUBNET> using ares_down = relu<residual_down<block,N,affine,SUBNET>>;
template <typename SUBNET> using alevel0 = ares_down<256,SUBNET>;
template <typename SUBNET> using alevel1 = ares<256,ares<256,ares_down<256,SUBNET>>>;
template <typename SUBNET> using alevel2 = ares<128,ares<128,ares_down<128,SUBNET>>>;
template <typename SUBNET> using alevel3 = ares<64,ares<64,ares<64,ares_down<64,SUBNET>>>>;
template <typename SUBNET> using alevel4 = ares<32,ares<32,ares<32,SUBNET>>>;
using anet_type = loss_metric<fc_no_bias<128,avg_pool_everything<
    alevel0<
    alevel1<
    alevel2<
    alevel3<
    alevel4<
    max_pool<3,3,2,2,relu<affine<con<32,7,7,2,2,
    input_rgb_image_sized<150>
    >>>>>>>>>>>>;

const int ITER = 5000;          // number of RANSAC iterations
const float PLANE_OR_NOT = 0.2; // inlier-ratio threshold separating plane from non-plane
const int SIGMA = 1;            // inlier distance threshold

int main()
{
  frontal_face_detector detector = get_frontal_face_detector(); // frontal face detector
  shape_predictor sp;  // facial landmark predictor
  anet_type net;       // face recognition network

  deserialize("../model/shape_predictor_68_face_landmarks.dat") >> sp;
  deserialize("../model/dlib_face_recognition_resnet_model_v1.dat") >> net;

  std::vector<matrix<float,0,1>> candidates_descriptors;
  deserialize("../candidates_descriptors.dat") >> candidates_descriptors;

  std::vector<string> candidates;
  deserialize("../candidates.dat") >> candidates;
  
  // 1. Initial OpenNI
  if( OpenNI::initialize() != STATUS_OK )
  {
    cerr << "OpenNI Initial Error: " 
         << OpenNI::getExtendedError() << endl;
    return -1;
  }

  // 2. Open Device
  Device mDevice;
  if( mDevice.open( ANY_DEVICE ) != STATUS_OK )
  {
    cerr << "Can't Open Device: " 
         << OpenNI::getExtendedError() << endl;
    return -1;
  }

  // 3. Create depth stream
  VideoStream mDepthStream;
  if( mDevice.hasSensor( SENSOR_DEPTH ) )
  {
    if( mDepthStream.create( mDevice, SENSOR_DEPTH ) == STATUS_OK )
    {
      // 3a. set video mode
      VideoMode mMode;
      mMode.setResolution( 640, 480 );
      mMode.setFps( 30 );
      mMode.setPixelFormat( PIXEL_FORMAT_DEPTH_1_MM );

      if( mDepthStream.setVideoMode( mMode) != STATUS_OK )
      {
        cout << "Can't apply VideoMode: "
             << OpenNI::getExtendedError() << endl;
      }
    }
    else
    {
      cerr << "Can't create depth stream on device: "
           << OpenNI::getExtendedError() << endl;
      return -1;
    }
  }
  else
  {
    cerr << "ERROR: This device does not have depth sensor" << endl;
    return -1;
  }

  // 4. Create color stream
  VideoStream mColorStream;
  if( mDevice.hasSensor( SENSOR_COLOR ) )
  {
    if( mColorStream.create( mDevice, SENSOR_COLOR ) == STATUS_OK )
    {
      // 4a. set video mode
      VideoMode mMode;
      mMode.setResolution( 640, 480 );
      mMode.setFps( 30 );
      mMode.setPixelFormat( PIXEL_FORMAT_RGB888 );

      if( mColorStream.setVideoMode( mMode) != STATUS_OK )
      {
        cout << "Can't apply VideoMode: " 
             << OpenNI::getExtendedError() << endl;
      }

      // 4b. image registration
      if( mDevice.isImageRegistrationModeSupported(
              IMAGE_REGISTRATION_DEPTH_TO_COLOR ) )
      {
        mDevice.setImageRegistrationMode( IMAGE_REGISTRATION_DEPTH_TO_COLOR );
      }
    }
    else
    {
      cerr << "Can't create color stream on device: "
           << OpenNI::getExtendedError() << endl;
      return -1;
    }
  }

  // 5. create OpenCV Window
  cv::namedWindow( "Depth Image", CV_WINDOW_AUTOSIZE);
  cv::namedWindow( "Color Image", CV_WINDOW_AUTOSIZE);

  // 6. start
  VideoFrameRef  mColorFrame;
  VideoFrameRef  mDepthFrame;

  mDepthStream.start();
  mColorStream.start();

  cv::Mat cImageBGR;
  cv::Mat DepthMat;

  int iMaxDepth = mDepthStream.getMaxPixelValue();
  while( true )
  {
    // 8a. get depth frame
    if( mDepthStream.readFrame( &mDepthFrame ) == STATUS_OK )
    {
      // 8b. convert data to OpenCV format
      const cv::Mat mImageDepth(
                mDepthFrame.getHeight(), mDepthFrame.getWidth(),
                CV_16UC1, (void*)mDepthFrame.getData() );

      DepthMat = mImageDepth.clone();

      // 8c. re-map depth data [0,Max] to [0,255]
      cv::Mat mScaledDepth;
      mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepth );

      // 8d. show image
      cv::imshow( "Depth Image", mScaledDepth );
    }
    
    // 7a. get color frame
    if( mColorStream.readFrame( &mColorFrame ) == STATUS_OK )
    {
      // 7b. convert data to OpenCV format
      const cv::Mat mImageRGB(
              mColorFrame.getHeight(), mColorFrame.getWidth(),
              CV_8UC3, (void*)mColorFrame.getData() );
      // 7c. convert from RGB to BGR
      cv::cvtColor( mImageRGB, cImageBGR, CV_RGB2BGR );
      // 7d. show image

      //dlib
      cv_image<bgr_pixel> cimg(cImageBGR);

      // Detect faces 
      std::vector<dlib::rectangle> dets = detector(cimg);

      if(dets.size() != 0){
        int livedetector = -1;        // -1: multiple faces, 1: live, 0: not live, -2: not enough depth
        string who, isface;
        float who_probability = 0.0f; // initialized so an unknown face prints 0%
        float pretotal_ary = 0.0f;

        if(dets.size() != 1){
          livedetector = -1;
          for (unsigned long j = 0; j < dets.size(); ++j){
            Rect rect(dets[j].left(),dets[j].top(),dets[j].right() -dets[j].left(), dets[j].bottom() -dets[j].top()); // top-left corner (x,y) plus width and height
            cv::rectangle(cImageBGR, rect, Scalar(0, 255, 0),1,8,0);
          }

        }
        else{
          Rect rect(dets[0].left(),dets[0].top(),dets[0].right() -dets[0].left(), dets[0].bottom() -dets[0].top()); // top-left corner (x,y) plus width and height
          cv::rectangle(cImageBGR, rect, Scalar(0, 255, 0),1,8,0);

          full_object_detection shape = sp(cimg, dets[0]);
          std::vector<matrix<rgb_pixel>> faces;
          matrix<rgb_pixel> face_chip;
          extract_image_chip(cimg, get_face_chip_details(shape,150,0.25), face_chip);
          faces.push_back(move(face_chip));
          std::vector<matrix<float,0,1>> face_descriptors = net(faces);

          float distance;
          float best_distance = length(face_descriptors[0]-candidates_descriptors[0]);
          size_t candidates_num = candidates_descriptors.size();
          int candidates_num_int = static_cast<int>(candidates_num);
          int best_k = 0;
          for (int k = 1; k < candidates_num_int; k++)
          {
              distance = length(face_descriptors[0]-candidates_descriptors[k]);
              if (distance < best_distance) 
              {
                  best_distance = distance;
                  best_k = k;
              }
          }
          if (best_distance < 0.6) {
              who = candidates[best_k];
              // empirical mapping from descriptor distance to a confidence percentage, capped at 99%
              who_probability = (1.05-0.66*best_distance)*100 < 100 ? (1.05-0.66*best_distance)*100 : 99;
          }
          else{
              who = "Unknown";
          }

          // liveness_detection(DepthMat,locates,livedetector);
          int COL, ROW, FACE_WIDTH, FACE_HEIGHT;
          int faceno0_num;
          int FaceDATA[3][100000];
          int k;
          int pretotal;
          int x[3], y[3], z[3];  // three randomly sampled points
          float a, b, c;         // fitted plane z = ax + by + c
          int rand_num[3];
          float check, distance2;
          int total;

          COL = rect.x;
          ROW = rect.y;
          FACE_WIDTH = rect.width;
          FACE_HEIGHT = rect.height; 
          faceno0_num = FACE_HEIGHT*FACE_WIDTH -1; 
          k = 0;
          for(int m = 1; m < FACE_HEIGHT+1; m++)
          {
              for(int n = 1; n < FACE_WIDTH+1; n++)
              {
                  // Mat::at takes (row, col): the row index comes from ROW/m, the column from COL/n
                  ushort tmp = DepthMat.at<ushort>(ROW+m-1, COL+n-1);
                  if (tmp == 0)
                  {
                      faceno0_num -= 1; // after the loop, faceno0_num+1 is the non-zero depth count
                      continue;
                  }
                  FaceDATA[0][k] = n;
                  FaceDATA[1][k] = m;
                  FaceDATA[2][k] = tmp;
                  k += 1;
              }
          }

          if(faceno0_num > 2000){
            pretotal = 0;  // best inlier count so far
            srand((unsigned)time(NULL));
            total = 0;
            for(k = 0; k < ITER; k++)
            {
              do{
                  rand_num[0] = std::rand()%faceno0_num; 
                  rand_num[1] = std::rand()%faceno0_num; 
                  rand_num[2] = std::rand()%faceno0_num; 
              }while(rand_num[0] == rand_num[1] || rand_num[0] == rand_num[2] || rand_num[1] == rand_num[2]);
              for(int n = 0; n < 3; n++ )
              {
                  x[n] = FaceDATA[0][rand_num[n]];
                  y[n] = FaceDATA[1][rand_num[n]];
                  z[n] = FaceDATA[2][rand_num[n]];
              }
              check = (x[0]-x[1])*(y[0]-y[2]) - (x[0]-x[2])*(y[0]-y[1]);
              if ( check == 0)  // degenerate sample: avoid a division by zero (floating point exception)
              {
                  k -= 1;
                  continue;
              }
              a = ( (z[0]-z[1])*(y[0]-y[2]) - (z[0]-z[2])*(y[0]-y[1]) )*1.0/check;
              if (y[0] == y[2])  // degenerate sample: avoid a division by zero (floating point exception)
              {
                  k -= 1;
                  continue;
              }
              b = ((z[0] - z[2]) - a * (x[0] - x[2]))*1.0/(y[0]-y[2]);
              c = z[0]- a * x[0] - b * y[0];
              total = 0;
              for(int n = 0; n < faceno0_num +1 ; n++ )
              {
                  distance2 = fabs(a*FaceDATA[0][n] + b*FaceDATA[1][n] - 1*FaceDATA[2][n] + c*1);
                  if (distance2 < SIGMA)
                  {
                      total +=1;
                  }
              }
              if (total > pretotal)  // keep the plane with the most inliers
              {
                  pretotal=total;
              }
            }

            pretotal_ary = pretotal *1.0/ faceno0_num;
            if (pretotal_ary < PLANE_OR_NOT){
              livedetector = 1;
            }
            else{
              livedetector = 0;
            }
          
          }
          else{
            livedetector = -2;
          }
        }

        if(livedetector == -1){
          who = "More than one face";
          isface = "Please detect one face";
        }
        if(livedetector == 1){
          isface = "Is FACE";
        }
        if(livedetector == 0){
          isface = "Is not FACE";
        }
        if(livedetector == -2){
          isface = "Lack of depth information";
          pretotal_ary = 0;
        }

        stringstream strStream1; 
        strStream1 << who << " , " << who_probability << "%"; 
        string str1 = strStream1.str();  

        stringstream strStream2; 
        strStream2 << isface << " , " << "pretotal_ary:" << pretotal_ary; 
        string str2 = strStream2.str();  

        cv::putText(cImageBGR, "RECOGNITION RESULT: ", cv::Point(20,20), cv::FONT_HERSHEY_COMPLEX,0.4,Scalar(255, 255, 255),1,8,0);
        cv::putText(cImageBGR, str1, cv::Point(20,40), cv::FONT_HERSHEY_COMPLEX,0.5,Scalar(0, 255, 0),1,8,0);
        cv::putText(cImageBGR, "LIVENESS DETECTION RESULT: ", cv::Point(20,60), cv::FONT_HERSHEY_COMPLEX,0.4,Scalar(255, 255, 255),1,8,0);
        cv::putText(cImageBGR, str2, cv::Point(20,80), cv::FONT_HERSHEY_COMPLEX,0.5,Scalar(0, 255, 0),1,8,0);
      }

      cv::imshow( "Color Image", cImageBGR);
    }

    // 6a. check keyboard
    if( cv::waitKey( 1 ) == 'q' ){
      break;
    }
  }

  // 9. stop
  mDepthStream.destroy();
  mColorStream.destroy();
  mDevice.close();
  OpenNI::shutdown();

  return 0;
}

#this_is_who.cpp

#include <dlib/dnn.h>
#include <dlib/image_processing/frontal_face_detector.h>
#include <dlib/image_processing.h>
#include <dlib/gui_widgets.h>
#include <dlib/image_io.h>

#include <iostream>
#include <time.h>
#include <dirent.h>
#include <string.h>
#include <math.h>

using namespace dlib;
using namespace std;

/* Function declarations */
/* Locate the face and return the recognition result */
string face_location(const char *imgFile,std::vector<int>&locates, std::vector<matrix<float,0,1>>&candidates_descriptors,std::vector<string>&candidates);
/* Decide whether the face is live */
bool liveness_detection(const char *DeepFile,std::vector<int>&locates);

const int IMG_HEIGHT =  720;
const int IMG_WIDTH =  1280;

template <template <int,template<typename>class,int,typename> class block, int N, template<typename>class BN, typename SUBNET>
using residual = add_prev1<block<N,BN,1,tag1<SUBNET>>>;
template <template <int,template<typename>class,int,typename> class block, int N, template<typename>class BN, typename SUBNET>
using residual_down = add_prev2<avg_pool<2,2,2,2,skip1<tag2<block<N,BN,2,tag1<SUBNET>>>>>>;
template <int N, template <typename> class BN, int stride, typename SUBNET> 
using block  = BN<con<N,3,3,1,1,relu<BN<con<N,3,3,stride,stride,SUBNET>>>>>;
template <int N, typename SUBNET> using ares      = relu<residual<block,N,affine,SUBNET>>;
template <int N, typename SUBNET> using ares_down = relu<residual_down<block,N,affine,SUBNET>>;
template <typename SUBNET> using alevel0 = ares_down<256,SUBNET>;
template <typename SUBNET> using alevel1 = ares<256,ares<256,ares_down<256,SUBNET>>>;
template <typename SUBNET> using alevel2 = ares<128,ares<128,ares_down<128,SUBNET>>>;
template <typename SUBNET> using alevel3 = ares<64,ares<64,ares<64,ares_down<64,SUBNET>>>>;
template <typename SUBNET> using alevel4 = ares<32,ares<32,ares<32,SUBNET>>>;
using anet_type = loss_metric<fc_no_bias<128,avg_pool_everything<
    alevel0<
    alevel1<
    alevel2<
    alevel3<
    alevel4<
    max_pool<3,3,2,2,relu<affine<con<32,7,7,2,2,
    input_rgb_image_sized<150>
    >>>>>>>>>>>>;

int main(int argc, char** argv)
{
    if (argc == 1)
        {
            cout << "\nCall this program like this:" << endl;
            cout << "./this_is_who ../data/allface/0004_IR_allleft.jpg ../data/allface/0004_raw_allleft.raw" << endl;
            return 0;
        }
    
    const char *imgFile = argv[1];
    const char *DeepFile = argv[2];

    
    std::vector<matrix<float,0,1>> candidates_descriptors;
    deserialize("../candidates_descriptors.dat") >> candidates_descriptors;

    std::vector<string> candidates;
    deserialize("../candidates.dat") >> candidates;
   
    std::vector<int> locates;

    /* locate the face and get the recognition result */
    string who = face_location(imgFile, locates, candidates_descriptors,candidates);
    cout << "\nRECOGNITION RESULT:" << endl;
    cout << "This is " << who << endl;

    // the depth image is horizontally mirrored relative to the IR image
    locates[0] = IMG_WIDTH - locates[0] -locates[2];

    /* liveness check */
    liveness_detection( DeepFile, locates);

}

/* Locate the face and return the recognition result */
string face_location(const char* imgFile,std::vector<int>&locates, std::vector<matrix<float,0,1>>&candidates_descriptors,std::vector<string>&candidates)
{   
    frontal_face_detector detector = get_frontal_face_detector(); // frontal face detector
    shape_predictor sp;  // facial landmark predictor
    anet_type net;       // face recognition network

    deserialize("../model/shape_predictor_68_face_landmarks.dat") >> sp;
    deserialize("../model/dlib_face_recognition_resnet_model_v1.dat") >> net;
    
    cout << "\nprocessing image " << imgFile << endl;
    matrix<rgb_pixel> img;
    load_image(img, imgFile);
    std::vector<rectangle> dets = detector(img);
    // cout << "Number of faces detected: " << dets.size() << endl;
    if (dets.empty())  // guard: bail out if no face is found
    {
        cout << "No face detected in " << imgFile << endl;
        exit(1);
    }
    locates.push_back(dets[0].left());
    locates.push_back(dets[0].top());
    locates.push_back(dets[0].right() - dets[0].left());
    locates.push_back(dets[0].bottom() - dets[0].top());
    full_object_detection shape = sp(img, dets[0]);
    std::vector<matrix<rgb_pixel>> faces;
    matrix<rgb_pixel> face_chip;
    extract_image_chip(img, get_face_chip_details(shape,150,0.25), face_chip);
    faces.push_back(move(face_chip));
    std::vector<matrix<float,0,1>> face_descriptors = net(faces);
    float distance;
    float best_distance = length(face_descriptors[0]-candidates_descriptors[0]);
    size_t candidates_num = candidates_descriptors.size();
    int candidates_num_int = static_cast<int>(candidates_num);
    int best_k = 0;
    for (int k = 1; k < candidates_num_int; k++)
    {
        distance = length(face_descriptors[0]-candidates_descriptors[k]);
        if (distance < best_distance) 
        {
            best_distance = distance;
            best_k = k;
        }
    }
    string who;
    if (best_distance < 0.6) {
        who = candidates[best_k];
    }
    else{
        who = "Unknown";
    }
    return who;
}
/* Decide whether the face region is live */
bool liveness_detection(const char *DeepFile,std::vector<int>&locates)
{
    const int ITER = 5000;           // number of RANSAC iterations
    const float PLANE_OR_NOT = 0.2;  // inlier-ratio threshold separating plane from non-plane
    const int SIGMA = 1;             // inlier distance threshold
    typedef unsigned short UINT16;
    // read the 16-bit binary depth data from the .raw file into MatDATA
    static UINT16 MatDATA[IMG_HEIGHT*IMG_WIDTH]; // static: keeps ~1.8 MB off the stack
    FILE *fp = fopen( DeepFile, "rb" );
    if (fp == NULL) {
        cout << "DeepFile open error!" << endl;
        return false;
    }
    size_t sizeRead = fread(MatDATA,sizeof(UINT16),IMG_HEIGHT*IMG_WIDTH,fp);
    fclose(fp);
    if (sizeRead != IMG_HEIGHT*IMG_WIDTH) {
        cout << "DeepFile read error!" << endl;
        return false;
    }
    int n = 0;
    int i,j;
    int COL = locates[0],ROW = locates[1],FACE_WIDTH = locates[2],FACE_HEIGHT = locates[3]; // face rectangle
    // e.g. "157 66 172 198" means rows 66..66+198 and columns 157..157+172
    int faceno0_num = FACE_HEIGHT*FACE_WIDTH -1;
    int FaceDATA[3][100000];
    n = 0;
    for(i = 1;i< FACE_HEIGHT+1;i++)
        {
            for(j= 1;j< FACE_WIDTH+1;j++) 
            { 
                if (MatDATA[IMG_WIDTH*(ROW+i-2)+COL+j-2] == 0)
                {
                    faceno0_num -= 1; // after the loop, faceno0_num+1 is the non-zero depth count
                    continue;
                }
                FaceDATA[1][n] = i;
                FaceDATA[0][n] = j; 
                FaceDATA[2][n] = MatDATA[IMG_WIDTH*(ROW+i-2)+COL+j-2];
                n += 1;
            } 
        } 

    int pretotal = 0;  // best inlier count so far
    int x[3],y[3],z[3];  // three randomly sampled points
    srand((unsigned)time(NULL));
    float a,b,c;  // fitted plane z = ax + by + c
    // float besta,bestb,bestc;  // best-fit parameters
    int rand_num[3];
    float check,distance;
    int total = 0;
    for(i = 0; i < ITER; i++)
    {
        do{
            rand_num[0] = std::rand()%faceno0_num; 
            rand_num[1] = std::rand()%faceno0_num; 
            rand_num[2] = std::rand()%faceno0_num; 
        }while(rand_num[0] == rand_num[1] || rand_num[0] == rand_num[2] || rand_num[1] == rand_num[2]);
        for(n = 0; n < 3; n++ )
        {
            x[n] = FaceDATA[0][rand_num[n]];
            y[n] = FaceDATA[1][rand_num[n]];
            z[n] = FaceDATA[2][rand_num[n]];
        }
        check = (x[0]-x[1])*(y[0]-y[2]) - (x[0]-x[2])*(y[0]-y[1]);
        if ( check == 0)  // degenerate sample: avoid a division by zero (floating point exception)
        {
            i -= 1;
            continue;
        }
        a = ( (z[0]-z[1])*(y[0]-y[2]) - (z[0]-z[2])*(y[0]-y[1]) )*1.0/check;
        if (y[0] == y[2])  // degenerate sample: avoid a division by zero (floating point exception)
        {
            i -= 1;
            continue;
        }
        b = ((z[0] - z[2]) - a * (x[0] - x[2]))*1.0/(y[0]-y[2]);
        c = z[0]- a * x[0] - b * y[0];
        total = 0;
        for(n = 0; n < faceno0_num +1 ; n++ )
        {
            distance = fabs(a*FaceDATA[0][n] + b*FaceDATA[1][n] - 1*FaceDATA[2][n] + c*1);
            if (distance < SIGMA)
            {
                total +=1;
            }
        }
        if (total > pretotal)  // keep the plane with the most inliers
        {
            pretotal=total;
            // besta = a;
            // bestb = b;
            // bestc = c;
        }
    }
    float pretotal_ary = pretotal *1.0/ faceno0_num ;
    cout << "\nLIVENESS DETECTION RESULT:"  << endl;
    bool IS_FACE;
    if (pretotal_ary < PLANE_OR_NOT)
    {
        IS_FACE =  true;
        cout << "pretotal_ary = " << pretotal_ary  << " , Is FACE!" << endl;
    }
    else
    {
        IS_FACE = false;
        cout << "pretotal_ary = " << pretotal_ary  << " , Is not FACE!" << endl;
    }
    return  IS_FACE;
}

The code is fairly rough, and I probably won't be updating it again...