opencv 模板匹配 matchTemplate

模板匹配
在一幅图像中寻找一个特定目标的方法。遍历图像中的每一个位置,比较与模板是否“相似”,当相似度足够高时,就认为找到了目标。
常用于目标检测、相似度分析

//! computes the proximity map for the raster template and the image where the template is searched for
CV_EXPORTS_W void matchTemplate( InputArray image, InputArray templ,
                                 OutputArray result, int method );

功能:在输入搜索图像中按照method方法匹配模板
image
用于搜索的输入图像, 8U 或 32F, 大小 W-H
templ
匹配模板图像,和image类型相同, 大小 w-h
result
匹配结果图像, 类型 32F, 大小 (W-w+1)-(H-h+1)
method
匹配方法,根据实际情况而定:
TM_SQDIFF:
平方差进行匹配,最佳的匹配结果在结果为0处,值越大匹配结果越差
TM_SQDIFF_NORMED:
归一化的平方差进行匹配,最佳匹配在结果为0处
TM_CCORR:
相关性匹配方法,该方法使用源图像与模板图像的互相关(而非卷积)结果进行匹配,最佳匹配位置在值最大处,值越小匹配结果越差
TM_CCORR_NORMED:
归一化的相关性匹配方法,与相关性匹配方法类似,最佳匹配位置也是在值最大处
TM_CCOEFF:
相关性系数匹配方法,该方法使用源图像与其均值的差、模板与其均值的差二者之间的相关性进行匹配,值越大表示匹配效果越好,值接近0表示二者基本不相关;其归一化版本(TM_CCOEFF_NORMED)取值范围为[-1,1],最佳匹配为1,最差匹配为-1
TM_CCOEFF_NORMED:
归一化的相关性系数匹配方法,正值表示匹配的结果较好,负值则表示匹配的效果较差,也是值越大,匹配效果也好

平方差匹配到相关系数匹配,匹配的准确度越来越高(计算代价也随之升高). 通常可测试实验,选择同时兼顾速度和精度的最佳匹配方法

//! finds global minimum and maximum array elements and returns their values and their locations
CV_EXPORTS_W void minMaxLoc(InputArray src, CV_OUT double* minVal,
                           CV_OUT double* maxVal=0, CV_OUT Point* minLoc=0,
                           CV_OUT Point* maxLoc=0, InputArray mask=noArray());

JNI端程序:

tempray.cpp:

#include <string>  
#include <jni.h>
#include <android/log.h>
#include <iostream>
#include <fstream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/ml/ml.hpp>

#ifdef __cplusplus
extern "C" {
#endif

//#define LOG_TAG "show infomation"
// Side length (in pixels) of the square template patch cut around the touch point.
#define Template 100

// Android logcat tag and level-specific logging wrappers.
#define LOG_TAG "libtracking"
#define  LOGI(...)  __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)
#define  LOGD(...)  __android_log_print(ANDROID_LOG_DEBUG,LOG_TAG,__VA_ARGS__)
#define  LOGE(...)  __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)

using namespace cv;  
using namespace std;  

// Shared matching state, written by both JNI entry points below.
// NOTE(review): these globals make the JNI calls non-reentrant — confirm the
// Java side never invokes them from more than one thread at a time.
int result_cols;              // matchTemplate result width  = image_cols - templ_cols + 1
int result_rows;              // matchTemplate result height = image_rows - templ_rows + 1
double minVal;                // smallest score found by minMaxLoc
double maxVal;                // largest score found by minMaxLoc
Point minLoc;                 // location of the minimum score
Point maxLoc;                 // location of the maximum score
Point matchLoc;               // chosen best-match corner (min or max, depending on method)
int match_method = TM_CCORR;//CV_TM_SQDIFF;  // matchTemplate scoring method

JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial2_Tutorial2Activity_grayProc(JNIEnv* env, jclass obj, jlong imageGray,
        jint touch_x,jint touch_y)
{
    // Cuts a Template x Template patch centered on the touch point out of the
    // frame, matches that patch against a working copy of the whole frame,
    // and draws the best-match rectangle back onto the original frame.
    //   imageGray        - address of a cv::Mat passed from the Java side
    //   touch_x, touch_y - touch coordinates (x = column, y = row)

    int width,height;
    int k = 0,n = 0,i,j;
    CvScalar s;

    Mat mat = Mat(*((Mat*)imageGray));  // header copy; pixel data shared with Java
    width = mat.rows;                   // NOTE: width holds rows, height holds cols
    height = mat.cols;                  // (names kept from the original code)

    // matchTemplate result size: (W-w+1) x (H-h+1)
    result_cols = height - Template + 1;
    result_rows = width - Template + 1;

    // Template patch and a working copy of the frame (legacy C-API pixel copy).
    cv::Mat img = cv::Mat(Template,Template,CV_8UC3,cv::Scalar(0,0,0));
    IplImage pI = mat;
    IplImage pI_2 = img;

    Mat img_display = cv::Mat(width,height,CV_8UC3,cv::Scalar(0,0,0));
    IplImage pI_3 = img_display;
    for(i=0;i<width;i++){
        for(j=0;j<height;j++){
            s = cvGet2D(&pI,i,j);
            cvSet2D(&pI_3,i,j,s);
        }
    }

    // Reject touches whose template window would extend past the frame border.
    if((touch_x < (Template/2)) || (touch_x>(height-(Template/2))) || (touch_y < (Template/2)) || (touch_y > (width-(Template/2)))){
        LOGD("touch_x = %d, touch_y = %d, width = %d, height = %d", touch_x, touch_y, width, height);
        return;
    }

    // Copy the patch around (touch_x, touch_y) into the template image.
    // cvGet2D takes (row, col), hence the (j, i) order.
    for(i=(touch_x-(Template/2));i<(touch_x+(Template/2));i++){
        for(j=(touch_y-(Template/2));j<(touch_y+(Template/2));j++){
            s = cvGet2D(&pI,j,i);
            cvSet2D(&pI_2,k,n,s);
            n++;
        }
        k++;
        n=0;
    }

    // BUG FIX: the original passed img_display as both the search image and
    // the matchTemplate output, clobbering the image being searched. Use a
    // dedicated CV_32F result Mat instead (Mat::create takes rows first —
    // the commented-out original also had the arguments swapped).
    Mat result;
    result.create(result_rows, result_cols, CV_32FC1);

    /// Do the Matching and Normalize
    matchTemplate( img_display, img, result, match_method);
    normalize(result, result, 0, 1, NORM_MINMAX, -1, Mat() );

    /// Localizing the best match with minMaxLoc
    minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );

    /// For SQDIFF and SQDIFF_NORMED, the best matches are lower values.
    /// For all the other methods, the higher the better.
    if(match_method == TM_SQDIFF || match_method == TM_SQDIFF_NORMED ){
        matchLoc = minLoc;
    }else{
        matchLoc = maxLoc;
    }

    // Draw the match rectangle on the frame shared with Java.
    rectangle(mat, matchLoc, Point(matchLoc.x + img.cols , matchLoc.y + img.rows ), Scalar::all(0), 2, 8, 0);
}

int run_time;  // NOTE(review): appears unused in this file — candidate for removal

JNIEXPORT jlong JNICALL Java_org_opencv_samples_tutorial2_Tutorial2Activity_grayProc0(JNIEnv* env, jclass obj, jlong imageGray, jlong tempGray)
{
    // Matches the template (tempGray) inside the frame (imageGray), draws the
    // best-match rectangle on a 3-channel copy of the frame, and returns the
    // address of a heap-allocated result Mat (ownership passes to Java, which
    // must release it).
    Mat img_display =  Mat(*(Mat*)imageGray); // frame: Mat address passed from Java
    Mat img  =  Mat(*(Mat*)tempGray);         // template

    // 3-channel copy used for drawing.
    // NOTE(review): assumes the incoming frame is BGRA — confirm against caller.
    Mat input;
    input.create(img_display.size(), CV_8UC3);
    cvtColor(img_display, input, CV_BGRA2BGR);

    // BUG FIX: the original wrote the matchTemplate result back into
    // img_display, its own search image. Match into a dedicated CV_32F
    // result Mat, then normalize it to [0, 1] with NORM_MINMAX.
    Mat result;
    matchTemplate( img_display, img, result, match_method);
    normalize(result, result, 0, 1, NORM_MINMAX, -1, Mat());

    // Localize the best match over the result map.
    minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc, Mat());

    // For SQDIFF and SQDIFF_NORMED the best match is the minimum value;
    // for all other methods it is the maximum.
    if(match_method == TM_SQDIFF || match_method == TM_SQDIFF_NORMED ){
        matchLoc = minLoc;
    }else{
        matchLoc = maxLoc;
    }

    LOGD("matchLoc.x = %d, matchLoc.y = %d, img.cols = %d, img.rows = %d", matchLoc.x, matchLoc.y, img.cols, img.rows);

    // Outline the matched region on the drawing copy.
    rectangle(input, matchLoc, Point(matchLoc.x + img.cols , matchLoc.y + img.rows ), Scalar(255,0,0), 2, 8, 0);//Scalar::all(0)

    Mat *mat = new Mat(input);
    return (jlong) mat; // Mat address returned to Java; Java side owns it now
}

#ifdef __cplusplus
}
#endif

TM_CCOEFF:

(图:TM_CCOEFF 方法的模板匹配结果示例)

TM_SQDIFF:

(图:TM_SQDIFF 方法的模板匹配结果示例)