1. 程式人生 > 其它 > OpenCV之使用EigenFaceRecognizer來實現人臉識別

OpenCV之使用EigenFaceRecognizer來實現人臉識別

一、概述

  案例:使用EigenFaceRecognizer來實現人臉識別演算法

  實現步驟:

    1.準備人臉資料(人臉和人臉對應的標籤),ps:預留一個或幾個樣本用來測試

    2.將樣本資料和樣本對應的標籤資料從檔案中讀取出來並分別存入集合

    3.例項化EigenFaceRecognizer

    4.將準備好的人臉集合和標籤集合放入EigenFaceRecognizer.train函式中進行訓練

    5.訓練好資料後執行predict方法進行預測

    6.假如預留樣本的標籤值與執行predict預測後的標籤值是一致的就說明我們預測成功了。

  ps:使用這個演算法來實現人臉識別時樣本影象和實際的影象大小必須要一致,否則演算法會出現不工作的情況。

二、程式碼示例

Face_Eigen_Face_Recognizer::Face_Eigen_Face_Recognizer(QWidget *parent)
    : MyGraphicsView{parent}
{
    this->setWindowTitle("特徵臉識別器");
    QPushButton * btn = new QPushButton(this);
    btn->setText("讀取資料");
    connect(btn,&QPushButton::clicked,[=](){
        QString srcDirPath 
= QFileDialog::getExistingDirectory( this, "choose src Directory", "/Users/yangwei/Documents/tony/opencv/orl_faces"); if (srcDirPath.isEmpty()) { return; } else { string filename = string("/Users/yangwei/Documents/tony/opencv/orl_faces/targetData.txt
"); out.open(filename,ios::out); qDebug() << "srcDirPath=" << srcDirPath; srcDirPath += "/"; prepareImageData(srcDirPath.toStdString().c_str(),""); out.close(); } }); QPushButton *btnShow = new QPushButton(this); btnShow->move(0,btn->y()+btn->height()+5); btnShow->setText("開始檢測特徵臉"); connect(btnShow,&QPushButton::clicked,[=](){ showEgenFaceRecoginzer(""); }); } void Face_Eigen_Face_Recognizer::dropEvent(QDropEvent *event){ path = event->mimeData()->urls().at(0).toLocalFile(); showEgenFaceRecoginzer(path.toStdString().c_str()); } void Face_Eigen_Face_Recognizer::showEgenFaceRecoginzer(const char * filePath){ string filename = string("/Users/yangwei/Documents/tony/opencv/orl_faces/targetData.txt"); ifstream file(filename,ifstream::in); string line,path,classLabel;//行、路徑、標籤 vector<Mat> images; vector<int> labels; while(getline(file,line)){ stringstream liness(line); getline(liness,path,' '); getline(liness,classLabel); // if (!path.empty() && !labels.empty()) { cout << "path :"<< classLabel.c_str()<<endl;; images.push_back(imread(path, 0)); labels.push_back(atoi(classLabel.c_str())); // } } file.close(); if (images.size() < 1 || labels.size() < 1) { qDebug()<<"invalid image path...\n"; return; } int width = images[0].cols; int height = images[0].rows; cout << "width:"<<width<<"|"<<"height:"<<height<<endl; //準備測試資料和測試label Mat testMatSample = images[images.size()-1]; int testLabel = labels[labels.size()-1]; imshow("testMatSample",testMatSample); images.pop_back(); labels.pop_back(); //接下來就是最重要的步驟 //1.訓練 Ptr<BasicFaceRecognizer> model = EigenFaceRecognizer::create(); model->train(images,labels); //2.預測 int predictedLabel = model->predict(testMatSample); //此處如果樣本和預測結果是一致的就說明此次識別是演算法是成功的 cout << "testLabel:"<<testLabel<<endl; cout <<"predictedLabel:"<<predictedLabel<<endl; //從訓練結果中獲取均值、特徵向量、特徵值矩陣 Mat eigenvalues = model->getEigenValues(); Mat eigenvectors = model->getEigenVectors(); Mat mean = model->getMean(); //得到均值臉 Mat meanFace = mean.reshape(1,height); Mat dst; 
//歸一化0~255並輸出 if(meanFace.channels()==1){//單通道影象 normalize(meanFace,dst,0,255,NORM_MINMAX,CV_8UC1); }else{//多通道影象 normalize(meanFace,dst,0,255,NORM_MINMAX,CV_8UC3); } imshow("dist",dst); // //輸出特徵臉 // for (int i = 0; i < min(16, eigenvectors.cols); i++) { // Mat ev = eigenvectors.col(i).clone(); // Mat grayscale; // Mat eigenFace = ev.reshape(1, height); // if (eigenFace.channels() == 1) { // normalize(eigenFace, grayscale, 0, 255, NORM_MINMAX, CV_8UC1); // } // else if (eigenFace.channels() == 3) { // normalize(eigenFace, grayscale, 0, 255, NORM_MINMAX, CV_8UC3); // } // Mat colorface; // applyColorMap(grayscale, colorface, COLORMAP_BONE); // char* winTitle = new char[128]; // sprintf(winTitle, "eigenface_%d", i); // imshow(winTitle, colorface); // } // for (int num = 0; num < min(eigenvectors.cols, 16); num++) { // Mat evs = eigenvectors.col(num); // Mat projection = LDA::subspaceProject(evs, mean, images[0].reshape(1, 1)); // Mat reconstruction = LDA::subspaceReconstruct(evs, mean, projection); // Mat result = reconstruction.reshape(1, height); // if (result.channels() == 1) { // normalize(result, reconstruction, 0, 255, NORM_MINMAX, CV_8UC1); // } // else if (result.channels() == 3) { // normalize(result, reconstruction, 0, 255, NORM_MINMAX, CV_8UC3); // } // char* winTitle = new char[128]; // sprintf(winTitle, "recon_face_%d", num); // imshow(winTitle, reconstruction); // } }

 

三、演示影象

  預測結果: