特徵檢測與特徵匹配
一、使用surf演算法實現
1、繪製關鍵點函式
void drawKeypoints(const Mat &image,const vector<KeyPoint>&keypoints,Mat &outImage,const Scalar &color=Scalar::all(-1),int flags = DrawMatchesFlags::DEFAULT)
引數一:輸入影象
引數二:根據影象得到的特徵點
引數三:輸出影象,其內容取決於引數五
引數四:關鍵點的顏色
引數五:繪製關鍵點的特徵識別符號
DEFAULT=0;對每一個關鍵點只繪製中間點
DRAW_OVER_OUTIMG=1;不建立輸出影象陣列,而是在輸出影象上繪製匹配對
NOT_DRAW_SINGLE_POINTS=2;單點特徵點不被繪製
DRAW_RICH_KEYPOINTS=4;對每一個關鍵點,繪製帶大小和方向的關鍵點圓圈
2、繪製相匹配的兩影象的特徵點
(1)、void drawMatches(const Mat &img1,
const vector<KeyPoint>& keypoints1,
const Mat &img2,
const vector<KeyPoint>&keypoints2,
const vector<DMatch>&matches1to2,
Mat &outImg,
const Scalar &matchColor=Scalar::all(-1),
const Scalar &singlePointColor=Scalar::all(-1),
const vector<char>&matchesMask=vector<char>(),
int flags=DrawMatchesFlags::DEFAULT)
(2)、
void drawMatches(const Mat &img1,
const vector<KeyPoint>& keypoints1,
const Mat &img2,
const vector<KeyPoint>&keypoints2,
const vector<vector<DMatch> >&matches1to2,
Mat &outImg,
const Scalar &matchColor=Scalar::all(-1),
const Scalar &singlePointColor=Scalar::all(-1),
const vector<vector<char> >&matchesMask=vector<vector<char> >(),
int flags=DrawMatchesFlags::DEFAULT)
引數一:第一幅影象
引數二:第一幅影象的特徵點
引數三:第二幅影象
引數四:第二幅影象的特徵點
引數五:第一幅影象到第二幅影象的匹配結果(每個DMatch把keypoints1中的點對應到keypoints2中的點)
引數六:輸出影象,其內容取決於引數十flags
引數七:匹配的顏色,即線和關鍵點的顏色
引數八:單一特徵點的顏色
引數九:可選掩膜,確定哪些匹配要被繪製
引數十:識別符號
DEFAULT=0;對每一個關鍵點只繪製中間點
DRAW_OVER_OUTIMG=1;不建立輸出影象陣列,而是在輸出影象上繪製匹配對
NOT_DRAW_SINGLE_POINTS=2;單點特徵點不被繪製
DRAW_RICH_KEYPOINTS=4;對每一個關鍵點,繪製帶大小和方向的關鍵點圓圈
示例1:繪製特徵點
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace std;
using namespace cv;

// Example 1: detect SURF keypoints in two images and draw them.
// Usage: <program> <image1> <image2>
int main(int argc, char *argv[])
{
    // Guard against missing command-line arguments before touching argv.
    if (argc < 3)
    {
        cout << "Usage: " << argv[0] << " <image1> <image2>" << endl;
        return -1;
    }
    Mat srcImage1 = imread(argv[1]);
    Mat srcImage2 = imread(argv[2]);
    if (!srcImage1.data || !srcImage2.data)
    {
        cout << "讀取圖片出錯" << endl;
        return -1;
    }
    imshow("srcImage1", srcImage1);
    imshow("srcImage2", srcImage2);

    // Hessian threshold: larger values keep fewer, more stable keypoints.
    int minHessian = 400;
    SurfFeatureDetector detector(minHessian);
    vector<KeyPoint> keyPoint1, keyPoint2;
    detector.detect(srcImage1, keyPoint1);
    detector.detect(srcImage2, keyPoint2);

    Mat img1, img2;
    drawKeypoints(srcImage1, keyPoint1, img1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
    // BUG FIX: original passed keyPoint1 here, drawing image 1's keypoints
    // on image 2; it must use keyPoint2.
    drawKeypoints(srcImage2, keyPoint2, img2, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
    imshow("img1", img1);
    imshow("img2", img2);
    waitKey(0);
    return 0;
}
示例2:特徵提取
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/highgui/highgui.hpp>
// BUG FIX: BruteForceMatcher lives in the legacy module in OpenCV 2.x;
// without this include the example does not compile.
#include <opencv2/legacy/legacy.hpp>
#include <iostream>
using namespace std;
using namespace cv;

// Example 2: SURF detection + descriptor extraction, then brute-force
// matching (L2 distance) and visualization of all matches.
// Usage: <program> <image1> <image2>
int main(int argc, char *argv[])
{
    // Guard against missing command-line arguments before touching argv.
    if (argc < 3)
    {
        cout << "Usage: " << argv[0] << " <image1> <image2>" << endl;
        return -1;
    }
    Mat srcImage1 = imread(argv[1]);
    Mat srcImage2 = imread(argv[2]);
    if (!srcImage1.data || !srcImage2.data)
    {
        cout << "讀取圖片出錯" << endl;
        return -1;
    }
    imshow("srcImage1", srcImage1);
    imshow("srcImage2", srcImage2);

    // Hessian threshold: larger values keep fewer, more stable keypoints.
    int minHessian = 700;
    SurfFeatureDetector detector(minHessian);
    vector<KeyPoint> keyPoint1, keyPoint2;
    detector.detect(srcImage1, keyPoint1);
    detector.detect(srcImage2, keyPoint2);

    // Compute a SURF descriptor for every detected keypoint.
    SurfDescriptorExtractor extractor;
    Mat descriptor1, descriptor2;
    extractor.compute(srcImage1, keyPoint1, descriptor1);
    extractor.compute(srcImage2, keyPoint2, descriptor2);

    // Brute-force matcher with Euclidean (L2) distance on float descriptors.
    BruteForceMatcher<L2<float> > matcher;
    vector<DMatch> matches;
    matcher.match(descriptor1, descriptor2, matches);

    Mat imageMatches;
    drawMatches(srcImage1, keyPoint1, srcImage2, keyPoint2, matches, imageMatches);
    imshow("匹配圖", imageMatches);
    waitKey(0);
    return 0;
}
二、使用flann演算法實現
void DescriptorMatcher::match(
const Mat &queryDescriptors,//查詢描述符集
const Mat &trainDescriptors,//訓練描述符集
vector<DMatch>&matches,//匹配結果集
const Mat& mask=Mat())//指定輸入查詢和訓練描述符允許匹配的掩摸
void DescriptorMatcher::match(
const Mat &queryDescriptors,//查詢描述符集
vector<DMatch>&matches,//匹配結果集
const vector<Mat>&masks = vector<Mat>())//一組掩摸,指定輸入查詢和訓練描述符允許匹配的掩摸
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/legacy/legacy.hpp>
#include <iostream>
using namespace std;
using namespace cv;

// FLANN-based matching of SURF descriptors; matches with distance below
// 2 * min_dist are kept (a common distance-threshold filter).
// Usage: <program> <image1> <image2>
int main(int argc, char *argv[])
{
    // Guard against missing command-line arguments before touching argv.
    if (argc < 3)
    {
        cout << "Usage: " << argv[0] << " <image1> <image2>" << endl;
        return -1;
    }
    Mat srcImage1 = imread(argv[1]);
    Mat srcImage2 = imread(argv[2]);
    if (!srcImage1.data || !srcImage2.data)
    {
        cout << "讀取圖片出錯" << endl;
        return -1;
    }
    imshow("srcImage1", srcImage1);
    imshow("srcImage2", srcImage2);

    // Hessian threshold: larger values keep fewer, more stable keypoints.
    int minHessian = 300;
    SurfFeatureDetector detector(minHessian);
    vector<KeyPoint> keyPoint1, keyPoint2;
    detector.detect(srcImage1, keyPoint1);
    detector.detect(srcImage2, keyPoint2);

    // Compute a SURF descriptor for every detected keypoint.
    SurfDescriptorExtractor extractor;
    Mat descriptor1, descriptor2;
    extractor.compute(srcImage1, keyPoint1, descriptor1);
    extractor.compute(srcImage2, keyPoint2, descriptor2);

    // FLANN matcher: fast approximate nearest-neighbour search.
    FlannBasedMatcher matcher;
    vector<DMatch> matches;
    matcher.match(descriptor1, descriptor2, matches);

    // Find the min/max match distance over all query descriptors.
    // BUG FIX: original loop read undefined `descriptors1`; the variable
    // is declared as `descriptor1`.
    double max_dist = 0, min_dist = 100;
    for (int i = 0; i < descriptor1.rows; i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    printf("max_dist:%f\n", max_dist);
    printf("min_dist:%f\n", min_dist);

    // Keep only "good" matches: distance below twice the best distance.
    vector<DMatch> good_matches;
    for (int i = 0; i < descriptor1.rows; i++)
    {
        if (matches[i].distance < 2 * min_dist)
        {
            good_matches.push_back(matches[i]);
        }
    }

    Mat imageMatches;
    drawMatches(srcImage1, keyPoint1, srcImage2, keyPoint2,
                good_matches, imageMatches, Scalar::all(-1), Scalar::all(-1),
                vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    // BUG FIX: loop index was int compared against size_t, and %d received
    // a size_t; use size_t and cast for printf.
    for (size_t i = 0; i < good_matches.size(); i++)
    {
        printf("符合條件的匹配點[%d]特徵點1:%d--特徵點2:%d\n",
               (int)i, good_matches[i].queryIdx, good_matches[i].trainIdx);
    }
    imshow("匹配圖", imageMatches);
    waitKey(0);
    return 0;
}
三、使用surf檢測特徵點,flann匹配
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace std;
using namespace cv;

// Match SURF features between a training image and live camera frames,
// using a FLANN matcher and Lowe's ratio test to filter matches.
// Usage: <program> <trainImage>
int main(int argc, char *argv[])
{
    // Guard against a missing argument / unreadable image before use.
    if (argc < 2)
    {
        cout << "Usage: " << argv[0] << " <trainImage>" << endl;
        return -1;
    }
    // Load the training image and convert it to grayscale.
    Mat trainImage = imread(argv[1]);
    if (trainImage.empty())
    {
        cout << "讀取圖片出錯" << endl;
        return -1;
    }
    Mat trainImage_gray;
    imshow("原始圖", trainImage);
    cvtColor(trainImage, trainImage_gray, COLOR_BGR2GRAY);

    // Detect SURF keypoints and extract training-image descriptors.
    // BUG FIX: class name was misspelled `SurfFeaturDetector`.
    vector<KeyPoint> train_keyPoint;
    Mat trainDescriptor;
    SurfFeatureDetector featureDetector(80);
    featureDetector.detect(trainImage_gray, train_keyPoint);
    SurfDescriptorExtractor featureExtractor;
    featureExtractor.compute(trainImage_gray, train_keyPoint, trainDescriptor);

    // Create a FLANN-based descriptor matcher and pre-train its index.
    // BUG FIXES: class name was `FlannBaseMatcher`; `matcher,add(...)`
    // used a comma instead of a member access dot.
    FlannBasedMatcher matcher;
    vector<Mat> train_desc_collection(1, trainDescriptor);
    matcher.add(train_desc_collection);
    matcher.train();

    // Open the default camera.
    VideoCapture cap(0);

    // Process frames until the 'q' key is pressed.
    while (char(waitKey(1)) != 'q')
    {
        int64 time0 = getTickCount();
        Mat testImage, testImage_gray;
        cap >> testImage;
        if (testImage.empty())
            continue;
        cvtColor(testImage, testImage_gray, COLOR_BGR2GRAY);

        // Detect keypoints / compute descriptors for the current frame.
        vector<KeyPoint> test_keyPoint;
        Mat testDescriptor;
        featureDetector.detect(testImage_gray, test_keyPoint);
        featureExtractor.compute(testImage_gray, test_keyPoint, testDescriptor);

        // k-NN match (k = 2) against the trained descriptors.
        vector<vector<DMatch> > matches;
        matcher.knnMatch(testDescriptor, matches, 2);

        // Lowe's ratio test: accept a match only if it is clearly better
        // than its runner-up. BUG FIXES: `matches,size()` comma typo and a
        // missing semicolon; also guard against fewer than 2 neighbours.
        vector<DMatch> goodMatches;
        for (unsigned int i = 0; i < matches.size(); i++)
        {
            if (matches[i].size() >= 2 &&
                matches[i][0].distance < 0.6 * matches[i][1].distance)
                goodMatches.push_back(matches[i][0]);
        }

        // Draw the accepted matches and show them in a window.
        Mat dstImage;
        drawMatches(testImage, test_keyPoint, trainImage,
                    train_keyPoint, goodMatches, dstImage);
        imshow("匹配視窗", dstImage);
        // BUG FIX: `getTickCount` was missing its call parentheses.
        cout << "當前頻率為:" << getTickFrequency() / (getTickCount() - time0) << endl;
    }
    return 0;
}
四、SIFT演算法暴力匹配(比SURF演算法慢3倍)
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace std;
using namespace cv;

// Match SIFT features between a training image and live camera frames,
// using brute-force matching (BFMatcher) and Lowe's ratio test.
// Usage: <program> <trainImage>
int main(int argc, char *argv[])
{
    // Guard against a missing argument / unreadable image before use.
    if (argc < 2)
    {
        cout << "Usage: " << argv[0] << " <trainImage>" << endl;
        return -1;
    }
    // Load the training image and convert it to grayscale.
    Mat trainImage = imread(argv[1]);
    if (trainImage.empty())
    {
        cout << "讀取圖片出錯" << endl;
        return -1;
    }
    Mat trainImage_gray;
    imshow("原始圖", trainImage);
    cvtColor(trainImage, trainImage_gray, COLOR_BGR2GRAY);

    // Detect SIFT keypoints and extract training-image descriptors.
    // BUG FIX: class name was misspelled `SiftFeaturDetector`.
    vector<KeyPoint> train_keyPoint;
    Mat trainDescriptor;
    SiftFeatureDetector featureDetector(80);
    featureDetector.detect(trainImage_gray, train_keyPoint);
    SiftDescriptorExtractor featureExtractor;
    featureExtractor.compute(trainImage_gray, train_keyPoint, trainDescriptor);

    // Create a brute-force descriptor matcher (the original comment said
    // FLANN, but BFMatcher performs exhaustive matching) and pre-train it.
    // BUG FIX: `matcher,add(...)` used a comma instead of a dot.
    BFMatcher matcher;
    vector<Mat> train_desc_collection(1, trainDescriptor);
    matcher.add(train_desc_collection);
    matcher.train();

    // Open the default camera.
    VideoCapture cap(0);

    // Process frames until the 'q' key is pressed.
    while (char(waitKey(1)) != 'q')
    {
        int64 time0 = getTickCount();
        Mat testImage, testImage_gray;
        cap >> testImage;
        if (testImage.empty())
            continue;
        cvtColor(testImage, testImage_gray, COLOR_BGR2GRAY);

        // Detect keypoints / compute descriptors for the current frame.
        vector<KeyPoint> test_keyPoint;
        Mat testDescriptor;
        featureDetector.detect(testImage_gray, test_keyPoint);
        featureExtractor.compute(testImage_gray, test_keyPoint, testDescriptor);

        // k-NN match (k = 2) against the trained descriptors.
        vector<vector<DMatch> > matches;
        matcher.knnMatch(testDescriptor, matches, 2);

        // Lowe's ratio test: accept a match only if it is clearly better
        // than its runner-up. BUG FIXES: `matches,size()` comma typo and a
        // missing semicolon; also guard against fewer than 2 neighbours.
        vector<DMatch> goodMatches;
        for (unsigned int i = 0; i < matches.size(); i++)
        {
            if (matches[i].size() >= 2 &&
                matches[i][0].distance < 0.6 * matches[i][1].distance)
                goodMatches.push_back(matches[i][0]);
        }

        // Draw the accepted matches and show them in a window.
        Mat dstImage;
        drawMatches(testImage, test_keyPoint, trainImage,
                    train_keyPoint, goodMatches, dstImage);
        imshow("匹配視窗", dstImage);
        // BUG FIX: `getTickCount` was missing its call parentheses.
        cout << "當前頻率為:" << getTickFrequency() / (getTickCount() - time0) << endl;
    }
    return 0;
}
五、尋找已知物體
步驟:
(1)、使用函式findHomography尋找匹配上的關鍵點的變換
(2)、使用函式perspectiveTransform來對映點
1、尋找透視變換矩陣
Mat findHomography(
InputArray srcPoints,
InputArray dstPoints,
int method = 0,
double ransacReprojThreshold = 3,
OutputArray mask = noArray()
)
引數一:源平面上對應的點
引數二:目標平面對應的點
引數三:可選識別符號
0 使用所有點的常規方法
CV_RANSAC 基於RANSAC魯棒性的方法
CV_LMEDS 最小中值魯棒性方法
引數四:取值範圍1-10,處理點對為內層時,允許重投影誤差的最大值。
引數五:可選掩膜,輸入掩膜值會忽略魯棒性
2、進行透視矩陣變換
void perspectiveTransform(InputArray src,OutputArray dst,InputArray m)
引數一:輸入影象
引數二:輸出結果
引數三:變換矩陣3*3或4*4