Extracting SIFT Features with OpenCV and Matching after RANSAC Filtering
阿新 · Published 2018-12-09
Recently I have been looking into how to do feature matching with OpenCV's SIFT feature extraction plus RANSAC filtering, and found that much of the code floating around no longer works with newer OpenCV releases. This post also draws on the experience shared by other bloggers, tidied up in one place; I hope it helps.
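A quick version note before the listing (the code below targets OpenCV 2.4.x, where SIFT lives in the nonfree module): in OpenCV 3.x SIFT moved to the opencv_contrib module xfeatures2d, and from OpenCV 4.4 on it is back in the main library. A minimal sketch of the newer API, assuming an OpenCV >= 4.4 build:

```
#include <vector>
#include "opencv2/opencv.hpp" // OpenCV >= 4.4: SIFT is in the main module again
using namespace cv;

int main()
{
    Mat img1 = imread("1.bmp"), img2 = imread("2.bmp");
    Ptr<SIFT> sift = SIFT::create(); // on 3.x: xfeatures2d::SIFT::create()
    std::vector<KeyPoint> kp1, kp2;
    Mat desc1, desc2;
    // detect keypoints and compute their 128-D descriptors in one call
    sift->detectAndCompute(img1, noArray(), kp1, desc1);
    sift->detectAndCompute(img2, noArray(), kp2, desc2);
    return 0;
}
```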
#include "cv.h"
#include "highgui.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
using namespace cv;
// Read the two images
Mat img1 = imread("1.bmp");
Mat img2 = imread("2.bmp");

// SIFT feature detection
SiftFeatureDetector detector;          // the keypoint detector
vector<KeyPoint> keypoint1, keypoint2; // containers for each image's keypoints
detector.detect(img1, keypoint1);
detector.detect(img2, keypoint2);
```

Or, via the factory interface (in OpenCV 2.4 the nonfree module must be registered first):

```
initModule_nonfree(); // registers SIFT/SURF so the factory can find them
Ptr<FeatureDetector> sift_detector = FeatureDetector::create("SIFT");
sift_detector->detect(img1, keypoint1);
sift_detector->detect(img2, keypoint2);
// Display the detected keypoints
Mat out_img1, out_img2;
drawKeypoints(img1, keypoint1, out_img1);
drawKeypoints(img2, keypoint2, out_img2);
imshow("Keypoints 1", out_img1);
imshow("Keypoints 2", out_img2);

// Extract the descriptor (feature vector, 128-D) of each keypoint
SiftDescriptorExtractor extractor;
Mat descriptor1, descriptor2;
extractor.compute(img1, keypoint1, descriptor1);
extractor.compute(img2, keypoint2, descriptor2);
```

Or:

```
Ptr<DescriptorExtractor> sift_desc_extract = DescriptorExtractor::create("SIFT");
sift_desc_extract->compute(img1, keypoint1, descriptor1);
sift_desc_extract->compute(img2, keypoint2, descriptor2);
// Matching: brute force compares descriptor vectors by Euclidean (L2) distance
// and pairs each descriptor with its nearest neighbour in the other image;
// weak pairs can optionally be pruned by distance (see the sketch below)
BFMatcher matcher(NORM_L2); // replaces the legacy BruteForceMatcher<L2<float> >
vector<DMatch> matches;
Mat img_matches;
matcher.match(descriptor1, descriptor2, matches);
```

Or:

```
Ptr<DescriptorMatcher> bruteforce_matcher = DescriptorMatcher::create("BruteForce");
bruteforce_matcher->match(descriptor1, descriptor2, matches);
```

```
drawMatches(img1, keypoint1, img2, keypoint2, matches, img_matches);
imshow("Raw matches", img_matches);
```
Next, RANSAC is used to eliminate the mismatched points. This breaks into three parts:

1. Align the matched keypoints according to matches and convert their coordinates to float.
2. Estimate the fundamental matrix with findFundamentalMat, which fills RansacStatus.
3. Delete the mismatched points, i.e. those with RansacStatus[i] = 0.

```
// Align the matched keypoints according to matches
vector<KeyPoint> R_keypoint1, R_keypoint2;
for (size_t i = 0; i < matches.size(); i++)
{
    // R_keypoint1 collects the keypoints of img1 that have a match in img2;
    // matches stores, for each pair, the indices into keypoint1 and keypoint2
    R_keypoint1.push_back(keypoint1[matches[i].queryIdx]);
    R_keypoint2.push_back(keypoint2[matches[i].trainIdx]);
}
// Convert the keypoint coordinates to Point2f
vector<Point2f> p1, p2;
for (size_t i = 0; i < matches.size(); i++)
{
    p1.push_back(R_keypoint1[i].pt);
    p2.push_back(R_keypoint2[i].pt);
}
// Reject mismatches via the fundamental matrix
vector<uchar> RansacStatus;
Mat Fundamental = findFundamentalMat(p1, p2, RansacStatus, FM_RANSAC);
vector<KeyPoint> RR_keypoint1, RR_keypoint2;
vector<DMatch> RR_matches; // RR_keypoint* and RR_matches hold the filtered keypoints and matches
int index = 0;
for (size_t i = 0; i < matches.size(); i++)
{
    if (RansacStatus[i] != 0) // keep RANSAC inliers only
    {
        RR_keypoint1.push_back(R_keypoint1[i]);
        RR_keypoint2.push_back(R_keypoint2[i]);
        // the inliers are stored densely, so rewrite the match indices
        // to point at their new positions in RR_keypoint1/RR_keypoint2
        matches[i].queryIdx = index;
        matches[i].trainIdx = index;
        RR_matches.push_back(matches[i]);
        index++;
    }
}
Mat img_RR_matches;
drawMatches(img1, RR_keypoint1, img2, RR_keypoint2, RR_matches, img_RR_matches);
imshow("After RANSAC", img_RR_matches);
waitKey(0); // without this the windows close immediately
```
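A closing aside that is not in the original flow: when the two views are related by a plane (or the camera only rotates), the same RANSAC filtering can be done with findHomography instead of the fundamental matrix. A minimal sketch reusing the p1/p2 point vectors from above:

```
// Hypothetical variant: RANSAC through a homography rather than the
// fundamental matrix (suited to planar scenes or pure camera rotation).
vector<uchar> HomStatus;
Mat H = findHomography(p1, p2, CV_RANSAC, 3.0, HomStatus);
// HomStatus[i] == 0 flags an outlier, just like RansacStatus above.
```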