1. 程式人生 > 實用技巧 > opencv實戰-全景影象拼接

opencv實戰-全景影象拼接

一、全景影象拼接步驟

1、使用SIFT演算法尋找關鍵特徵點

2、建立BFMatcher匹配器將圖片特徵點進行匹配

3、特徵點多於4個則可以計算視角變換矩陣

4、將圖片經過變換矩陣變換

5、圖片變換過後進行拼接

二、參考程式碼

import numpy as np
import cv2

class Stitcher:
    """Stitch two overlapping images into a panorama using SIFT features."""

    def stitch(self, images, ratio=0.75, reprojThresh=4.0, showMatches=False):
        """Stitch a pair of overlapping images.

        Args:
            images: sequence (imageB, imageA); imageA is warped onto imageB's
                image plane, so imageB should be the left image.
            ratio: Lowe's ratio-test threshold for keeping feature matches.
            reprojThresh: RANSAC reprojection threshold for findHomography.
            showMatches: if True, also return a match-visualization image.

        Returns:
            The stitched panorama, or (panorama, visualization) when
            showMatches is True; None when too few matches are found.
        """
        (imageB, imageA) = images

        # Detect SIFT keypoints and compute descriptors for both images.
        (kpsA, featuresA) = self.detectAndDescribe(imageA)
        (kpsB, featuresB) = self.detectAndDescribe(imageB)

        # Match features between the two images; None means the homography
        # could not be estimated (fewer than the minimum matches survived).
        M = self.matchKeypoints(kpsA, kpsB, featuresA, featuresB,
                                ratio, reprojThresh)
        if M is None:
            return None

        # H is the 3x3 perspective-transform (homography) matrix.
        (matches, H, status) = M

        # Warp imageA into imageB's coordinate frame; the canvas is wide
        # enough to hold both images side by side.
        result = cv2.warpPerspective(
            imageA, H,
            (imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
        self.cv_show('result1', result)

        # Paste imageB into the left portion of the panorama.
        result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB
        self.cv_show('result2', result)

        if showMatches:
            # Build a side-by-side visualization of the inlier matches.
            vis = self.drawMatches(imageA, imageB, kpsA, kpsB,
                                   matches, status)
            return (result, vis)
        return result

    def cv_show(self, name, img):
        """Display an image in a window and block until a key is pressed."""
        cv2.imshow(name, img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def detectAndDescribe(self, image):
        """Return (keypoint coordinates, SIFT descriptors) for an image.

        Returns:
            kps: float32 NumPy array of (x, y) keypoint coordinates.
            features: SIFT descriptor matrix, one row per keypoint.
        """
        # SIFT operates on intensity, so work on the grayscale image.
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # OpenCV >= 4.4 ships SIFT in the main module; fall back to the
        # legacy contrib location for older builds.
        try:
            descriptor = cv2.SIFT_create()
        except AttributeError:
            descriptor = cv2.xfeatures2d.SIFT_create()
        # BUG FIX: the original passed the color image here, leaving the
        # grayscale conversion above as dead code.
        (kps, features) = descriptor.detectAndCompute(gray, None)
        # Keep only the (x, y) point coordinates as a float32 array.
        kps = np.float32([kp.pt for kp in kps])
        return (kps, features)

    def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB,
                       ratio, reprojThresh):
        """Match descriptors and estimate the homography mapping A onto B.

        Returns:
            (matches, H, status) — matches is a list of
            (trainIdx, queryIdx) pairs, H the 3x3 homography, status the
            RANSAC inlier mask — or None when too few matches survive.
        """
        matcher = cv2.BFMatcher()
        # KNN with k=2 so Lowe's ratio test can compare best vs. runner-up.
        rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
        matches = []
        for m in rawMatches:
            # Keep a match only when the best distance is clearly smaller
            # than the second-best (Lowe's ratio test).
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                # Store (index into featuresB, index into featuresA).
                matches.append((m[0].trainIdx, m[0].queryIdx))

        # findHomography needs at least 4 point correspondences.
        if len(matches) > 4:
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])
            (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                                             reprojThresh)
            return (matches, H, status)
        # Too few matches — homography cannot be estimated reliably.
        return None

    def drawMatches(self, imageA, imageB, kpsA, kpsB, matches, status):
        """Return a side-by-side image with inlier matches drawn as lines."""
        (hA, wA) = imageA.shape[:2]
        (hB, wB) = imageB.shape[:2]
        # Canvas holding A on the left and B on the right.
        vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
        vis[0:hA, 0:wA] = imageA
        vis[0:hB, wA:] = imageB

        for ((trainIdx, queryIdx), s) in zip(matches, status):
            # status == 1 marks a RANSAC inlier.
            if s == 1:
                ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
                # Shift B's x-coordinate by wA since B sits on the right.
                ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
                cv2.line(vis, ptA, ptB, (0, 255, 0), 1)
        return vis


if __name__ == '__main__':
    imageA = cv2.imread("left_01.png")
    imageB = cv2.imread("right_01.png")
    stitcher = Stitcher()
    (result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)
    cv2.imshow("Keypoint Matches", vis)
    cv2.imshow("Result", result)
    cv2.waitKey(0)
    cv2.destroyAllWindows()