【OpenCV3】3D Reconstruction Based on Binocular Stereo Vision
阿新 • Published: 2019-01-23
Stereo 3D reconstruction in OpenCV is based on the principle of triangulation. After stereo calibration of the camera pair we have the intrinsic parameters of each individual camera and the extrinsic parameters of the stereo rig (the rotation R and translation T between the two cameras); with these, triangulation lets us reconstruct a 3D point cloud. Here we only cover the reconstruction of a single point.
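Concretely, for a rectified pair the triangle relation is the homogeneous reprojection that OpenCV's stereoRectify encodes in its Q matrix. Writing c_x, c_y for the left principal point, c_x' for the right one, f for the focal length, T_x for the baseline and d = x_l - x_r for the disparity, the relation is (shown here as a reference formula in LaTeX notation, not code from this post):

\[
\begin{bmatrix} X \\ Y \\ Z \\ W \end{bmatrix}
=
\begin{bmatrix}
1 & 0 & 0 & -c_x \\
0 & 1 & 0 & -c_y \\
0 & 0 & 0 & f \\
0 & 0 & -\tfrac{1}{T_x} & \tfrac{c_x - c_x'}{T_x}
\end{bmatrix}
\begin{bmatrix} x \\ y \\ d \\ 1 \end{bmatrix},
\qquad
\text{3D point} = \left(\tfrac{X}{W},\, \tfrac{Y}{W},\, \tfrac{Z}{W}\right).
\]

This is exactly the (X, Y, Z, W) computation performed at the end of triangulate_stereo in the code below.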
The full code is given below (FOCAL_LENTH is a project-specific constant; a placeholder definition of 1.0 is used here so the snippet compiles):
#include <string>
#include <iostream>
#include <cmath>
#include <opencv2/opencv.hpp>

// FOCAL_LENTH: scale used by the distortion model and ray construction below.
// 1.0 is a placeholder; set it to match the convention used when the
// distortion coefficients kc were estimated.
#ifndef FOCAL_LENTH
#define FOCAL_LENTH 1.0
#endif

// Read the camera parameters from a .yml calibration file.
bool load_calibration_yml(std::string& file_name,
                          cv::Mat& cam_k_left, cv::Mat& cam_k_right,
                          cv::Mat& cam_kc_left, cv::Mat& cam_kc_right,
                          cv::Mat& R, cv::Mat& T)
{
    cv::FileStorage fs(file_name, cv::FileStorage::READ);
    if (!fs.isOpened())
    {
        return false;
    }

    fs["cam_K_left"] >> cam_k_left;                        // 3x3 left intrinsic matrix
    if (3 != cam_k_left.rows || 3 != cam_k_left.cols)
    {
        std::cout << "Load cam_K_left failed" << std::endl;
        return false;
    }
    fs["cam_kc_left"] >> cam_kc_left;                      // 1x5 left distortion coefficients
    if (1 != cam_kc_left.rows || 5 != cam_kc_left.cols)
    {
        std::cout << "Load cam_kc_left failed" << std::endl;
        return false;
    }
    fs["cam_K_right"] >> cam_k_right;                      // 3x3 right intrinsic matrix
    if (3 != cam_k_right.rows || 3 != cam_k_right.cols)
    {
        std::cout << "Load cam_K_right failed" << std::endl;
        return false;
    }
    fs["cam_kc_right"] >> cam_kc_right;                    // 1x5 right distortion coefficients
    if (1 != cam_kc_right.rows || 5 != cam_kc_right.cols)
    {
        std::cout << "Load cam_kc_right failed" << std::endl;
        return false;
    }
    fs["R"] >> R;                                          // 3x3 rotation between the two cameras
    if (3 != R.rows || 3 != R.cols)
    {
        std::cout << "Load R failed" << std::endl;
        return false;
    }
    fs["T"] >> T;                                          // 3x1 translation between the two cameras
    if (3 != T.rows || 1 != T.cols)
    {
        std::cout << "Load T failed" << std::endl;
        return false;
    }
    return true;
}

// Distortion correction for a single point (odd-power polynomial model in the
// incidence angle theta). Assumes the point does not lie exactly on either
// principal-point axis, so x and y are non-zero.
void undist_point(cv::Point2d& input, cv::Point2d& output, cv::Mat& K, cv::Mat& kc)
{
    // Normalized image coordinates.
    double x = (input.x - K.at<double>(0, 2)) / K.at<double>(0, 0);
    double y = (input.y - K.at<double>(1, 2)) / K.at<double>(1, 1);
    double r = std::sqrt(std::pow(x, 2) + std::pow(y, 2));

    // Incidence angle and its polynomial correction.
    double theta   = std::atan(r / FOCAL_LENTH);
    double theta_3 = std::pow(theta, 3);
    double theta_5 = std::pow(theta, 5);
    double theta_7 = std::pow(theta, 7);
    double theta_9 = std::pow(theta, 9);
    double theta_f = theta   * kc.at<double>(0, 0) +
                     theta_3 * kc.at<double>(0, 1) +
                     theta_5 * kc.at<double>(0, 2) +
                     theta_7 * kc.at<double>(0, 3) +
                     theta_9 * kc.at<double>(0, 4);
    //std::cout << "theta_f= " << theta_f << std::endl;

    // Map back to pixel coordinates, preserving the original quadrant.
    double phi = std::atan(std::abs(y) / std::abs(x));
    output.x = x / std::abs(x) * std::cos(phi) * theta_f * K.at<double>(0, 0) + K.at<double>(0, 2);
    output.y = y / std::abs(y) * std::sin(phi) * theta_f * K.at<double>(1, 1) + K.at<double>(1, 2);
}

// Triangulate a single matched point pair into a 3D point.
void triangulate_stereo(cv::Mat& K1, cv::Mat& kc1, cv::Mat& K2, cv::Mat& kc2,
                        cv::Mat& Rt, cv::Mat& T,
                        cv::Point2d& p1, cv::Point2d& p2, cv::Point3d& p3d)
{
    // Undistort both pixels first.
    cv::Point2d outp1, outp2;
    undist_point(p1, outp1, K1, kc1);
    undist_point(p2, outp2, K2, kc2);
    //std::cout << "undist_left = " << outp1 << std::endl;
    //std::cout << "undist_right = " << outp2 << std::endl;

    // Build the viewing rays and rotate the right-camera ray into the left frame.
    double f1 = std::sqrt(std::pow(K1.at<double>(0, 0), 2) + std::pow(K1.at<double>(1, 1), 2)) * FOCAL_LENTH;
    double f2 = std::sqrt(std::pow(K2.at<double>(0, 0), 2) + std::pow(K2.at<double>(1, 1), 2)) * FOCAL_LENTH;
    cv::Point3d u1(outp1.x, outp1.y, f1);
    cv::Point3d u2(outp2.x, outp2.y, f2);
    cv::Point3d w1 = u1;
    cv::Point3d w2 = cv::Point3d(cv::Mat(Rt * cv::Mat(u2)));
    //std::cout << "w1 = " << w1 << std::endl;
    //std::cout << "w2 = " << w2 << std::endl;

    // Disparity and homogeneous reprojection (X, Y, Z, W), as in OpenCV's Q matrix.
    double disparity = w1.x - w2.x;
    double X = w1.x - K1.at<double>(0, 2);
    double Y = w1.y - K1.at<double>(1, 2);
    double Z = f1;
    double W = disparity * (-1 / T.at<double>(0, 0)) +
               (K1.at<double>(0, 2) - K2.at<double>(0, 2)) / T.at<double>(0, 0);
    p3d.x = X / W;
    p3d.y = Y / W;
    p3d.z = Z / W;
}
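A minimal usage sketch follows. It is not part of the original post: the calibration file name, the pixel coordinates and the choice Rt = R.t() (rotating the right-camera ray into the left camera frame) are assumptions to adapt to your own setup.

int main()
{
    cv::Mat K1, K2, kc1, kc2, R, T;
    std::string calib_file = "stereo_calib.yml";   // placeholder file name

    if (!load_calibration_yml(calib_file, K1, K2, kc1, kc2, R, T))
    {
        std::cout << "Failed to load calibration file" << std::endl;
        return -1;
    }

    // triangulate_stereo expects the rotation that brings right-camera rays into
    // the left camera frame; assuming the stored R maps left to right, use R.t().
    cv::Mat Rt = R.t();

    // A matched pixel pair from the left and right images (placeholder values);
    // in practice it comes from feature matching or structured-light decoding.
    cv::Point2d p_left(652.3, 480.7);
    cv::Point2d p_right(598.1, 481.2);

    cv::Point3d p3d;
    triangulate_stereo(K1, kc1, K2, kc2, Rt, T, p_left, p_right, p3d);
    std::cout << "Reconstructed 3D point: " << p3d << std::endl;
    return 0;
}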
2017.05.11