SVO Feature Alignment Code Analysis
阿新 • Published: 2018-09-03
After sparse image alignment, SVO performs feature alignment: the map is projected into the current frame, and inverse compositional optical flow, initialized with the result of sparse image alignment, is used to obtain more accurate feature positions.
Files involved:
reprojector.cpp
matcher.cpp
feature_alignment.cpp
point.cpp
map.cpp
1. Entry function:

void Reprojector::reprojectMap(
    FramePtr frame,
    std::vector< std::pair<FramePtr,std::size_t> >& overlap_kfs)
{
  resetGrid();

  // Identify those Keyframes which share a common field of view.
  SVO_START_TIMER("reproject_kfs");
  // Find the keyframes in the map that share a field of view with the current frame,
  // and return the distance between each of them and the current frame.
  list< pair<FramePtr,double> > close_kfs;
  map_.getCloseKeyframes(frame, close_kfs);

  // Sort KFs with overlap according to their closeness (distance).
  close_kfs.sort(boost::bind(&std::pair<FramePtr, double>::second, _1) <
                 boost::bind(&std::pair<FramePtr, double>::second, _2));

  // Reproject all mappoints of the closest N kfs with overlap. We only store
  // in which grid cell the points fall.
  size_t n = 0;
  overlap_kfs.reserve(options_.max_n_kfs);  // upper limit on the number of overlapping keyframes
  for(auto it_frame=close_kfs.begin(), ite_frame=close_kfs.end();
      it_frame!=ite_frame && n<options_.max_n_kfs; ++it_frame, ++n)
  {
    FramePtr ref_frame = it_frame->first;
    ref_frame->debug_img_ = ref_frame->img().clone();  // the debug image must be re-initialized on every reprojection iteration
    overlap_kfs.push_back(pair<FramePtr,size_t>(ref_frame,0));

    // Try to reproject each mappoint that the other KF observes
    for(auto it_ftr=ref_frame->fts_.begin(), ite_ftr=ref_frame->fts_.end();
        it_ftr!=ite_ftr; ++it_ftr)
    {
      // check if the feature has a mappoint assigned
      if((*it_ftr)->point == NULL)
        continue;

      // check whether this point has already been projected;
      // make sure we project a point only once
      if((*it_ftr)->point->last_projected_kf_id_ == frame->id_)
        continue;
      (*it_ftr)->point->last_projected_kf_id_ = frame->id_;
      // if a feature of the reference keyframe projects successfully into the current frame, increment the counter
      if(reprojectPoint(frame, (*it_ftr)->point))
        overlap_kfs.back().second++;
    }
  }
  SVO_STOP_TIMER("reproject_kfs");

  // Now project all point candidates
  SVO_START_TIMER("reproject_candidates");
  {
    boost::unique_lock<boost::mutex> lock(map_.point_candidates_.mut_);
    auto it=map_.point_candidates_.candidates_.begin();
    while(it!=map_.point_candidates_.candidates_.end())
    {
      if(!reprojectPoint(frame, it->first))
      {
        // if a candidate keeps failing to reproject, it is probably a bad point and can be deleted
        it->first->n_failed_reproj_ += 3;
        if(it->first->n_failed_reproj_ > 30)
        {
          map_.point_candidates_.deleteCandidate(*it);
          it = map_.point_candidates_.candidates_.erase(it);
          continue;
        }
      }
      ++it;
    }
  } // unlock the mutex when out of scope
  SVO_STOP_TIMER("reproject_candidates");

  // Now we go through each grid cell and select one point to match.
  // At the end, we should have at maximum one reprojected point per cell.
  SVO_START_TIMER("feature_align");
  for(size_t i=0; i<grid_.cells.size(); ++i)
  {
    // we prefer good quality points over unknown quality (more likely to match)
    // and unknown quality over candidates (position not optimized)
    // process the cells in random order; each successful match increments the counter
    if(reprojectCell(*grid_.cells.at(grid_.cell_order[i]), frame))
      ++n_matches_;
    if(n_matches_ > (size_t) Config::maxFts())
      break;
  }
  SVO_STOP_TIMER("feature_align");
}
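For context, reprojectMap relies on a cell grid laid over the image: grid_.cells holds one candidate list per cell, and grid_.cell_order is a shuffled index list that makes the per-cell matching order random. The following is a condensed sketch of how reprojector.cpp sets this up and what resetGrid() does (reconstructed from memory, details may differ from the exact source):

// Sketch of the grid initialization and reset in reprojector.cpp (reconstruction, not verbatim).
void Reprojector::initializeGrid(vk::AbstractCamera* cam)
{
  grid_.cell_size   = Config::gridSize();
  grid_.grid_n_cols = ceil(static_cast<double>(cam->width())  / grid_.cell_size);
  grid_.grid_n_rows = ceil(static_cast<double>(cam->height()) / grid_.cell_size);
  grid_.cells.resize(grid_.grid_n_cols * grid_.grid_n_rows);
  std::for_each(grid_.cells.begin(), grid_.cells.end(), [](Cell*& c){ c = new Cell; });
  grid_.cell_order.resize(grid_.cells.size());
  for(size_t i = 0; i < grid_.cells.size(); ++i)
    grid_.cell_order[i] = i;
  std::random_shuffle(grid_.cell_order.begin(), grid_.cell_order.end()); // random matching order
}

void Reprojector::resetGrid()
{
  n_matches_ = 0;
  n_trials_  = 0;
  // clear the candidate list of every cell before reprojecting the map into the new frame
  std::for_each(grid_.cells.begin(), grid_.cells.end(), [](Cell* c){ c->clear(); });
}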
2. Find the keyframes that share a field of view with frame, and return them together with their distance to frame:

// Find the keyframes in the current map that share a field of view with frame,
// and compute the distance between each of them and frame.
void Map::getCloseKeyframes(
    const FramePtr& frame,
    std::list< std::pair<FramePtr,double> >& close_kfs) const
{
  // iterate over all keyframes
  for(auto kf : keyframes_)
  {
    // check if kf has an overlapping field of view with frame;
    // the key points of kf are used for this test
    for(auto keypoint : kf->key_pts_)
    {
      if(keypoint == nullptr)
        continue;

      // if there is overlap, record this keyframe together with its distance to frame
      if(frame->isVisible(keypoint->point->pos_))
      {
        close_kfs.push_back(
            std::make_pair(
                kf, (frame->T_f_w_.translation()-kf->T_f_w_.translation()).norm()));
        break; // this keyframe has an overlapping field of view -> add to close_kfs
      }
    }
  }
}
3. Project a point into the corresponding grid cell of the current frame:

bool Reprojector::reprojectPoint(FramePtr frame, Point* point)
{
  // transform the world coordinates into the current frame and project to pixel coordinates
  Vector2d px(frame->w2c(point->pos_));
  // check that an 8x8 patch around the projection lies inside the image
  if(frame->cam_->isInFrame(px.cast<int>(), 8)) // 8px is the patch size in the matcher
  {
    // determine which cell the point falls into and add it to that cell's candidate list
    const int k = static_cast<int>(px[1]/grid_.cell_size)*grid_.grid_n_cols
                + static_cast<int>(px[0]/grid_.cell_size);
    grid_.cells.at(k)->push_back(Candidate(point, px));
    return true;
  }
  return false;
}
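A small worked example of the cell index (assuming a 752x480 image and the default grid size of 30 px, both assumptions): grid_n_cols = ceil(752/30) = 26, so a point projecting to px = (100, 200) is binned into cell k = floor(200/30)*26 + floor(100/30) = 6*26 + 3 = 159.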
4. Process each cell (this is the important part):

bool Reprojector::reprojectCell(Cell& cell, FramePtr frame)
{
  // sort the candidates of this cell by point quality
  cell.sort(boost::bind(&Reprojector::pointQualityComparator, _1, _2));
  Cell::iterator it=cell.begin();
  // iterate over the candidates in the cell
  while(it!=cell.end())
  {
    ++n_trials_;

    if(it->pt->type_ == Point::TYPE_DELETED)
    {
      it = cell.erase(it);
      continue;
    }

    // refine the patch position with the alignment (optical-flow) step
    bool found_match = true;
    if(options_.find_match_direct)
      found_match = matcher_.findMatchDirect(*it->pt, *frame, it->px);
    if(!found_match)
    {
      it->pt->n_failed_reproj_++;
      if(it->pt->type_ == Point::TYPE_UNKNOWN && it->pt->n_failed_reproj_ > 15)
        map_.safeDeletePoint(it->pt);
      if(it->pt->type_ == Point::TYPE_CANDIDATE && it->pt->n_failed_reproj_ > 30)
        map_.point_candidates_.deleteCandidatePoint(it->pt);
      it = cell.erase(it);
      continue;
    }

    // successful reprojection: increment the counter
    it->pt->n_succeeded_reproj_++;
    if(it->pt->type_ == Point::TYPE_UNKNOWN && it->pt->n_succeeded_reproj_ > 10)
      it->pt->type_ = Point::TYPE_GOOD;

    // add a new feature to the current frame
    Feature* new_feature = new Feature(frame.get(), it->px, matcher_.search_level_);
    frame->addFeature(new_feature);

    // Here we add a reference in the feature to the 3D point, the other way
    // round is only done if this frame is selected as keyframe.
    new_feature->point = it->pt;

    if(matcher_.ref_ftr_->type == Feature::EDGELET)
    {
      new_feature->type = Feature::EDGELET;
      new_feature->grad = matcher_.A_cur_ref_*matcher_.ref_ftr_->grad;
      new_feature->grad.normalize();
    }

    // If the keyframe is selected and we reproject the rest, we don't have to
    // check this point anymore.
    it = cell.erase(it);

    // Maximum one point per cell.
    return true;
  }
  return false;
}
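The quality ordering mentioned in the comments (good points before unknown, unknown before candidates) is obtained by comparing the point type enum. A minimal sketch of pointQualityComparator, assuming the enum is declared in the order TYPE_DELETED < TYPE_CANDIDATE < TYPE_UNKNOWN < TYPE_GOOD:

// Sketch of the comparator used by cell.sort() above (assumes the enum ordering stated in the text).
bool Reprojector::pointQualityComparator(Candidate& lhs, Candidate& rhs)
{
  // a candidate whose point has a higher-quality type is sorted to the front of the cell
  if(lhs.pt->type_ > rhs.pt->type_)
    return true;
  return false;
}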
5. Optical flow, inverse compositional formulation:

patch_with_border_ differs from patch_ in that the former carries a one-pixel border: the subsequent alignment computes gradients with central differences (the average of the two neighbouring pixels), so evaluating the gradient at the patch boundary needs one extra row and column of pixels.

bool Matcher::findMatchDirect(
    const Point& pt,
    const Frame& cur_frame,
    Vector2d& px_cur)
{
  // find the observation of pt whose viewing direction is closest to that of cur_frame
  if(!pt.getCloseViewObs(cur_frame.pos(), ref_ftr_))
    return false;

  // check that the reference patch lies inside the reference image
  if(!ref_ftr_->frame->cam_->isInFrame(
      ref_ftr_->px.cast<int>()/(1<<ref_ftr_->level), halfpatch_size_+2, ref_ftr_->level))
    return false;

  // compute the 2x2 affine warp matrix
  warp::getWarpMatrixAffine(
      *ref_ftr_->frame->cam_, *cur_frame.cam_, ref_ftr_->px, ref_ftr_->f,
      (ref_ftr_->frame->pos() - pt.pos_).norm(),
      cur_frame.T_f_w_ * ref_ftr_->frame->T_f_w_.inverse(), ref_ftr_->level, A_cur_ref_);

  // choose the search level in the current image pyramid
  // (the author was unsure how this function works; see the sketch after this block)
  search_level_ = warp::getBestSearchLevel(A_cur_ref_, Config::nPyrLevels()-1);

  // warp the reference patch into the current frame using the affine transform
  warp::warpAffine(A_cur_ref_, ref_ftr_->frame->img_pyr_[ref_ftr_->level], ref_ftr_->px,
                   ref_ftr_->level, search_level_, halfpatch_size_+1, patch_with_border_);
  createPatchFromPatchWithBorder();

  // px_cur should already be set; transform it to the search level
  Vector2d px_scaled(px_cur/(1<<search_level_));

  // the warp from reference to current frame has been computed and the patch extracted;
  // now run the alignment
  bool success = false;
  if(ref_ftr_->type == Feature::EDGELET)
  {
    Vector2d dir_cur(A_cur_ref_*ref_ftr_->grad);
    dir_cur.normalize();
    success = feature_alignment::align1D(
          cur_frame.img_pyr_[search_level_], dir_cur.cast<float>(),
          patch_with_border_, patch_, options_.align_max_iter, px_scaled, h_inv_);
  }
  else
  {
    // feature alignment
    success = feature_alignment::align2D(
        cur_frame.img_pyr_[search_level_], patch_with_border_, patch_,
        options_.align_max_iter, px_scaled);
  }
  // scale back to the original (level-0) resolution
  px_cur = px_scaled * (1<<search_level_);
  return success;
}
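Regarding getBestSearchLevel, whose principle the author notes as unclear: the determinant of A_cur_ref measures how much the reference patch area is enlarged when warped into the current image at level 0, and every pyramid level up shrinks that area by a factor of 4. The function therefore walks up the pyramid until the warped patch is no longer strongly magnified. A sketch consistent with this reasoning (the threshold value 3.0 is quoted from memory and should be checked against the source):

// Pick the pyramid level of the current image at which the warped patch
// is no longer significantly enlarged. det(A_cur_ref) is the area scale of
// the warp at level 0; each level up divides that area by 4.
int getBestSearchLevel(const Matrix2d& A_cur_ref, const int max_level)
{
  int search_level = 0;
  double D = A_cur_ref.determinant();
  while(D > 3.0 && search_level < max_level)
  {
    search_level += 1;
    D *= 0.25;
  }
  return search_level;
}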
6. Find the observation whose viewing direction is closest to that of the current frame; the map holds few keyframes, so this is fast:

bool Point::getCloseViewObs(const Vector3d& framepos, Feature*& ftr) const
{
  // TODO: get frame with same point of view AND same pyramid level!
  Vector3d obs_dir(framepos - pos_);
  obs_dir.normalize();
  auto min_it=obs_.begin();  // list of features (keyframes) observing this point
  double min_cos_angle = 0;
  for(auto it=obs_.begin(), ite=obs_.end(); it!=ite; ++it)
  {
    // viewing direction from the observing keyframe to this point
    Vector3d dir((*it)->frame->pos() - pos_);
    dir.normalize();
    // cosine of the angle between the two viewing directions
    double cos_angle = obs_dir.dot(dir);
    // keep the observation with the smallest viewing-angle difference (largest cosine)
    if(cos_angle > min_cos_angle)
    {
      min_cos_angle = cos_angle;
      min_it = it;
    }
  }
  ftr = *min_it;
  // the viewing-angle difference must not exceed 60 degrees (cos 60° = 0.5)
  if(min_cos_angle < 0.5) // assume that observations larger than 60° are useless
    return false;
  return true;
}
7. Compute the affine warp: because the viewpoint changes, the patch should be distorted accordingly:

// compute the affine warp matrix
void getWarpMatrixAffine(
    const svo::AbstractCamera& cam_ref,
    const svo::AbstractCamera& cam_cur,
    const Vector2d& px_ref,
    const Vector3d& f_ref,
    const double depth_ref,
    const SE3& T_cur_ref,
    const int level_ref,
    Matrix2d& A_cur_ref)
{
  // Compute affine warp matrix A_cur_ref
  const int halfpatch_size = 5;              // half patch size of 5 px
  const Vector3d xyz_ref(f_ref*depth_ref);   // bearing vector times depth gives the 3D point in the reference frame
  // border points in the u and v directions; note the pyramid level of the feature:
  // px_ref is given at level 0, so the patch offset is scaled by (1<<level_ref)
  Vector3d xyz_du_ref(cam_ref.cam2world(px_ref + Vector2d(halfpatch_size,0)*(1<<level_ref)));
  Vector3d xyz_dv_ref(cam_ref.cam2world(px_ref + Vector2d(0,halfpatch_size)*(1<<level_ref)));
  // attention: A_cur_ref is therefore only meant to warp patch coordinates expressed at level 0
  // cam2world returns a bearing vector, so scale it with the depth of the centre point
  xyz_du_ref *= xyz_ref[2]/xyz_du_ref[2];
  xyz_dv_ref *= xyz_ref[2]/xyz_dv_ref[2];
  // project the three points into the current frame
  const Vector2d px_cur(cam_cur.world2cam(T_cur_ref*(xyz_ref)));
  const Vector2d px_du(cam_cur.world2cam(T_cur_ref*(xyz_du_ref)));
  const Vector2d px_dv(cam_cur.world2cam(T_cur_ref*(xyz_dv_ref)));
  // the affine warp is essentially the rate of change of the projection in the x and y directions
  A_cur_ref.col(0) = (px_du - px_cur)/halfpatch_size;
  A_cur_ref.col(1) = (px_dv - px_cur)/halfpatch_size;
}
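Written as a formula (my notation, with $h$ = halfpatch_size, $\pi_{cur}$ the current camera's projection, and $\mathbf{x}, \mathbf{x}_{du}, \mathbf{x}_{dv}$ the three reference 3D points above), the warp is a finite-difference approximation of the Jacobian of the reference-to-current pixel mapping:

$$
A_{cur,ref} \approx \frac{\partial \mathbf{u}_{cur}}{\partial \mathbf{u}_{ref}}
= \frac{1}{h}\begin{bmatrix}
\pi_{cur}(T_{cur,ref}\,\mathbf{x}_{du}) - \pi_{cur}(T_{cur,ref}\,\mathbf{x}) &
\pi_{cur}(T_{cur,ref}\,\mathbf{x}_{dv}) - \pi_{cur}(T_{cur,ref}\,\mathbf{x})
\end{bmatrix}
$$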
8. Compute the patch values using the affine warp:

void warpAffine(
    const Matrix2d& A_cur_ref,
    const cv::Mat& img_ref,
    const Vector2d& px_ref,
    const int level_ref,
    const int search_level,
    const int halfpatch_size,
    uint8_t* patch)
{
  const int patch_size = halfpatch_size*2;
  // inverse compositional formulation: we need the warp from the current frame back to the reference frame
  const Matrix2f A_ref_cur = A_cur_ref.inverse().cast<float>();
  if(isnan(A_ref_cur(0,0)))
  {
    printf("Affine warp is NaN, probably camera has no translation\n"); // TODO
    return;
  }

  // Perform the warp on a larger patch.
  uint8_t* patch_ptr = patch;
  const Vector2f px_ref_pyr = px_ref.cast<float>() / (1<<level_ref);
  for (int y=0; y<patch_size; ++y)
  {
    for (int x=0; x<patch_size; ++x, ++patch_ptr)
    {
      // px_patch is expressed in pixels of the search level
      Vector2f px_patch(x-halfpatch_size, y-halfpatch_size);
      // 1. scale the patch coordinate to level 0, because A_ref_cur only warps level-0 coordinates
      px_patch *= (1<<search_level);
      // 2. warp it into the reference image (at the feature's pyramid level)
      const Vector2f px(A_ref_cur*px_patch + px_ref_pyr);
      if (px[0]<0 || px[1]<0 || px[0]>=img_ref.cols-1 || px[1]>=img_ref.rows-1)
        *patch_ptr = 0;
      else
        // bilinear interpolation; img_ref is the reference image at the feature's pyramid level
        *patch_ptr = (uint8_t) interpolateMat_8u(img_ref, px[0], px[1]);
    }
  }
}
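For completeness, this is what a bilinear interpolation such as the interpolateMat_8u call above computes on an 8-bit single-channel image (a generic sketch, not the vikit source; the caller has already checked that u+1 and v+1 are inside the image):

// Generic bilinear interpolation on an 8UC1 image (illustrative sketch).
float bilinear8u(const cv::Mat& img, float u, float v)
{
  const int x = static_cast<int>(u);   // u, v are non-negative here (bounds checked by the caller)
  const int y = static_cast<int>(v);
  const float dx = u - x;
  const float dy = v - y;
  const uint8_t* ptr = img.ptr<uint8_t>(y) + x;
  const size_t stride = img.step;      // bytes per row == pixels per row for 8UC1
  return (1.f-dx)*(1.f-dy)*ptr[0]
       + dx*(1.f-dy)*ptr[1]
       + (1.f-dx)*dy*ptr[stride]
       + dx*dy*ptr[stride+1];
}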
9. Fill patch_ from the bordered patch:

// mainly fills patch_ with the inner pixels of patch_with_border_
void Matcher::createPatchFromPatchWithBorder()
{
  uint8_t* ref_patch_ptr = patch_;
  // Why start at y=1, and why is the bordered patch (patch_size_+2) pixels wide?
  // Because the alignment later computes central-difference gradients, which need one
  // extra row and column of pixels outside the patch.
  for(int y=1; y<patch_size_+1; ++y, ref_patch_ptr += patch_size_)
  {
    uint8_t* ref_patch_border_ptr = patch_with_border_ + y*(patch_size_+2) + 1;
    for(int x=0; x<patch_size_; ++x)
      ref_patch_ptr[x] = ref_patch_border_ptr[x];
  }
}
10. The main optical-flow (alignment) procedure:

bool align2D(
    const cv::Mat& cur_img,
    uint8_t* ref_patch_with_border,
    uint8_t* ref_patch,
    const int n_iter,
    Vector2d& cur_px_estimate,
    bool no_simd)
{
  const int halfpatch_size_ = 4;
  const int patch_size_ = 8;
  const int patch_area_ = 64;
  bool converged=false;

  // compute derivative of template and prepare inverse compositional
  float __attribute__((__aligned__(16))) ref_patch_dx[patch_area_];
  float __attribute__((__aligned__(16))) ref_patch_dy[patch_area_];
  Matrix3f H; H.setZero();

  // compute gradient and Hessian
  const int ref_step = patch_size_+2;
  float* it_dx = ref_patch_dx;
  float* it_dy = ref_patch_dy;
  for(int y=0; y<patch_size_; ++y)
  {
    uint8_t* it = ref_patch_with_border + (y+1)*ref_step + 1;
    for(int x=0; x<patch_size_; ++x, ++it, ++it_dx, ++it_dy)
    {
      Vector3f J;
      // central-difference image gradients
      J[0] = 0.5 * (it[1] - it[-1]);
      J[1] = 0.5 * (it[ref_step] - it[-ref_step]);
      J[2] = 1;
      // store the gradient of every pixel of the reference patch; in the inverse
      // compositional formulation the Jacobian is evaluated on the reference patch only once
      *it_dx = J[0];
      *it_dy = J[1];
      H += J*J.transpose();
    }
  }
  Matrix3f Hinv = H.inverse();
  float mean_diff = 0;

  // initial pixel location in the current image, provided by the preceding sparse image alignment
  float u = cur_px_estimate.x();
  float v = cur_px_estimate.y();

  // termination condition
  const float min_update_squared = 0.03*0.03; // original param: 0.03 * 0.03
  const int cur_step = cur_img.step.p[0];
  // float chi2 = 0;
  // start the iterative optimization
  Vector3f update; update.setZero();
  for(int iter = 0; iter<n_iter; ++iter)
  {
    int u_r = floor(u);
    int v_r = floor(v);
    // boundary check (should normally not trigger)
    if(u_r < halfpatch_size_ || v_r < halfpatch_size_ ||
       u_r >= cur_img.cols-halfpatch_size_ || v_r >= cur_img.rows-halfpatch_size_)
      break;

    if(isnan(u) || isnan(v)) // TODO very rarely this can happen, maybe H is singular? should not be at corner.. check
      return false;

    // bilinear interpolation weights
    float subpix_x = u-u_r;
    float subpix_y = v-v_r;
    float wTL = (1.0-subpix_x)*(1.0-subpix_y);
    float wTR = subpix_x * (1.0-subpix_y);
    float wBL = (1.0-subpix_x)*subpix_y;
    float wBR = subpix_x * subpix_y;

    // loop through search_patch, interpolate
    uint8_t* it_ref = ref_patch;
    float* it_ref_dx = ref_patch_dx;
    float* it_ref_dy = ref_patch_dy;
    // float new_chi2 = 0.0;
    Vector3f Jres; Jres.setZero();
    for(int y=0; y<patch_size_; ++y)
    {
      uint8_t* it = (uint8_t*) cur_img.data + (v_r+y-halfpatch_size_)*cur_step + u_r-halfpatch_size_;
      for(int x=0; x<patch_size_; ++x, ++it, ++it_ref, ++it_ref_dx, ++it_ref_dy)
      {
        float search_pixel = wTL*it[0] + wTR*it[1] + wBL*it[cur_step] + wBR*it[cur_step+1];
        // residual between the current-frame pixel and the reference-patch pixel
        float res = search_pixel - *it_ref + mean_diff;
        // accumulate residual times gradient: this is the right-hand side b of the normal equations
        Jres[0] -= res*(*it_ref_dx);
        Jres[1] -= res*(*it_ref_dy);
        Jres[2] -= res;
        // new_chi2 += res*res;
      }
    }

    // Gauss-Newton update: Hinv * b
    update = Hinv * Jres;
    u += update[0];
    v += update[1];
    mean_diff += update[2];

    if(update[0]*update[0]+update[1]*update[1] < min_update_squared)
    {
      converged=true;
      break;
    }
  }

  cur_px_estimate << u, v;
  return converged;
}
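Summarizing the loop above as formulas (the notation is mine, not from the source): the parameters are the patch position plus a brightness offset, $p = (u, v, \beta)$. For each pixel $(x_i, y_i)$ of the $8\times 8$ patch the residual is

$$ r_i = I_{cur}(u + x_i,\; v + y_i) - T(x_i, y_i) + \beta, $$

the Jacobian is evaluated once on the reference patch (this is the inverse compositional trick),

$$ J_i = \begin{bmatrix} \partial T/\partial x \\ \partial T/\partial y \\ 1 \end{bmatrix}_i, \qquad H = \sum_i J_i J_i^{\top}, $$

and each iteration applies the Gauss-Newton step

$$ \Delta p = H^{-1} \sum_i J_i\,(-r_i), \qquad p \leftarrow p + \Delta p, $$

where the sign flip of the inverse compositional update is folded into the accumulation of Jres in the code. Since $H$ depends only on the reference patch, it is inverted once before the iterations start.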
With the functions above, patch-based feature matching is complete; the next step is to optimize the camera pose (and structure) with Gauss-Newton.