
OpenCV 3.1.0 optical flow and foreground extraction
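
This post combines two GPU-accelerated video-analysis techniques from the OpenCV CUDA modules: foreground extraction with four background-subtraction algorithms (MOG, MOG2, GMG, FGD) and dense optical flow with four estimators (Brox, pyramidal LK, Farneback, Dual TV-L1). The complete code, split across five files, follows.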

//bgfg_segm.h

#ifndef BGFG_SEGM_H
#define BGFG_SEGM_H

#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"

using namespace cv;

enum Method
{
	MOG,
	MOG2,
	GMG,
	FGD_STAT
};

extern void foregroundExtraction(const Mat &frame, Method m);

#endif
//bgfg_segm.cpp

#include <iostream>
#include <string>
#include <cassert>
#include <cstdlib>

#include "bgfg_segm.h"
#include "opencv2/cudabgsegm.hpp"
#include "opencv2/cudalegacy.hpp"
#include "opencv2/video.hpp"
#include "opencv2/highgui.hpp"

using namespace std;
using namespace cv::cuda;

static void fgd_Stat(const Mat &frame)
{
	const char *imageWindow = "image";
	const char *foregroundMaskWindow = "foreground mask";
	const char *foregroundImageWindow = "foreground image";
	const char *meanBackgroundImage = "mean background image";

	static bool isFirstFrame = true;
	static GpuMat d_frame;
	static Ptr<BackgroundSubtractor> fgd = cuda::createBackgroundSubtractorFGD();

	static GpuMat d_fgmask;
	static GpuMat d_bgimg;

	static Mat fgmask;
	static Mat fgimg;
	static Mat bgimg;

	d_frame.upload(frame);
	static GpuMat d_fgimg(d_frame.size(), d_frame.type());

	if (isFirstFrame)
	{
		fgd->apply(d_frame, d_fgmask);
		isFirstFrame = false;
	}
	else
	{
		int64 start = cv::getTickCount();

		fgd->apply(d_frame, d_fgmask);
		fgd->getBackgroundImage(d_bgimg);

		double fps = cv::getTickFrequency() / (cv::getTickCount() - start);
		std::cout << "FPS : " << fps << std::endl;

		d_fgimg.setTo(Scalar::all(0));
		d_frame.copyTo(d_fgimg, d_fgmask);

		d_fgmask.download(fgmask);
		d_fgimg.download(fgimg);
		d_bgimg.download(bgimg);

		imshow(imageWindow, frame);
		imshow(foregroundMaskWindow, fgmask);
		imshow(foregroundImageWindow, fgimg);
		imshow(meanBackgroundImage, bgimg);
	}
}
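
// mog(), mog2() and gmg() below follow the same pattern as fgd_Stat(): the
// first call only feeds the background model; every later call updates the
// model, measures FPS, and displays the frame, the foreground mask, the
// foreground image and (where the algorithm supports it) the background
// image. The static GpuMat/Mat buffers avoid per-frame reallocation, but
// they also tie each function to a single video stream.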

static void mog(const Mat &frame)
{
	const char *imageWindow = "image";
	const char *foregroundMaskWindow = "foreground mask";
	const char *foregroundImageWindow = "foreground image";
	const char *meanBackgroundImage = "mean background image";

	static bool isFirstFrame = true;
	static GpuMat d_frame;
	static Ptr<BackgroundSubtractor> mog = cuda::createBackgroundSubtractorMOG();

	static GpuMat d_fgmask;
	static GpuMat d_bgimg;

	static Mat fgmask;
	static Mat fgimg;
	static Mat bgimg;

	d_frame.upload(frame);
	static GpuMat d_fgimg(d_frame.size(), d_frame.type());

	if (isFirstFrame)
	{
		mog->apply(d_frame, d_fgmask, 0.01);
		isFirstFrame = false;
	}
	else
	{
		int64 start = cv::getTickCount();

		mog->apply(d_frame, d_fgmask, 0.01);
		mog->getBackgroundImage(d_bgimg);

		double fps = cv::getTickFrequency() / (cv::getTickCount() - start);
		std::cout << "FPS : " << fps << std::endl;

		d_fgimg.setTo(Scalar::all(0));
		d_frame.copyTo(d_fgimg, d_fgmask);

		d_fgmask.download(fgmask);
		d_fgimg.download(fgimg);
		d_bgimg.download(bgimg);

		imshow(imageWindow, frame);
		imshow(foregroundMaskWindow, fgmask);
		imshow(foregroundImageWindow, fgimg);
		imshow(meanBackgroundImage, bgimg);
	}
}

static void mog2(const Mat &frame)
{
	const char *imageWindow = "image";
	const char *foregroundMaskWindow = "foreground mask";
	const char *foregroundImageWindow = "foreground image";
	const char *meanBackgroundImage = "mean background image";

	static bool isFirstFrame = true;
	static GpuMat d_frame;
	static Ptr<BackgroundSubtractor> mog2 = cuda::createBackgroundSubtractorMOG2();

	static GpuMat d_fgmask;
	static GpuMat d_bgimg;

	static Mat fgmask;
	static Mat fgimg;
	static Mat bgimg;

	d_frame.upload(frame);
	static GpuMat d_fgimg(d_frame.size(), d_frame.type());

	if (isFirstFrame)
	{
		mog2->apply(d_frame, d_fgmask);
		isFirstFrame = false;
	}
	else
	{
		int64 start = cv::getTickCount();

		mog2->apply(d_frame, d_fgmask);
		mog2->getBackgroundImage(d_bgimg);

		double fps = cv::getTickFrequency() / (cv::getTickCount() - start);
		std::cout << "FPS : " << fps << std::endl;

		d_fgimg.setTo(Scalar::all(0));
		d_frame.copyTo(d_fgimg, d_fgmask);

		d_fgmask.download(fgmask);
		d_fgimg.download(fgimg);
		d_bgimg.download(bgimg);

		imshow(imageWindow, frame);
		imshow(foregroundMaskWindow, fgmask);
		imshow(foregroundImageWindow, fgimg);
		imshow(meanBackgroundImage, bgimg);
	}
}

static void gmg(const Mat &frame)
{
	const char *imageWindow = "image";
	const char *foregroundMaskWindow = "foreground mask";
	const char *foregroundImageWindow = "foreground image";

	static bool isFirstFrame = true;
	static GpuMat d_frame;
	static Ptr<BackgroundSubtractor> gmg = cuda::createBackgroundSubtractorGMG(40);

	static GpuMat d_fgmask;

	static Mat fgmask;
	static Mat fgimg;

	d_frame.upload(frame);
	static GpuMat d_fgimg(d_frame.size(), d_frame.type());

	if (isFirstFrame)
	{
		gmg->apply(d_frame, d_fgmask);
		isFirstFrame = false;
	}
	else
	{
		int64 start = cv::getTickCount();
		gmg->apply(d_frame, d_fgmask);
		double fps = cv::getTickFrequency() / (cv::getTickCount() - start);
		std::cout << "FPS : " << fps << std::endl;

		d_fgimg.setTo(Scalar::all(0));
		d_frame.copyTo(d_fgimg, d_fgmask);

		d_fgmask.download(fgmask);
		d_fgimg.download(fgimg);


		imshow(imageWindow, frame);
		imshow(foregroundMaskWindow, fgmask);
		imshow(foregroundImageWindow, fgimg);
	}
}

void foregroundExtraction(const Mat &frame, Method m)
{
	assert(m >= MOG && m <= FGD_STAT);

	switch (m)
	{
	case FGD_STAT:
		fgd_Stat(frame);
		break;

	case MOG:
		mog(frame);
		break;

	case MOG2:
		mog2(frame);
		break;

	case GMG:
		gmg(frame);
		break;

	default:
		cout << "Unknown method; exiting." << endl;
		exit(1);
		break;
	}
}
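
For readers without a CUDA-capable build, the same per-frame loop works with the CPU background subtractors from opencv2/video.hpp. A minimal sketch (the parameter values and window names here are illustrative, not from the original project):

#include "opencv2/video.hpp"
#include "opencv2/highgui.hpp"

static void mog2_cpu(const cv::Mat &frame)
{
	// CPU MOG2: history length, variance threshold, shadow detection.
	static cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 =
		cv::createBackgroundSubtractorMOG2(500, 16.0, true);

	cv::Mat fgmask, bgimg;
	mog2->apply(frame, fgmask);			// update the model and get the mask
	mog2->getBackgroundImage(bgimg);	// current background estimate

	cv::imshow("foreground mask (CPU)", fgmask);
	cv::imshow("mean background image (CPU)", bgimg);
}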

//optical_flow.h

#ifndef OPTICAL_FLOW_H
#define OPTICAL_FLOW_H

#include "opencv2/core.hpp"
#include <opencv2/core/utility.hpp>

using namespace cv;

extern const char *optical_Flow_Name[];
extern Mat opticalFlowOut;

extern void optical_flow(const Mat &frame0, const Mat &frame1, const char *methodName);

#endif

//optical_flow.cpp

#include <iostream>
#include <fstream>
#include <cassert>
#include <cstring>

#include "optical_flow.h"
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/highgui.hpp"
#include "opencv2/cudaoptflow.hpp"
#include "opencv2/cudaarithm.hpp"

using namespace std;
using namespace cv::cuda;

Mat opticalFlowOut;					// rendered optical-flow image, 3-channel CV_8UC3

inline bool isFlowCorrect(Point2f u)
{
	return !cvIsNaN(u.x) && !cvIsNaN(u.y) && fabs(u.x) < 1e9 && fabs(u.y) < 1e9;
}

static Vec3b computeColor(float fx, float fy)
{
	static bool first = true;

	// relative lengths of color transitions:
	// these are chosen based on perceptual similarity
	// (e.g. one can distinguish more shades between red and yellow
	//  than between yellow and green)
	const int RY = 15;
	const int YG = 6;
	const int GC = 4;
	const int CB = 11;
	const int BM = 13;
	const int MR = 6;
	const int NCOLS = RY + YG + GC + CB + BM + MR;
	static Vec3i colorWheel[NCOLS];

	if (first)
	{
		int k = 0;

		for (int i = 0; i < RY; ++i, ++k)
			colorWheel[k] = Vec3i(255, 255 * i / RY, 0);

		for (int i = 0; i < YG; ++i, ++k)
			colorWheel[k] = Vec3i(255 - 255 * i / YG, 255, 0);

		for (int i = 0; i < GC; ++i, ++k)
			colorWheel[k] = Vec3i(0, 255, 255 * i / GC);

		for (int i = 0; i < CB; ++i, ++k)
			colorWheel[k] = Vec3i(0, 255 - 255 * i / CB, 255);

		for (int i = 0; i < BM; ++i, ++k)
			colorWheel[k] = Vec3i(255 * i / BM, 0, 255);

		for (int i = 0; i < MR; ++i, ++k)
			colorWheel[k] = Vec3i(255, 0, 255 - 255 * i / MR);

		first = false;
	}

	const float rad = sqrt(fx * fx + fy * fy);
	const float a = atan2(-fy, -fx) / (float)CV_PI;

	const float fk = (a + 1.0f) / 2.0f * (NCOLS - 1);
	const int k0 = static_cast<int>(fk);
	const int k1 = (k0 + 1) % NCOLS;
	const float f = fk - k0;

	Vec3b pix;

	for (int b = 0; b < 3; b++)
	{
		const float col0 = colorWheel[k0][b] / 255.0f;
		const float col1 = colorWheel[k1][b] / 255.0f;

		float col = (1 - f) * col0 + f * col1;

		if (rad <= 1)
			col = 1 - rad * (1 - col); // increase saturation with radius
		else
			col *= .75; // out of range

		pix[2 - b] = static_cast<uchar>(255.0 * col);
	}

	return pix;
}
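
// computeColor() implements the Middlebury-style color wheel: flow direction
// picks a hue around the wheel and magnitude controls saturation. A simpler
// alternative (a sketch, not in the original post) maps direction to hue and
// magnitude to brightness through HSV, assuming standard imgproc APIs:
static cv::Mat flowToHsv(const cv::Mat_<float>& flowx, const cv::Mat_<float>& flowy)
{
	cv::Mat mag, ang;
	cv::cartToPolar(flowx, flowy, mag, ang, true);	// angle in degrees [0, 360)

	cv::Mat hsv[3], hsv8, bgr;
	ang.convertTo(hsv[0], CV_8U, 0.5);				// OpenCV hue range is [0, 180)
	hsv[1] = cv::Mat(ang.size(), CV_8U, cv::Scalar(255));
	cv::normalize(mag, mag, 0, 255, cv::NORM_MINMAX);
	mag.convertTo(hsv[2], CV_8U);

	cv::merge(hsv, 3, hsv8);
	cv::cvtColor(hsv8, bgr, cv::COLOR_HSV2BGR);
	return bgr;
}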

static void drawOpticalFlow(const Mat_<float>& flowx, const Mat_<float>& flowy, Mat& dst, float maxmotion = -1)
{
	// Mat::create is a no-op when size and type already match, so no
	// first-call flag is needed here.
	dst.create(flowx.size(), CV_8UC3);
	dst.setTo(Scalar::all(0));

	// determine motion range:
	float maxrad = maxmotion;

	if (maxmotion <= 0)
	{
		maxrad = 1;
		for (int y = 0; y < flowx.rows; ++y)
		{
			for (int x = 0; x < flowx.cols; ++x)
			{
				Point2f u(flowx(y, x), flowy(y, x));

				if (!isFlowCorrect(u))
					continue;

				maxrad = max(maxrad, sqrt(u.x * u.x + u.y * u.y));
			}
		}
	}

	for (int y = 0; y < flowx.rows; ++y)
	{
		for (int x = 0; x < flowx.cols; ++x)
		{
			Point2f u(flowx(y, x), flowy(y, x));

			if (isFlowCorrect(u))
				dst.at<Vec3b>(y, x) = computeColor(u.x / maxrad, u.y / maxrad);
		}
	}
}

static void showFlow(const char* name, const GpuMat& d_flow)
{
	static GpuMat planes[2];
	cuda::split(d_flow, planes);

	Mat flowx(planes[0]);
	Mat flowy(planes[1]);

	drawOpticalFlow(flowx, flowy, opticalFlowOut);	// maxmotion = -1: scale by this frame's largest motion

	imshow(name, opticalFlowOut);
}

// Brox optical flow
static void optical_flow_Brox(const Mat &frame0, const Mat &frame1)
{
	static GpuMat d_frame0;
	static GpuMat d_frame1;

	d_frame0.upload(frame0);
	d_frame1.upload(frame1);

	static GpuMat d_flow(frame0.size(), CV_32FC2);

	static Ptr<cuda::BroxOpticalFlow> brox = cuda::BroxOpticalFlow::create(0.197f, 50.0f, 0.8f, 10, 77, 10);

	static GpuMat d_frame0f;
	static GpuMat d_frame1f;

	d_frame0.convertTo(d_frame0f, CV_32F, 1.0 / 255.0);
	d_frame1.convertTo(d_frame1f, CV_32F, 1.0 / 255.0);

	//int64 start = getTickCount();

	brox->calc(d_frame0f, d_frame1f, d_flow);

	//double timeSec = (getTickCount() - start) / getTickFrequency();
	//cout << "Brox : " << timeSec << " sec" << endl;

	showFlow("Brox", d_flow);
}
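
// Note: the CUDA Brox implementation above works on single-channel CV_32F
// frames in [0,1], hence the convertTo(..., CV_32F, 1.0/255.0); the LK,
// Farneback and TV-L1 variants below take the 8-bit grayscale frames directly.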

// Dense pyramidal Lucas-Kanade optical flow
static void optical_flow_LK(const Mat &frame0, const Mat &frame1)
{
	static GpuMat d_frame0;
	static GpuMat d_frame1;

	d_frame0.upload(frame0);
	d_frame1.upload(frame1);

	static GpuMat d_flow(frame0.size(), CV_32FC2);

	static Ptr<cuda::DensePyrLKOpticalFlow> lk = cuda::DensePyrLKOpticalFlow::create(Size(7, 7));

	//int64 start = getTickCount();

	lk->calc(d_frame0, d_frame1, d_flow);

	//double timeSec = (getTickCount() - start) / getTickFrequency();
	//cout << "LK : " << timeSec << " sec" << endl;

	showFlow("LK", d_flow);
}

// Farneback optical flow
static void optical_flow_Farn(const Mat &frame0, const Mat &frame1)
{
	static GpuMat d_frame0;
	static GpuMat d_frame1;

	d_frame0.upload(frame0);
	d_frame1.upload(frame1);

	static GpuMat d_flow(frame0.size(), CV_32FC2);

	static Ptr<cuda::FarnebackOpticalFlow> farn = cuda::FarnebackOpticalFlow::create();
	//int64 start = getTickCount();

	farn->calc(d_frame0, d_frame1, d_flow);

	//double timeSec = (getTickCount() - start) / getTickFrequency();
	//cout << "Farn : " << timeSec << " sec" << endl;

	showFlow("Farn", d_flow);
}

// Dual TV-L1 optical flow
static void optical_flow_TVL1(const Mat &frame0, const Mat &frame1)
{
	static GpuMat d_frame0;
	static GpuMat d_frame1;

	d_frame0.upload(frame0);
	d_frame1.upload(frame1);

	static GpuMat d_flow(frame0.size(), CV_32FC2);

	static Ptr<cuda::OpticalFlowDual_TVL1> tvl1 = cuda::OpticalFlowDual_TVL1::create();
	//int64 start = getTickCount();

	tvl1->calc(d_frame0, d_frame1, d_flow);

	//double timeSec = (getTickCount() - start) / getTickFrequency();
	//cout << "TVL1 : " << timeSec << " sec" << endl;

	showFlow("TVL1", d_flow);
}

typedef void(*optical_Flow)(const Mat &, const Mat &);
// Method table; must stay in the same order as optical_Flow_Name below.
static optical_Flow optical_Flow_Method[] = { optical_flow_Brox,
                                              optical_flow_LK,
                                              optical_flow_Farn,
                                              optical_flow_TVL1,
                                              NULL };

const char *optical_Flow_Name[] = { "Brox", "LK", "Farn", "TVL1", NULL };

static int getIndexOfOpticalFlowMethod(const char *methodName)
{
	assert(methodName != NULL);
	for (int i = 0; i < sizeof(optical_Flow_Name) / sizeof(*optical_Flow_Name) - 1; i++)
	{
		if (strcmp(methodName, optical_Flow_Name[i]) == 0)
		{
			return i;
		}
	}

	return -1;
}

// frame0 is the previous frame, frame1 the current frame; both must be grayscale.
// Note: the method index is looked up once and cached in a static, so the
// method name passed on the first call is used for the whole run.
void optical_flow(const Mat &frame0, const Mat &frame1, const char *methodName)
{
	static bool isFirstTime = true;
	if (isFirstTime)
	{
		assert(methodName != NULL && !frame0.empty() && !frame1.empty() && frame0.size() == frame1.size());
		isFirstTime = false;
	}

	static int methodIndex = getIndexOfOpticalFlowMethod(methodName);
	assert(methodIndex > -1);

	optical_Flow_Method[methodIndex](frame0, frame1);
}
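
After each call, the rendered flow field is left in the global opticalFlowOut (CV_8UC3), so a caller can post-process or save it, for example (a sketch; imwrite lives in opencv2/imgcodecs.hpp in OpenCV 3):

	optical_flow(previousGrayFrame, currentGrayFrame, "Farn");
	cv::imwrite("flow.png", opticalFlowOut);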

//main.cpp

#include "bgfg_segm.h"
#include "optical_flow.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/highgui.hpp"
#include <iostream>

int main(int argc, char *argv[])
{
	const char *orignalFrameWin = "original video frames";

	// Replace with the path to your own test video.
	const char *fileName = "D:/FarnebackInGPU/myProject/myProject/data/car/MAH00158.MP4";
	
	Method foregroudExtMethod = MOG;	//FGD_STAT, MOG, MOG2, GMG
	int frames = 0;
	Mat currentFrame, previousFrame;
	Mat currentGrayFrame, previousGrayFrame;
	bool isFirstFrame = true;

	VideoCapture cap;
	cap.open(fileName);
	if (!cap.isOpened())
	{
		fprintf(stderr, "Video can't be opened!\n");
		return 1;
	}

	cudaError_t cudaStatus;
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
		return 1;
	}

	while (cap.isOpened())
	{
		cap >> currentFrame;
		if (!currentFrame.data)
		{
			fprintf(stderr, "Frame is empty!\n");
			break;
		}

		// Halve the frame until it is at most 800 pixels wide.
		while (currentFrame.cols > 800)
		{
			resize(currentFrame, currentFrame, Size(currentFrame.cols / 2, currentFrame.rows / 2));
		}

		frames++;
		// VideoCapture delivers BGR frames, so convert with BGR2GRAY (not RGB2GRAY).
		cv::cvtColor(currentFrame, currentGrayFrame, cv::COLOR_BGR2GRAY);
		std::cout << "frame " << frames << std::endl;
		imshow(orignalFrameWin, currentFrame);

		if (isFirstFrame)
		{
			swap(currentGrayFrame, previousGrayFrame);
			isFirstFrame = false;
		}
		else
		{
			// Compute dense optical flow between the previous and current grayscale frames.
			// Available methods: optical_Flow_Name[] = { "Brox", "LK", "Farn", "TVL1" }.
			optical_flow(previousGrayFrame, currentGrayFrame, optical_Flow_Name[0]);	// "Brox"
			swap(currentGrayFrame, previousGrayFrame);
		}

		// Foreground extraction on the color frame.
		foregroundExtraction(currentFrame, foregroudExtMethod);

		int key = cv::waitKey(1);
		if (key == 27) break;								// ESC quits
		else if (key == 'p' || key == 'P') cv::waitKey();	// pause until a key is pressed
	}
	
	cudaStatus = cudaDeviceReset();
	if (cudaStatus != cudaSuccess)
	{
		fprintf(stderr, "cudaDeviceReset failed!");
		return 1;
	}

	return 0;
}
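
Building this requires an OpenCV build with the CUDA modules used above (cudabgsegm, cudalegacy, cudaoptflow, cudaarithm) plus the CUDA runtime. On Linux the three translation units link with something like g++ main.cpp bgfg_segm.cpp optical_flow.cpp $(pkg-config --cflags --libs opencv) -lcudart; adjust include and library paths for a Visual Studio setup like the one the hard-coded video paths suggest.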

