// Final version: RGB and depth image capture (Kinect v1 SDK).
// Corrected from the original blog post: the depth-image normalization method
// was adjusted, which reduces holes in the displayed depth map.
// Platform headers (Windows.h must precede NuiApi.h).
#include <Windows.h>
#include <d3d11.h>
#include "NuiApi.h"

// OpenCV (forward slashes in include paths for portability).
#include <opencv2/opencv.hpp>
#include <cv.h> // legacy OpenCV C API header (kept for compatibility)

// C++ standard library.
#include <iostream>

using namespace std;
using namespace cv;
//最遠距離(mm)
const int MAX_DISTANCE = 3500;
//最近距離(mm)
const int MIN_DISTANCE = 200;
const LONG m_depthWidth = 640;
const LONG m_depthHeight = 480;
const LONG m_colorWidth = 640;
const LONG m_colorHeight = 480;
const LONG cBytesPerPixel = 4; //畫素位數
//計算記憶體大小
int main()
{
//彩色影象
Mat image_rgb;
//深度影象
Mat image_depth;
//建立一個MAT
image_rgb.create(480,640,CV_8UC3);
image_depth.create(480,640,CV_8UC1);
//一個KINECT例項指標
INuiSensor* m_pNuiSensor = NULL;
if (m_pNuiSensor != NULL)
{
return 0;
}
//記錄當前連線KINECT的數量(為多連線做準備)
int iSensorCount;
//獲得當前KINECT的數量
HRESULT hr = NuiGetSensorCount(&iSensorCount);
//按照序列初始化KINETC例項,這裡就連線了一個KINECT,所以沒有用到迴圈
hr = NuiCreateSensorByIndex(iSensorCount - 1, &m_pNuiSensor);
//初始化,讓其可以接收彩色和深度資料流
hr = m_pNuiSensor->NuiInitialize(NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_DEPTH);
//判斷是否出錯
if (FAILED(hr))
{
cout<<"NuiInitialize failed"<<endl;
return hr;
}
//彩色影象獲取下一幀事件
HANDLE nextColorFrameEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
//彩色影象事件控制代碼
HANDLE colorStreamHandle = NULL;
//深度影象獲取下一幀事件
HANDLE nextDepthFrameEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
//深度影象事件控制代碼
HANDLE depthStreamHandle = NULL;
//例項開啟資料流,這裡NUI_IMAGE_TYPE_COLOR表示彩色影象
hr = m_pNuiSensor->NuiImageStreamOpen(NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480, 0,2,nextColorFrameEvent,&colorStreamHandle);
if( FAILED( hr ) )//判斷是否提取正確
{
cout<<"Could not open color image stream video"<<endl;
m_pNuiSensor->NuiShutdown();
return hr;
}
//例項開啟資料流,這裡NUI_IMAGE_TYPE_DEPTH表示深度影象
hr = m_pNuiSensor->NuiImageStreamOpen(NUI_IMAGE_TYPE_DEPTH, NUI_IMAGE_RESOLUTION_640x480, 0,2, nextDepthFrameEvent, &depthStreamHandle);
if( FAILED( hr ) )//判斷是否提取正確
{
cout<<"Could not open color image stream video"<<endl;
m_pNuiSensor->NuiShutdown();
return hr;
}
cv::namedWindow("depth",1);
moveWindow("depth",300,600);
cv::namedWindow("colorImage",1);
moveWindow("colorImage",0,200);
while (1)
{
NUI_IMAGE_FRAME pImageFrame_rgb;
NUI_IMAGE_FRAME pImageFrame_depth;
//無限等待新的彩色資料,等到後返回
if (WaitForSingleObject(nextColorFrameEvent, 0) == 0)
{
//從剛才開啟資料流的流控制代碼中得到該幀資料,讀取到的資料地址存於pImageFrame
hr = m_pNuiSensor->NuiImageStreamGetNextFrame(colorStreamHandle, 0, &pImageFrame_rgb);
if (FAILED(hr))
{
cout<<"Could not get color image"<<endl;
m_pNuiSensor->NuiShutdown();
return -1;
}
INuiFrameTexture *pTexture = pImageFrame_rgb.pFrameTexture;
NUI_LOCKED_RECT lockedRect;
//提取資料幀到LockedRect,它包括兩個資料物件:pitch每行位元組數,pBits第一個位元組地址
//並鎖定資料,這樣當我們讀資料的時候,kinect就不會去修改它
pTexture->LockRect(0, &lockedRect, NULL, 0);
//確認獲得的資料是否有效
if (lockedRect.Pitch != 0)
{
//將資料轉換為OpenCV的Mat格式
for (int i = 0; i < image_rgb.rows; i++)
{
//第i行的指標
uchar *prt = image_rgb.ptr(i);
//每個位元組代表一個顏色資訊,直接使用uchar
uchar *pBuffer = (uchar*)(lockedRect.pBits) + i * lockedRect.Pitch;
for (int j = 0; j < image_rgb.cols; j++)
{
prt[3 * j] = pBuffer[4 * j];//內部資料是4個位元組,0-1-2是BGR,第4個現在未使用
prt[3 * j + 1] = pBuffer[4 * j + 1];
prt[3 * j + 2] = pBuffer[4 * j + 2];
}
}
// Canny(image_rgb,image_rgb,0,30,3);
imshow("colorImage",image_rgb);
//解除鎖定
pTexture->UnlockRect(0);
//釋放幀
m_pNuiSensor->NuiImageStreamReleaseFrame(colorStreamHandle, &pImageFrame_rgb );
}
else
{
cout<<"Buffer length of received texture is bogus\r\n"<<endl;
}
BOOL nearMode;
INuiFrameTexture* pColorToDepthTexture;
//深度影象的處理
if (WaitForSingleObject(nextDepthFrameEvent, INFINITE) == 0)
{
hr = m_pNuiSensor->NuiImageStreamGetNextFrame(depthStreamHandle, 0 , &pImageFrame_depth);
if (FAILED(hr))
{
cout<<"Could not get depth image"<<endl;
NuiShutdown();
return -1;
}
hr = m_pNuiSensor->NuiImageFrameGetDepthImagePixelFrameTexture( depthStreamHandle, &pImageFrame_depth, &nearMode, &pColorToDepthTexture);
INuiFrameTexture *pTexture = pImageFrame_depth.pFrameTexture;
NUI_LOCKED_RECT lockedRect;
NUI_LOCKED_RECT ColorToDepthLockRect;
pTexture->LockRect(0, &lockedRect, NULL, 0);
pColorToDepthTexture->LockRect(0,&ColorToDepthLockRect,NULL,0);
//歸一化
for (int i = 0; i < image_depth.rows; i++)
{
uchar *prt = image_depth.ptr<uchar>(i);
uchar* pBuffer = (uchar*)(lockedRect.pBits) + i * lockedRect.Pitch;
//這裡需要轉換,因為每個深度資料是2個位元組,應將BYTE轉成USHORT
USHORT *pBufferRun = (USHORT*)pBuffer;
for (int j = 0; j < image_depth.cols; j++)
{
//先向,將資料歸一化處理,對深度距離在300mm-3500mm範圍內的畫素,對映到【0—255】內,
//超出範圍的,都去做是邊緣畫素
prt[j] = 255 - (BYTE)(256*pBufferRun[j]/0x0fff);//直接將資料歸一化處理
}
}
Mat out;
bilateralFilter(image_depth,out,25,25*2,25/2);
imshow("depth", image_depth);
//接下來是對齊部分,將前景摳出來
//存放深度點的引數
NUI_DEPTH_IMAGE_POINT* depthPoints = new NUI_DEPTH_IMAGE_POINT[640 * 480];
if (ColorToDepthLockRect.Pitch != 0)
{
HRESULT hrState = S_OK;
//一個能在不同空間座標轉變的類(包括:深度,彩色,骨骼)
INuiCoordinateMapper* pMapper;
//設定KINECT例項的空間座標系
hrState = m_pNuiSensor->NuiGetCoordinateMapper(&pMapper);
if (FAILED(hrState))
{
return hrState;
}
//重要的一步:從顏色空間對映到深度空間。引數說明:
//【引數1】:彩色影象的型別
//【引數2】:彩色影象的解析度
//【引數3】:深度影象的解析度
//【引數4】:深度影象的個數
//【引數5】:深度畫素點數
//【引數6】:取記憶體的大小,個數。型別為NUI_DEPTH_IMAGE_PIXEL
//【引數7】:存放對映結果點的引數
hrState = pMapper->MapColorFrameToDepthFrame(NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480, NUI_IMAGE_RESOLUTION_640x480,
640 * 480, (NUI_DEPTH_IMAGE_PIXEL*)ColorToDepthLockRect.pBits,640 * 480, depthPoints);
if (FAILED(hrState))
{
return hrState;
}
//顯示的影象
Mat show;
show.create(480,640,CV_8UC3);
show = 0;
for (int i = 0; i < image_rgb.rows; i++)
{
for (int j = 0; j < image_rgb.cols; j++)
{
uchar *prt_rgb = image_rgb.ptr(i);
uchar *prt_show = show.ptr(i);
//在記憶體中偏移量
long index = i * 640 + j;
//從儲存了對映座標的陣列中獲取點
NUI_DEPTH_IMAGE_POINT depthPointAtIndex = depthPoints[index];
//邊界判斷
if (depthPointAtIndex.x >= 0 && depthPointAtIndex.x < image_depth.cols &&
depthPointAtIndex.y >=0 && depthPointAtIndex.y < image_depth.rows)
{
//深度判斷,在MIN_DISTANCE與MAX_DISTANCE之間的當成前景,顯示出來
//這個使用也很重要,當使用真正的深度畫素點再在深度影象中獲取深度值來判斷的時候,會出錯
if (depthPointAtIndex.depth >= MIN_DISTANCE && depthPointAtIndex.depth <= MAX_DISTANCE)
{
prt_show[3 * j] = prt_rgb[j * 3];
prt_show[3 * j + 1] = prt_rgb[j * 3 + 1];
prt_show[3 * j + 2] = prt_rgb[j * 3 + 2];
}
}
}
}
imshow("show", show);
}
delete []depthPoints;
pTexture->UnlockRect(0);
m_pNuiSensor->NuiImageStreamReleaseFrame(depthStreamHandle, &pImageFrame_depth);
}
else
{
cout<<"Buffer length of received texture is bogus\r\n"<<endl;
}
}
if (waitKey(20) == 27)
break;
}
return 0;
}