
Object Tracking on iOS with OpenCV

#import <opencv2/imgproc/imgproc_c.h>
#import <opencv2/video/tracking.hpp>
#import <opencv2/objdetect/objdetect.hpp>
#import <AVFoundation/AVFoundation.h>   // needed for the capture session and delegate below


Add a listener for the video stream.

Implement <AVCaptureVideoDataOutputSampleBufferDelegate> to act as the delegate that receives video frames.
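For reference, a minimal sketch of the controller declaration (the class name TrackingViewController is assumed, and imageView is the image view that CamShiftDetect: draws on later):

#import <UIKit/UIKit.h>
#import <AVFoundation/AVFoundation.h>

// Assumed controller declaration: the class name is hypothetical, and
// imageView is the canvas used later by CamShiftDetect:.
@interface TrackingViewController : UIViewController
    <AVCaptureVideoDataOutputSampleBufferDelegate>
{
    UIImageView *imageView;
}
@end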

Below are the conversions between UIImage and OpenCV image data; these functions will be called by the code that follows.

#pragma mark -

#pragma mark OpenCV Support Methods

// NOTE: the caller must cvReleaseImage() the returned image when done with it.
- (IplImage *)CreateIplImageFromUIImage:(UIImage *)image {
    CGImageRef imageRef = image.CGImage;
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    IplImage *iplimage = cvCreateImage(cvSize(image.size.width, image.size.height), IPL_DEPTH_8U, 4);
    CGContextRef contextRef = CGBitmapContextCreate(iplimage->imageData, iplimage->width, iplimage->height,
                                                    iplimage->depth, iplimage->widthStep,
                                                    colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef, CGRectMake(0, 0, image.size.width, image.size.height), imageRef);
    CGContextRelease(contextRef);
    CGColorSpaceRelease(colorSpace);

    IplImage *ret = cvCreateImage(cvGetSize(iplimage), IPL_DEPTH_8U, 3);
    cvCvtColor(iplimage, ret, CV_RGBA2BGR);
    cvReleaseImage(&iplimage);
    return ret;
}

// NOTE: convert the image to RGB before passing it to this function.
- (UIImage *)UIImageFromIplImage:(IplImage *)image {
    NSLog(@"IplImage (%d, %d) %d bits by %d channels, %d bytes/row %s",
          image->width, image->height, image->depth, image->nChannels, image->widthStep, image->channelSeq);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    NSData *data = [NSData dataWithBytes:image->imageData length:image->imageSize];
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((CFDataRef)data);
    CGImageRef imageRef = CGImageCreate(image->width, image->height,
                                        image->depth, image->depth * image->nChannels, image->widthStep,
                                        colorSpace, kCGImageAlphaNone | kCGBitmapByteOrderDefault,
                                        provider, NULL, false, kCGRenderingIntentDefault);
    UIImage *ret = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);
    return ret;
}
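As a quick sanity check, the two converters can be chained in a round trip (a minimal sketch; note the IplImage must be converted back to RGB before it becomes a UIImage again):

UIImage *input = [UIImage imageNamed:@"target12.jpg"];
IplImage *bgr = [self CreateIplImageFromUIImage:input];   // 3-channel BGR

IplImage *rgb = cvCreateImage(cvGetSize(bgr), IPL_DEPTH_8U, 3);
cvCvtColor(bgr, rgb, CV_BGR2RGB);                         // back to RGB for UIKit
UIImage *output = [self UIImageFromIplImage:rgb];

cvReleaseImage(&rgb);
cvReleaseImage(&bgr);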

Below is the code I adapted from OpenCV's camshift demo.

- (void)CamShiftDetect:(UIImage *)capPic {
    NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
    IplImage *image = 0, *hsv = 0, *hue = 0, *mask = 0, *backproject = 0, *histimg = 0;
    CvHistogram *hist = 0;
    int backproject_mode = 0;
    int track_object = 0;
    int select_object = 0;
    CvConnectedComp track_comp;
    CvRect selection;
    CvRect track_window;
    CvBox2D track_box;
    int hdims = 16;
    float hranges_arr[] = {0, 180};
    float *hranges = hranges_arr;
    int vmin = 90, vmax = 256, smin = 90;

    cvSetErrMode(CV_ErrModeParent);

    /* allocate all the buffers */
    IplImage *frame = [self CreateIplImageFromUIImage:capPic];
    //NSLog(@"%d  %d", cvGetSize(frame).width, cvGetSize(frame).height);
    image = cvCreateImage(cvGetSize(frame), 8, 3);
    image->origin = frame->origin;
    hsv = cvCreateImage(cvGetSize(frame), 8, 3);
    hue = cvCreateImage(cvGetSize(frame), 8, 1);
    mask = cvCreateImage(cvGetSize(frame), 8, 1);
    backproject = cvCreateImage(cvGetSize(frame), 8, 1);
    hist = cvCreateHist(1, &hdims, CV_HIST_ARRAY, &hranges, 1);
    histimg = cvCreateImage(cvSize(360, 480), 8, 3);
    cvZero(histimg);

    // Build the hue histogram of the target image (assumed to be the same
    // size as the captured frame, since it is converted into the hsv buffer).
    NSString *path = [[NSBundle mainBundle] pathForResource:@"target12" ofType:@"jpg"];
    IplImage *tempimage = [self CreateIplImageFromUIImage:[UIImage imageWithContentsOfFile:path]];
    cvCvtColor(tempimage, hsv, CV_BGR2HSV);
    int _vmin = vmin, _vmax = vmax;
    cvInRangeS(hsv, cvScalar(0, smin, MIN(_vmin, _vmax), 0),
               cvScalar(180, 256, MAX(_vmin, _vmax), 0), mask);
    cvSplit(hsv, hue, 0, 0, 0);
    selection.x = 1;
    selection.y = 1;
    selection.width = 360 - 1;
    selection.height = 480 - 1;
    cvSetImageROI(hue, selection);
    cvSetImageROI(mask, selection);
    cvCalcHist(&hue, hist, 0, mask);
    float max_val = 0.f;
    cvGetMinMaxHistValue(hist, 0, &max_val, 0, 0);
    cvConvertScale(hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0);
    cvResetImageROI(hue);
    cvResetImageROI(mask);
    track_window = selection;
    track_object = 1;

    cvZero(histimg);
    int bin_w = histimg->width / hdims;
    for (int i = 0; i < hdims; i++)
    {
        int val = cvRound(cvGetReal1D(hist->bins, i) * histimg->height / 255);
        CvScalar color = hsv2rgb(i * 180.f / hdims);
        cvRectangle(histimg, cvPoint(i * bin_w, histimg->height),
                    cvPoint((i + 1) * bin_w, histimg->height - val),
                    color, -1, 8, 0);
    }
    cvReleaseImage(&tempimage);

    cvCopy(frame, image, 0);
    cvCvtColor(image, hsv, CV_BGR2HSV);
    if (track_object)
    {
        int _vmin = vmin, _vmax = vmax;
        cvInRangeS(hsv, cvScalar(0, smin, MIN(_vmin, _vmax), 0),
                   cvScalar(180, 256, MAX(_vmin, _vmax), 0), mask);
        cvSplit(hsv, hue, 0, 0, 0);
        cvCalcBackProject(&hue, backproject, hist);
        cvAnd(backproject, mask, backproject, 0);
        cvCamShift(backproject, track_window,
                   cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1),
                   &track_comp, &track_box);
        track_window = track_comp.rect;
        if (backproject_mode)
            cvCvtColor(backproject, image, CV_GRAY2BGR);
        if (image->origin)
            track_box.angle = -track_box.angle;
        cvEllipseBox(image, track_box, CV_RGB(255, 0, 0), 3, CV_AA, 0);

        // Create a canvas to show the results
        CGImageRef imageRef = imageView.image.CGImage;
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        CGContextRef contextRef = CGBitmapContextCreate(NULL, imageView.image.size.width, imageView.image.size.height,
                                                        8, imageView.image.size.width * 4,
                                                        colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrderDefault);
        CGContextDrawImage(contextRef, CGRectMake(0, 0, imageView.image.size.width, imageView.image.size.height), imageRef);
        CGContextSetLineWidth(contextRef, 4);
        CGContextSetRGBStrokeColor(contextRef, 0.0, 0.0, 1.0, 0.5);
        CGContextRelease(contextRef);
        CGColorSpaceRelease(colorSpace);

        // Draw the results on the image
        NSLog(@" %d \n %d\n %d \n %d", track_window.x, track_window.y, track_window.width, track_window.height);
        NSLog(@"box %@", NSStringFromCGRect(CGRectMake(track_box.center.x, track_box.center.y, track_box.size.width, track_box.size.height)));
        [self performSelectorInBackground:@selector(draw1:)
                               withObject:NSStringFromCGRect(CGRectMake(360 - track_box.center.y, track_box.center.x, track_box.size.width, track_box.size.height))];
    }
    if (select_object && selection.width > 0 && selection.height > 0)
    {
        cvSetImageROI(image, selection);
        cvXorS(image, cvScalarAll(255), image, 0);
        cvResetImageROI(image);
    }
    [self hideProgressIndicator];

    // Release the per-frame buffers so each frame doesn't leak its allocations.
    cvReleaseHist(&hist);
    cvReleaseImage(&histimg);
    cvReleaseImage(&backproject);
    cvReleaseImage(&mask);
    cvReleaseImage(&hue);
    cvReleaseImage(&hsv);
    cvReleaseImage(&image);
    cvReleaseImage(&frame);
    [pool release];
}
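Two helpers used above are not defined in this post. hsv2rgb() is the histogram-coloring helper from OpenCV's camshiftdemo.c; the demo's version looks like this:

CvScalar hsv2rgb( float hue )
{
    int rgb[3], p, sector;
    static const int sector_data[][3] =
        {{0,2,1}, {1,2,0}, {1,0,2}, {2,0,1}, {2,1,0}, {0,1,2}};
    hue *= 0.033333333333333333333333333333333f;   // scale hue into [0,6)
    sector = cvFloor(hue);
    p = cvRound(255*(hue - sector));
    p ^= sector & 1 ? 255 : 0;

    rgb[sector_data[sector][0]] = 255;
    rgb[sector_data[sector][1]] = 0;
    rgb[sector_data[sector][2]] = p;

    return cvScalar(rgb[2], rgb[1], rgb[0], 0);
}

draw1: is not shown either; a hypothetical sketch (markerView is an assumed overlay view) could parse the rect string and move the marker on the main thread:

- (void)draw1:(NSString *)rectString {
    CGRect box = CGRectFromString(rectString);
    dispatch_async(dispatch_get_main_queue(), ^{
        markerView.frame = box;   // markerView: assumed overlay UIView ivar
    });
}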


Call the method from the capture delegate:

- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    [self CamShiftDetect:[self imageFromSampleBuffer:sampleBuffer]];
}
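imageFromSampleBuffer: is not listed in this post; below is a sketch based on Apple's well-known sample code (Technical Q&A QA1702), assuming the 32BGRA pixel format configured in setupCaptureSession:

// Create a UIImage from sample buffer data
- (UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer
{
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(imageBuffer, 0);

    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(baseAddress, width, height, 8,
        bytesPerRow, colorSpace,
        kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    CGImageRef quartzImage = CGBitmapContextCreateImage(context);

    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);

    UIImage *image = [UIImage imageWithCGImage:quartzImage];
    CGImageRelease(quartzImage);
    return image;
}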

Set up and start the video capture session:

- (void)setupCaptureSession
{
    NSError *error = nil;

    // Create the session
    AVCaptureSession *session = [[[AVCaptureSession alloc] init] autorelease];

    // Configure the session to produce lower-resolution video frames, if your
    // processing algorithm can cope. We'll specify medium quality for the
    // chosen device.
    session.sessionPreset = AVCaptureSessionPresetMedium;

    // Find a suitable AVCaptureDevice. This defaults to the back camera;
    // you can switch to the front camera instead.
    AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];

    // Create a device input with the device and add it to the session.
    AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:device
                                                                        error:&error];
    if (!input) {
        // Handle the error appropriately.
    }
    [session addInput:input];

    // Create a VideoDataOutput and add it to the session
    AVCaptureVideoDataOutput *output = [[[AVCaptureVideoDataOutput alloc] init] autorelease];
    [session addOutput:output];

    // Configure your output.
    dispatch_queue_t queue = dispatch_queue_create("myQueue", NULL);
    [output setSampleBufferDelegate:self queue:queue];
    dispatch_release(queue);

    // Specify the pixel format
    output.videoSettings = [NSDictionary dictionaryWithObjectsAndKeys:
                            [NSNumber numberWithInt:kCVPixelFormatType_32BGRA], (id)kCVPixelBufferPixelFormatTypeKey,
                            [NSNumber numberWithInt:360], (id)kCVPixelBufferWidthKey,
                            [NSNumber numberWithInt:480], (id)kCVPixelBufferHeightKey,
                            nil];

    AVCaptureVideoPreviewLayer *preLayer = [AVCaptureVideoPreviewLayer layerWithSession:session];
    preLayer.frame = CGRectMake(0, 0, 360, 480);
    preLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
    [self.view.layer addSublayer:preLayer];
    //[self.view addSubview:self.imageView];

    // If you wish to cap the frame rate to a known value, set minFrameDuration.
    // CMTimeMake(1, 1) limits capture to one frame per second.
    output.minFrameDuration = CMTimeMake(1, 1);

    // Start the session running to start the flow of data
    [session startRunning];

    // Assign the session to an ivar if you need to stop it later.
    //[self setSession:session];
}
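A natural place to kick everything off is viewDidLoad (a minimal sketch; retaining the session in an ivar, as the commented-out setSession: call suggests, lets you stop it later):

- (void)viewDidLoad {
    [super viewDidLoad];
    [self setupCaptureSession];   // frames begin arriving in captureOutput:didOutputSampleBuffer:fromConnection:
}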



The code is rough and hasn't been cleaned up. To use it, crop a small patch of the object you want to track and bundle it as the target image (target12.jpg). When the app starts, it tracks that object automatically and draws an ellipse to mark its position. Tracking is fairly slow, probably because video processing on the phone is expensive.