
Modifying the faster-rcnn demo code for real-time video testing

Reference (original blog post): https://blog.csdn.net/qq_37124237/article/details/81087505
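Compared with the stock demo.py shipped with py-faster-rcnn, the script below makes three changes: detections are drawn directly on each frame with OpenCV (vis_detections_video) instead of matplotlib, demo() takes a frame rather than an image filename, and the main block reads frames from a video file with cv2.VideoCapture in a loop while overlaying the measured per-frame detection speed (FPS).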


    
#!/usr/bin/env python

# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------

"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""

import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse

CLASSES = ('__background__',
           'ship')

NETS = {'vgg16': ('VGG16',
                  'VGG16_faster_rcnn_final.caffemodel'),
        'zf': ('ZF',
               'ZF_faster_rcnn_final.caffemodel'),
        'wyx': ('wyx', 'vgg_cnn_m_1024_faster_rcnn_iter_1000.caffemodel')}
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=3.5)
            )
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                 fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw()
def vis_detections_video(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes on a video frame with OpenCV."""
    global lastColor, frameRate
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return im

    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]
        # cv2 drawing functions expect integer pixel coordinates
        cv2.rectangle(im, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 0, 255), 2)
        # filled dark strip as background for the label text
        cv2.rectangle(im, (int(bbox[0]), int(bbox[1] - 20)), (int(bbox[0] + 200), int(bbox[1])), (10, 10, 10), -1)
        cv2.putText(im, '{:s} {:.3f}'.format(class_name, score),
                    (int(bbox[0]), int(bbox[1] - 2)),
                    cv2.FONT_HERSHEY_SIMPLEX, .75, (255, 255, 255))
    return im
def demo(net, im):
    """Detect object classes in a single video frame."""
    global frameRate
    # The frame is now passed in directly instead of being read from disk:
    #im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    #im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])
    frameRate = 1.0 / timer.total_time
    print "fps: " + str(frameRate)

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        # vis_detections_video() draws on im in place
        vis_detections_video(im, cls, dets, thresh=CONF_THRESH)
        # overlay the measured frame rate; (1750, 50) assumes a wide frame
        cv2.putText(im, '{:s} {:.2f}'.format("FPS:", frameRate), (1750, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255))
        cv2.imshow(videoFilePath.split('/')[-1], im)
        cv2.waitKey(20)
def parse_args():
    """Parse input arguments."""
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
                        default=0, type=int)
    parser.add_argument('--cpu', dest='cpu_mode',
                        help='Use CPU mode (overrides --gpu)',
                        action='store_true')
    parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
                        choices=NETS.keys(), default='vgg16')

    args = parser.parse_args()
    return args
if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    args = parse_args()

    # The original demo looked the model files up from NETS:
    # prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
    #                         'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
    # caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
    #                           NETS[args.demo_net][1])
    # Here they are hard-coded to the locally trained VGG_CNN_M_1024 model:
    prototxt = '/home/yexin/py-faster-rcnn/models/pascal_voc/VGG_CNN_M_1024/faster_rcnn_end2end/test.prototxt'
    caffemodel = '/home/yexin/py-faster-rcnn/output/faster_rcnn_end2end/voc_2007_trainval/vgg_cnn_m_1024_faster_rcnn_iter_100.caffemodel'

    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))

    if args.cpu_mode:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(args.gpu_id)
        cfg.GPU_ID = args.gpu_id
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)

    print '\n\nLoaded network {:s}'.format(caffemodel)

    # Warmup on a dummy image
    im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
    for i in xrange(2):
        _, _ = im_detect(net, im)

    videoFilePath = '/home/yexin/py-faster-rcnn/data/demo/test_1-3.mp4'
    videoCapture = cv2.VideoCapture(videoFilePath)

    while True:
        success, im = videoCapture.read()
        if not success:  # stop when the video ends or a frame cannot be read
            break
        demo(net, im)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
    videoCapture.release()
    cv2.destroyAllWindows()
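If you also want to keep the annotated result rather than only display it, the fragment below is a minimal sketch (not from the original post) that writes each processed frame to a file. It assumes OpenCV 3.x, where cv2.VideoWriter_fourcc is available; the output name out.avi and the XVID codec are arbitrary example choices. Because vis_detections_video() draws on the frame in place, the frame written right after demo() already carries the boxes and the FPS overlay.

# Hypothetical extension (assumes OpenCV 3.x): save annotated frames to out.avi
fps = videoCapture.get(cv2.CAP_PROP_FPS) or 25.0
width = int(videoCapture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(videoCapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
writer = cv2.VideoWriter('out.avi', cv2.VideoWriter_fourcc(*'XVID'),
                         fps, (width, height))

while True:
    success, im = videoCapture.read()
    if not success:
        break
    demo(net, im)      # draws boxes and the FPS text on im in place
    writer.write(im)   # so the saved frame matches what cv2.imshow displays
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

writer.release()
videoCapture.release()
cv2.destroyAllWindows()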

 


  