1. 程式人生 > 實用技巧 > VideoToolbox H264 視訊編碼 Swift 版本

VideoToolboxH264視訊編碼swift版本

VideoToolbox是一個低階框架,提供對硬體編碼器和解碼器的直接訪問。它提供視訊壓縮和解壓服務,以及儲存在CoreVideo畫素緩衝區中的柵格影象格式之間的轉換服務。這些服務以會話物件(壓縮、解壓縮和畫素傳輸)的形式提供,並作為Core Foundation (CF)型別暴露。不需要直接訪問硬體編碼器和解碼器的應用程式,通常無需直接使用VideoToolbox。

使用VideoToolbox對視訊進行硬編碼和硬解碼,下面是對視訊進行H264編碼的實現過程,demo原始碼地址:https://github.com/duzhaoquan/VideoEncodeH264

1.視訊捕獲

//視訊捕獲相關
    // MARK: - Capture properties

    var session: AVCaptureSession = AVCaptureSession()
    var queue = DispatchQueue(label: "quque")
    var input: AVCaptureDeviceInput?
    lazy var previewLayer = AVCaptureVideoPreviewLayer(session: self.session)
    lazy var recordOutput = AVCaptureMovieFileOutput()
    var captureView: UIView!
    let output = AVCaptureVideoDataOutput()
    var focusBox: UIView!
    var exposureBox: UIView!

    /// Configures the capture session with the back camera and wires the
    /// encoder/decoder pipeline (encoded NALUs are fed straight back into the
    /// decoder for display).
    func startCapture() {
        guard let device = getCamera(postion: .back) else { return }
        guard let input = try? AVCaptureDeviceInput(device: device) else { return }
        self.input = input
        if session.canAddInput(input) {
            session.addInput(input)
        }
        previewLayer.isHidden = false
        // Preserve aspect ratio in the preview layer.
        previewLayer.videoGravity = .resizeAspect
        session.startRunning()

        // Encoder: every encoded NALU is decoded immediately (loopback demo).
        encoder = DQVideoEncoder(width: 480, height: 640)
        encoder.videoEncodeCallback { [weak self] (data) in
            self?.decoder.decode(data: data)
        }
        encoder.videoEncodeCallbackSPSAndPPS { [weak self] (sps, pps) in
            // Feed parameter sets to the decoder before any frame data.
            self?.decoder.decode(data: sps)
            self?.decoder.decode(data: pps)
        }

        // Decoder: decoded pixel buffers are handed to the player view.
        decoder = DQVideoDecode(width: 480, height: 640)
        decoder.SetVideoDecodeCallback { (image) in
            self.player?.pixelBuffer = image
        }

        // Objective-C variant of the pipeline (used for comparison).
        let con = CCVideoConfig()
        con.width = 480
        con.height = 640
        con.bitrate = 480 * 640 * 5
        ccencode = CCVideoEncoder(config: con)
        ccencode?.delegate = self
        ccDecode = CCVideoDecoder(config: con)
        ccDecode?.delegate = self
    }

    /// Appends raw H.264 data to the capture file.
    func writeTofile(data: Data) {
        try? self.fileHandle?.seekToEnd()
        self.fileHandle?.write(data)
    }

    /// Toggles recording: attaches/detaches the video-data output and
    /// prepares the output .h264 file on first start.
    @objc func recordAction(btn: UIButton) {
        btn.isSelected = !btn.isSelected
        if !session.isRunning {
            session.startRunning()
        }
        if btn.isSelected {
            btn.setTitle("stop record", for: .normal)
            output.setSampleBufferDelegate(self, queue: queue)
            if session.canAddOutput(output) {
                session.addOutput(output)
            }
            output.alwaysDiscardsLateVideoFrames = false
            // BGRA output so no YUV shader conversion is needed; must match the
            // pixel format used later by CVMetalTextureCacheCreateTextureFromImage,
            // otherwise rendering glitches appear.
            output.videoSettings = [String(kCVPixelBufferPixelFormatTypeKey): NSNumber(value: kCVPixelFormatType_32BGRA)]
            let connection: AVCaptureConnection = output.connection(with: .video)!
            connection.videoOrientation = .portrait
            if fileHandle == nil {
                // Create Documents/video.h264, replacing any previous run's file.
                guard let path = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true).first else { return }
                let filePath = "\(path)/video.h264"
                try? FileManager.default.removeItem(atPath: filePath)
                if FileManager.default.createFile(atPath: filePath, contents: nil, attributes: nil) {
                    print("建立264檔案成功")
                } else {
                    print("建立264檔案失敗")
                }
                fileHandle = FileHandle(forWritingAtPath: filePath)
            }
        } else {
            session.removeOutput(output)
            btn.setTitle("start record", for: .normal)
        }
    }

    /// Returns the first capture device at the requested position, or nil.
    /// (Note: parameter label "postion" is kept for source compatibility.)
    func getCamera(postion: AVCaptureDevice.Position) -> AVCaptureDevice? {
        var devices = [AVCaptureDevice]()
        if #available(iOS 10.0, *) {
            let discoverySession = AVCaptureDevice.DiscoverySession(
                deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera],
                mediaType: AVMediaType.video,
                position: AVCaptureDevice.Position.unspecified)
            devices = discoverySession.devices
        } else {
            devices = AVCaptureDevice.devices(for: AVMediaType.video)
        }
        for device in devices where device.position == postion {
            return device
        }
        return nil
    }

2.視訊編碼器封裝

/// Hardware H.264 encoder built on VideoToolbox.
///
/// Captured `CMSampleBuffer`s are submitted to a `VTCompressionSession`; the
/// compressed output is converted from AVCC (length-prefixed) to Annex-B
/// (0x00000001 start-code prefixed) NAL units and delivered through the
/// registered callbacks on `callBackQueue`.
class DQVideoEncoder: NSObject {

    /// Monotonic frame counter, used as the presentation-timestamp value.
    /// (Fix: the original never incremented it, so every frame had PTS 0.)
    var frameID: Int64 = 0
    /// True once SPS/PPS have been delivered; prevents re-sending them on
    /// every keyframe. (Fix: the original declared but never set this flag.)
    var hasSpsPps = false
    var width: Int32 = 480
    var height: Int32 = 640
    var bitRate: Int32 = 480 * 640 * 3 * 4
    var fps: Int32 = 10
    /// Serial queue on which frames are submitted to the session.
    var encodeQueue = DispatchQueue(label: "encode")
    /// Serial queue on which encoded data is handed back to the client.
    var callBackQueue = DispatchQueue(label: "callBack")

    var encodeSession: VTCompressionSession!
    var encodeCallBack: VTCompressionOutputCallback?

    /// Invoked once per encoded NALU (start-code prefixed).
    var videoEncodeCallback: ((Data) -> Void)?
    func videoEncodeCallback(block: @escaping (Data) -> Void) {
        self.videoEncodeCallback = block
    }

    /// Invoked once with the (SPS, PPS) parameter sets (start-code prefixed).
    var videoEncodeCallbackSPSAndPPS: ((Data, Data) -> Void)?
    func videoEncodeCallbackSPSAndPPS(block: @escaping (Data, Data) -> Void) {
        videoEncodeCallbackSPSAndPPS = block
    }

    /// - Parameters:
    ///   - width/height: dimensions of the frames to encode.
    ///   - bitRate: average bit rate in bps. Defaults to width*height*3*4
    ///     (generalizes the old hard-coded 480*640*3*4; identical for the
    ///     default dimensions).
    ///   - fps: expected frame-rate hint passed to the session; defaults to 10.
    init(width: Int32 = 480, height: Int32 = 640, bitRate: Int32? = nil, fps: Int32? = nil) {
        self.width = width
        self.height = height
        self.bitRate = bitRate ?? width * height * 3 * 4
        self.fps = fps ?? 10
        super.init()
        setCallBack()
        initVideoToolBox()
    }

    /// Creates and configures the VTCompressionSession.
    func initVideoToolBox() {
        let state = VTCompressionSessionCreate(
            allocator: kCFAllocatorDefault,
            width: width,
            height: height,
            codecType: kCMVideoCodecType_H264,
            encoderSpecification: nil,
            imageBufferAttributes: nil,
            compressedDataAllocator: nil,
            outputCallback: encodeCallBack,
            refcon: unsafeBitCast(self, to: UnsafeMutableRawPointer.self),
            compressionSessionOut: &self.encodeSession)
        guard state == noErr, encodeSession != nil else {
            print("create VTCompressionSession failed: \(state)")
            return
        }

        // Real-time encoding output.
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_RealTime, value: kCFBooleanTrue)
        // Baseline profile (no B-frames in the profile itself).
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_ProfileLevel, value: kVTProfileLevel_H264_Baseline_AutoLevel)
        // Disable frame reordering so no B-frames are produced.
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_AllowFrameReordering, value: kCFBooleanFalse)
        // Keyframe (GOP) interval. NSNumber is toll-free bridged to CFNumber,
        // so the manual CFNumberCreate calls are unnecessary.
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_MaxKeyFrameInterval, value: NSNumber(value: 10))
        // Expected (not enforced) frame rate.
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_ExpectedFrameRate, value: NSNumber(value: fps))
        // Average bit rate in bps.
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_AverageBitRate, value: NSNumber(value: bitRate))
        // Hard data-rate cap, expressed as [bytes, seconds] pairs.
        let bitRatesLimit: CFArray = [bitRate * 2, 1] as CFArray
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_DataRateLimits, value: bitRatesLimit)
    }

    /// Submits one captured sample buffer for asynchronous encoding.
    func encodeVideo(sampleBuffer: CMSampleBuffer) {
        if self.encodeSession == nil {
            initVideoToolBox()
        }
        encodeQueue.async {
            guard let session = self.encodeSession,
                  let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
                return
            }
            // Advance the counter so each frame gets a monotonically
            // increasing presentation timestamp.
            self.frameID += 1
            let time = CMTime(value: self.frameID, timescale: 1000)
            let state = VTCompressionSessionEncodeFrame(
                session,
                imageBuffer: imageBuffer,
                presentationTimeStamp: time,
                duration: .invalid,
                frameProperties: nil,
                sourceFrameRefcon: nil,
                infoFlagsOut: nil)
            if state != noErr {
                print("encode failure: \(state)")
            }
        }
    }

    /// Installs the compression-output callback that converts the encoder's
    /// AVCC output into Annex-B NALUs and forwards them to the client.
    private func setCallBack() {
        encodeCallBack = { (outputCallbackRefCon, _, status, _, sampleBuffer) in
            // Recover the encoder instance from the refcon pointer.
            let encoder: DQVideoEncoder = unsafeBitCast(outputCallbackRefCon, to: DQVideoEncoder.self)
            guard status == noErr,
                  let sampleBuffer = sampleBuffer,
                  CMSampleBufferDataIsReady(sampleBuffer) else {
                return
            }

            // Annex-B start code. Appended via `contentsOf` — the original
            // escaped a pointer out of withUnsafeBufferPointer, which is
            // undefined behavior in Swift.
            let startCode: [UInt8] = [0x00, 0x00, 0x00, 0x01]

            // A sample without the NotSync attachment is a sync (key) frame.
            var isKeyFrame = false
            if let attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, createIfNecessary: false),
               CFArrayGetCount(attachments) > 0 {
                let dic = unsafeBitCast(CFArrayGetValueAtIndex(attachments, 0), to: CFDictionary.self)
                let notSyncKey = unsafeBitCast(kCMSampleAttachmentKey_NotSync, to: UnsafeRawPointer.self)
                isKeyFrame = !CFDictionaryContainsKey(dic, notSyncKey)
            }

            // Extract SPS/PPS from the first keyframe and deliver them once.
            if isKeyFrame && !encoder.hasSpsPps {
                if let description = CMSampleBufferGetFormatDescription(sampleBuffer) {
                    var spsPointer: UnsafePointer<UInt8>? = nil
                    var ppsPointer: UnsafePointer<UInt8>? = nil
                    var spsSize = 0, spsCount = 0
                    var ppsSize = 0, ppsCount = 0
                    let spsStatus = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
                        description, parameterSetIndex: 0,
                        parameterSetPointerOut: &spsPointer, parameterSetSizeOut: &spsSize,
                        parameterSetCountOut: &spsCount, nalUnitHeaderLengthOut: nil)
                    let ppsStatus = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
                        description, parameterSetIndex: 1,
                        parameterSetPointerOut: &ppsPointer, parameterSetSizeOut: &ppsSize,
                        parameterSetCountOut: &ppsCount, nalUnitHeaderLengthOut: nil)
                    if spsStatus == noErr, ppsStatus == noErr,
                       let sps = spsPointer, let pps = ppsPointer {
                        var spsDataValue = Data(capacity: startCode.count + spsSize)
                        spsDataValue.append(contentsOf: startCode)
                        spsDataValue.append(sps, count: spsSize)

                        var ppsDataValue = Data(capacity: startCode.count + ppsSize)
                        ppsDataValue.append(contentsOf: startCode)
                        ppsDataValue.append(pps, count: ppsSize)

                        encoder.hasSpsPps = true
                        // Optional-chained instead of force-unwrapped: an unset
                        // callback no longer crashes.
                        if let spsPpsBlock = encoder.videoEncodeCallbackSPSAndPPS {
                            encoder.callBackQueue.async {
                                spsPpsBlock(spsDataValue, ppsDataValue)
                            }
                        }
                    } else {
                        print("get sps/pps failed")
                    }
                }
            }

            // The block buffer holds AVCC data: each NALU is prefixed with a
            // 4-byte big-endian length rather than a start code.
            guard let dataBuffer = CMSampleBufferGetDataBuffer(sampleBuffer) else { return }
            var dataPointer: UnsafeMutablePointer<Int8>? = nil
            var totalLength = 0
            let blockState = CMBlockBufferGetDataPointer(
                dataBuffer, atOffset: 0, lengthAtOffsetOut: nil,
                totalLengthOut: &totalLength, dataPointerOut: &dataPointer)
            guard blockState == kCMBlockBufferNoErr, let base = dataPointer else {
                print("獲取data失敗\(blockState)")
                return
            }

            // Walk the buffer NALU by NALU, replacing length prefixes with
            // start codes.
            let lengthInfoSize = 4
            var offset = 0
            while offset + lengthInfoSize <= totalLength {
                var naluLength: UInt32 = 0
                memcpy(&naluLength, base + offset, lengthInfoSize)
                // Big-endian wire length -> host order.
                naluLength = CFSwapInt32BigToHost(naluLength)
                let naluSize = Int(naluLength)
                // Guard against a corrupt length running past the buffer.
                guard offset + lengthInfoSize + naluSize <= totalLength else { break }

                var data = Data(capacity: naluSize + startCode.count)
                data.append(contentsOf: startCode)
                let naluPointer = UnsafeRawPointer(base + offset + lengthInfoSize)
                    .assumingMemoryBound(to: UInt8.self)
                data.append(naluPointer, count: naluSize)

                if let frameBlock = encoder.videoEncodeCallback {
                    encoder.callBackQueue.async {
                        frameBlock(data)
                    }
                }
                offset += lengthInfoSize + naluSize
            }
        }
    }

    deinit {
        if encodeSession != nil {
            // Flush pending frames, then tear the session down.
            VTCompressionSessionCompleteFrames(encodeSession, untilPresentationTimeStamp: .invalid)
            VTCompressionSessionInvalidate(encodeSession)
            encodeSession = nil
        }
    }

}

3.視訊解碼器封裝

/// Hardware H.264 decoder built on VideoToolbox.
///
/// Accepts Annex-B NAL units (4-byte 0x00000001 start code), caches SPS/PPS,
/// and once both are known creates a `VTDecompressionSession` whose decoded
/// `CVImageBuffer`s are delivered through `videoDecodeCallback` on
/// `callBackQueue`.
class DQVideoDecode: NSObject {

    var width: Int32 = 480
    var height: Int32 = 640

    /// Serial queue on which incoming NALUs are processed.
    var decodeQueue = DispatchQueue(label: "decode")
    /// Serial queue on which decoded frames are handed back to the client.
    var callBackQueue = DispatchQueue(label: "decodeCallBack")
    var decodeDesc: CMVideoFormatDescription?

    /// Most recently received SPS/PPS NALUs (still start-code prefixed).
    var spsData: Data?
    var ppsData: Data?

    var decompressionSession: VTDecompressionSession?
    var callback: VTDecompressionOutputCallback?

    /// Client callback invoked with each decoded image buffer.
    var videoDecodeCallback: ((CVImageBuffer?) -> Void)?
    func SetVideoDecodeCallback(block: ((CVImageBuffer?) -> Void)?) {
        videoDecodeCallback = block
    }

    init(width: Int32, height: Int32) {
        self.width = width
        self.height = height
        super.init()
    }

    /// Lazily creates the decompression session from the cached SPS/PPS.
    /// - Returns: true when a session exists (or was created successfully).
    func initDecoder() -> Bool {
        if decompressionSession != nil {
            return true
        }
        guard let spsData = spsData, let ppsData = ppsData else {
            return false
        }

        // Strip the 4-byte Annex-B start codes; the format description wants
        // raw parameter-set bytes.
        let sps = Array(spsData.dropFirst(4))
        let pps = Array(ppsData.dropFirst(4))

        // Build the description INSIDE the withUnsafeBufferPointer closures.
        // The original escaped baseAddress out of the closures, which is
        // undefined behavior in Swift (the pointer is only valid inside).
        var descriptionState: OSStatus = noErr
        sps.withUnsafeBufferPointer { spsBuffer in
            pps.withUnsafeBufferPointer { ppsBuffer in
                guard let spsBase = spsBuffer.baseAddress,
                      let ppsBase = ppsBuffer.baseAddress else {
                    descriptionState = -1
                    return
                }
                let parameterSets: [UnsafePointer<UInt8>] = [spsBase, ppsBase]
                let parameterSizes = [sps.count, pps.count]
                descriptionState = CMVideoFormatDescriptionCreateFromH264ParameterSets(
                    allocator: kCFAllocatorDefault,
                    parameterSetCount: 2,
                    parameterSetPointers: parameterSets,
                    parameterSetSizes: parameterSizes,
                    nalUnitHeaderLength: 4,
                    formatDescriptionOut: &self.decodeDesc)
            }
        }
        guard descriptionState == noErr, decodeDesc != nil else {
            print("description建立失敗")
            return false
        }

        setCallBack()
        var callbackRecord = VTDecompressionOutputCallbackRecord(
            decompressionOutputCallback: callback,
            decompressionOutputRefCon: unsafeBitCast(self, to: UnsafeMutableRawPointer.self))

        // Request 420f (bi-planar full-range YUV) output at the stream size.
        let imageBufferAttributes = [
            kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
            kCVPixelBufferWidthKey: width,
            kCVPixelBufferHeightKey: height,
        ] as [CFString: Any]

        let state = VTDecompressionSessionCreate(
            allocator: kCFAllocatorDefault,
            formatDescription: decodeDesc!,
            decoderSpecification: nil,
            imageBufferAttributes: imageBufferAttributes as CFDictionary,
            outputCallback: &callbackRecord,
            decompressionSessionOut: &decompressionSession)
        // Fix: the original force-unwrapped the session even when creation
        // failed; bail out with false instead of crashing.
        guard state == noErr, let session = decompressionSession else {
            print("建立decodeSession失敗")
            decompressionSession = nil
            return false
        }
        VTSessionSetProperty(session, key: kVTDecompressionPropertyKey_RealTime, value: kCFBooleanTrue)
        return true
    }

    /// Installs the decompression-output callback that forwards decoded
    /// image buffers to the client.
    private func setCallBack() {
        callback = { decompressionOutputRefCon, _, _, _, imageBuffer, _, _ in
            // Recover the decoder instance from the refcon pointer.
            let decoder: DQVideoDecode = unsafeBitCast(decompressionOutputRefCon, to: DQVideoDecode.self)
            guard imageBuffer != nil else {
                return
            }
            if let block = decoder.videoDecodeCallback {
                decoder.callBackQueue.async {
                    block(imageBuffer)
                }
            }
        }
    }

    /// Entry point: enqueues one Annex-B NALU (with start code) for decoding.
    func decode(data: Data) {
        decodeQueue.async {
            self.decodeByte(data: data, size: UInt32(data.count))
        }
    }

    /// Classifies the NALU and either caches SPS/PPS or decodes the frame.
    private func decodeByte(data: Data, size: UInt32) {
        // Need at least the 4-byte start code plus the 1-byte NAL header;
        // the original underflowed `size - 4` / crashed on short input.
        guard size > 4 else { return }

        // Rewrite the 00 00 00 01 start code as a 4-byte big-endian length
        // (AVCC), which is what CMSampleBuffer expects.
        let naluSize = size - 4
        let lengthBytes: [UInt8] = [
            UInt8(truncatingIfNeeded: naluSize >> 24),
            UInt8(truncatingIfNeeded: naluSize >> 16),
            UInt8(truncatingIfNeeded: naluSize >> 8),
            UInt8(truncatingIfNeeded: naluSize),
        ]
        var frameBytes = lengthBytes
        frameBytes.append(contentsOf: data.dropFirst(4))

        // NAL unit type (low 5 bits of byte 4): 7 = SPS, 8 = PPS, 5 = IDR, 6 = SEI.
        let type = Int(frameBytes[4] & 0x1f)
        switch type {
        case 0x05:
            // IDR frame: decodable once SPS/PPS have produced a session.
            if initDecoder() {
                decode(frame: frameBytes, size: size)
            }
        case 0x06:
            break // SEI (supplemental enhancement info) — ignored.
        case 0x07:
            spsData = data
        case 0x08:
            ppsData = data
        default:
            if initDecoder() {
                decode(frame: frameBytes, size: size)
            }
        }
    }

    /// Wraps one AVCC frame in a CMSampleBuffer and submits it for
    /// asynchronous decompression.
    private func decode(frame: [UInt8], size: UInt32) {
        guard let session = decompressionSession, let description = decodeDesc else {
            return
        }

        var blockBuffer: CMBlockBuffer?
        // Fix: let CoreMedia allocate and OWN the backing memory
        // (memoryBlock: nil + kCFAllocatorDefault) and copy the bytes in.
        // The original wrapped the local `frame` array with kCFAllocatorNull,
        // leaving the asynchronous decoder with a dangling pointer once this
        // function returned.
        var status = CMBlockBufferCreateWithMemoryBlock(
            allocator: kCFAllocatorDefault,
            memoryBlock: nil,
            blockLength: Int(size),
            blockAllocator: kCFAllocatorDefault,
            customBlockSource: nil,
            offsetToData: 0,
            dataLength: Int(size),
            flags: 0,
            blockBufferOut: &blockBuffer)
        guard status == kCMBlockBufferNoErr, let block = blockBuffer else {
            print("建立blockBuffer失敗")
            return
        }
        status = frame.withUnsafeBytes { raw in
            CMBlockBufferReplaceDataBytes(
                with: raw.baseAddress!,
                blockBuffer: block,
                offsetIntoDestination: 0,
                dataLength: Int(size))
        }
        guard status == kCMBlockBufferNoErr else {
            print("建立blockBuffer失敗")
            return
        }

        var sampleSizeArray: [Int] = [Int(size)]
        var sampleBuffer: CMSampleBuffer?
        status = CMSampleBufferCreateReady(
            allocator: kCFAllocatorDefault,
            dataBuffer: block,
            formatDescription: description,
            sampleCount: 1,
            sampleTimingEntryCount: 0,
            sampleTimingArray: nil,
            sampleSizeEntryCount: 1,
            sampleSizeArray: &sampleSizeArray,
            sampleBufferOut: &sampleBuffer)
        guard status == noErr, let sample = sampleBuffer else {
            print("Sample Buffer Create Ready failed")
            return
        }

        var infoFlags = VTDecodeInfoFlags.asynchronous
        let decodeState = VTDecompressionSessionDecodeFrame(
            session,
            sampleBuffer: sample,
            flags: ._EnableAsynchronousDecompression,
            frameRefcon: nil,
            infoFlagsOut: &infoFlags)
        if decodeState != noErr {
            print("解碼失敗")
        }
    }

    deinit {
        if let session = decompressionSession {
            // Drain in-flight asynchronous frames before tearing down.
            VTDecompressionSessionWaitForAsynchronousFrames(session)
            VTDecompressionSessionInvalidate(session)
            decompressionSession = nil
        }
    }
}

demo原始碼地址:https://github.com/duzhaoquan/VideoEncodeH264