python opencv圖片編碼為h264檔案的例項
阿新 • 發佈:2020-01-09
python部分
#!/usr/bin/env python
# coding=utf-8
"""Encode OpenCV-loaded images to an H.264 file by pushing raw pixel data
into a native DLL (hik_client.dll) that wraps libavcodec.

NOTE(review): this script was recovered from a scraped, single-line blog
dump; several call signatures had lost arguments in the scrape and were
reconstructed — confirm them against the actual DLL interface.
"""
from ctypes import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import time
import numpy as np
import cv2
import struct
import datetime
from numba import jit
import os

# cam_no -> CamInfo, filled by the camera-list callback.
cam_dict = {}


class CamInfo:
    """Plain record describing one camera."""

    def __init__(self, cam_no=0, deviceid="default", cam_name="default"):
        self.cam_no = cam_no        # integer camera number
        self.deviceid = deviceid    # device id string (second CSV field)
        self.cam_name = cam_name    # camera name (third CSV field)


@jit
def trans(data, size, height, width):
    """Convert a raw C byte pointer of `size` bytes into an
    (height, width, 3) uint8 ndarray."""
    raw = string_at(data, size)
    # np.frombuffer replaces the deprecated np.fromstring used originally.
    flat = np.frombuffer(raw, np.uint8)
    return flat.reshape(height, width, 3)


def str2char_p(str_v):
    """Wrap a bytes object in a ctypes c_char_p."""
    p_str = c_char_p()
    p_str.value = str_v
    return p_str


def callb_stream(data, size, cam_no, height, width):
    """Frame callback: show one decoded RGB frame in an OpenCV window.

    BUG fixed: the scraped source called trans(data, width) although
    trans() takes (data, size, height, width); the missing parameters
    were restored here — confirm against the CFUNCTYPE declaration used
    when this callback is registered with the DLL.
    """
    frame = trans(data, size, height, width)
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    counter = datetime.datetime.now().strftime('%Y%m%d_%H%M%S_%f')
    # print(1,counter)
    cv2.imshow(str(cam_no), frame)
    cv2.waitKey(1)


def callb_camerainfo(cam_no, camera_info, camera_info_size):
    """Camera-list callback: parse a ",deviceid,name,..." CSV buffer into
    cam_dict keyed by cam_no."""
    raw = string_at(camera_info, camera_info_size)
    info = str(raw, encoding="utf-8").split(",")
    cam_dict[cam_no] = CamInfo(cam_no, info[1], info[2])
    print("camerainfo", cam_dict[cam_no].cam_no,
          cam_dict[cam_no].cam_name, cam_dict[cam_no].deviceid)


class Mythread(QThread):
    """Worker thread that feeds bitmap frames to the DLL H.264 encoder."""

    # Signal carrying (str, list) — kept from the original demo code.
    breakSignal = pyqtSignal(str, list)

    def __init__(self, parent=None):
        super().__init__(parent)

    def callb_error(self, err_type, msg_no, msg_level, msg_txt, msg_txtlen):
        # Error callback stub used by the (commented-out) set_callback path.
        print("myerror", msg_txtlen)

    def run(self):
        """Cycle through bmp/1.bmp .. bmp/200.bmp and push each frame's
        raw pixels to the DLL encoder via push_rtsp."""
        dll = CDLL(r"./hik_client.dll")
        width = 60
        height = 40
        dll.pre_encode.restype = c_void_p
        encoder = dll.pre_encode(width, height)
        encoder = cast(encoder, c_void_p)
        for i in range(20000):
            n = i % 200 + 1
            img = cv2.imread("bmp/" + str(n) + ".bmp")
            # Total number of pixel bytes (h * w * channels).  Renamed from
            # `len`, which shadowed the builtin in the original.
            n_bytes = img.shape[0] * img.shape[1] * img.shape[2]
            flat = img.reshape(-1)
            # ctypes int array holding one frame of pixel data; the
            # constructor fill replaces the original per-element loop
            # (which also reused the outer loop index `i`).
            ArrType = c_int * n_bytes
            buf = ArrType(*flat)
            dll.push_rtsp(buf, n_bytes, encoder)
            # NOTE(review): the original quits the Qt event loop right
            # after the FIRST frame is pushed; placement preserved, but
            # moving this after the loop would encode all 20000 frames —
            # confirm intent.
            QCoreApplication.instance().quit()


if __name__ == '__main__':
    app = QApplication([])
    dlg = QDialog()
    dlg.resize(400, 300)
    dlg.setWindowTitle("自定義按鈕測試")
    dlg_layout = QVBoxLayout()
    # BUG fixed: setContentsMargins takes 4 values (left, top, right,
    # bottom); the scraped source had only three, a TypeError at runtime.
    dlg_layout.setContentsMargins(40, 40, 40, 40)
    btn = QPushButton('測試按鈕')
    dlg_layout.addWidget(btn)
    dlg_layout.addStretch(40)
    dlg.setLayout(dlg_layout)
    dlg.show()

    def chuli(a, s):
        # Slot for breakSignal: update the button caption.
        btn.setText(a + str(s[0] * 10))

    # Create the worker, wire the signal, start it, and enter the Qt loop.
    thread = Mythread()
    thread.breakSignal.connect(chuli)
    thread.start()
    dlg.exec_()
    app.exit()
c++動態庫部分
// ============================================================================
// NOTE(review): this C++ translation unit was recovered from a scraped blog
// page that collapsed the source onto a few very long lines AND dropped
// function arguments / whole statements in many places (the scraper appears
// to have eaten text around commas).  The original lines below are kept
// byte-identical; the added comment lines describe each chunk and flag the
// visible corruption.  Do NOT attempt to compile as-is.
// ============================================================================
// Chunk 1: headers (Win32 sockets, OpenCV, FFmpeg via extern "C"), two trivial
// test exports (testchar/testimg), the Rtmp_tool state holder (encoder context,
// RGB/YUV frames, YUV and output buffers, SwsContext, output FILE*), and the
// start of pre_encode(): allocates an H.264 AVCodecContext (3 Mbit/s, 1/5
// time_base, gop 10) and opens the codec.  Embedded Chinese comments:
// "動態庫需要拷貝三個檔案,否則重連會出問題" = "the DLL needs three files copied,
// otherwise reconnect breaks"; "編碼器/查詢h264編碼器" = "encoder / find the
// h264 encoder"; "開啟編碼器" = "open the encoder".
#include "stdafx.h" #include "CVdll.h" #include "SimpleLog.h" #include <iostream> #include<fstream> #include <sys/types.h> #include "opencv2/opencv.hpp" #include "Ws2tcpip.h" #include <winsock2.h> #include <fcntl.h> #include <cstring> #include <cstdio> #include <signal.h> #pragma comment(lib,"ws2_32.lib") #include <queue> using namespace cv; extern "C" { #include "libavcodec/avcodec.h" #include "libavformat/avformat.h" #include "libswscale/swscale.h" #include "libavdevice/avdevice.h" #include "libavutil/log.h" //#include "libavutil/imgutils.h" }; //說明,動態庫需要拷貝三個檔案,否則重連會出問題 char* testchar(int plus1) { char* str = "hello world111111"; return str; } char* testimg(char* data,int length) { char* str = "hello world111111"; return str; } int outbuf_size = 100000; class Rtmp_tool { public: int nWidth = 0; int nHeight = 0; AVCodecContext *c; AVFrame *m_pRGBFrame = new AVFrame[1]; //RGB幀資料 AVFrame *m_pYUVFrame = new AVFrame[1];; //YUV幀資料 uint8_t * yuv_buff;// uint8_t * outbuf; SwsContext * scxt; FILE *f = NULL; }; void* pre_encode(int width,int height) { Rtmp_tool *rtmp_tool; rtmp_tool = new Rtmp_tool(); int nLen; int fileI; rtmp_tool->nWidth = width; rtmp_tool->nHeight = height; av_register_all(); avcodec_register_all(); //AVFrame *m_pRGBFrame = new AVFrame[1]; //RGB幀資料 //AVFrame *m_pYUVFrame = new AVFrame[1];; //YUV幀資料 AVCodecContext *c = NULL; AVCodecContext *in_c = NULL; AVCodec *pCodecH264; //編碼器 //查詢h264編碼器 pCodecH264 = avcodec_find_encoder(AV_CODEC_ID_H264); c = avcodec_alloc_context3(pCodecH264); c->bit_rate = 3000000;// put sample parameters c->width = width;// c->height = height;// // frames per second AVRational rate; rate.num = 1; rate.den = 5; c->time_base = rate;//(AVRational){1,25}; c->gop_size = 10; // emit one intra frame every ten frames c->max_b_frames = 1; c->thread_count = 1; c->pix_fmt = AV_PIX_FMT_YUV420P;//PIX_FMT_RGB24; //av_opt_set(c->priv_data,/*"preset"*/"libvpx-1080p.ffpreset",/*"slow"*/NULL,0); //開啟編碼器 if (avcodec_open2(c,pCodecH264,NULL)<0)
// Chunk 2: end of pre_encode() — allocates the YUV420 buffer (w*h*3/2) and the
// encoded-output buffer, opens "0_Data.h264" for writing, and builds the
// BGR24->YUV420P SwsContext.  CORRUPTION: the sws_getContext call is missing
// the destination height argument (should be ...,c->width,c->height,
// AV_PIX_FMT_YUV420P,... with a trailing NULL) — restore from the libswscale
// API before use.  Then push_rtsp() begins: copies the int-array pixel input
// into the RGB frame row by row and sets linesizes.  "不能開啟編碼庫" =
// "cannot open the codec"; "將YUV buffer 填充YUV Frame" = "fill the YUV frame
// from the YUV buffer" (that avpicture_fill call is ALSO missing its pixel
// format and width arguments); "翻轉RGB影象" = "flip the RGB image" (disabled).
printf("不能開啟編碼庫"); int size = c->width * c->height; rtmp_tool->yuv_buff = (uint8_t *)malloc((size * 3) / 2); // size for YUV 420 //圖象編碼 rtmp_tool->outbuf = (uint8_t*)malloc(outbuf_size); int u_size = 0; const char * filename = "0_Data.h264"; rtmp_tool->f = fopen(filename,"wb"); if (!rtmp_tool->f) { printf("could not open %s\n",filename); exit(1); } //初始化SwsContext rtmp_tool->scxt = sws_getContext(c->width,c->height,AV_PIX_FMT_BGR24,c->width,AV_PIX_FMT_YUV420P,SWS_POINT,NULL,NULL); rtmp_tool->c = c; return rtmp_tool; } char* push_rtsp(int* plus1,int len,void* vp) { Rtmp_tool *rtmp_tool =(Rtmp_tool *) vp; for (int i = 0; i < len; i++) { plus1[i] = (uint8_t)plus1[i]; } AVCodecContext *c = rtmp_tool->c;// (AVCodecContext*)vp; printf("2 %d %d\n",c->height); //--------------- AVPacket avpkt; AVFrame *m_pRGBFrame = rtmp_tool->m_pRGBFrame; AVFrame *m_pYUVFrame = rtmp_tool->m_pYUVFrame; /*unsigned char *pBmpBuf; pBmpBuf = new unsigned char[len];*/ //memcpy(rgb_buff,(uint8_t*)plus1,nDataLen); // avpicture_fill((AVPicture*)m_pRGBFrame,AV_PIX_FMT_RGB24,rtmp_tool->nWidth,rtmp_tool->nHeight); m_pRGBFrame->linesize[0] = c->width * 3; m_pRGBFrame->linesize[1] =0; m_pRGBFrame->linesize[2] =0; m_pRGBFrame->linesize[3] =0; m_pRGBFrame->format = AV_PIX_FMT_RGB24; m_pRGBFrame->width = rtmp_tool->nWidth; m_pRGBFrame->height = rtmp_tool->nHeight; uint8_t *p = m_pRGBFrame->data[0]; int y = 0,x = 0; for (y = 0; y < rtmp_tool->nHeight; y++) { for (x = 0; x < rtmp_tool->nWidth; x++) { *p++ = (uint8_t)plus1[(y*rtmp_tool->nWidth + x) * 3]; // R *p++ = (uint8_t)plus1[(y*rtmp_tool->nWidth + x) * 3 +1]; // G *p++ = (uint8_t)plus1[(y*rtmp_tool->nWidth + x) * 3 +2] ; // B } } printf("1 %d %d \n",rtmp_tool->nHeight); //將YUV buffer 填充YUV Frame avpicture_fill((AVPicture*)m_pYUVFrame,(uint8_t*)rtmp_tool->yuv_buff,rtmp_tool->nHeight); // 翻轉RGB影象 //m_pRGBFrame->data[0] += m_pRGBFrame->linesize[0] * (rtmp_tool->nHeight - 1); //m_pRGBFrame->linesize[0] *= -1; //m_pRGBFrame->data[1] +=
// Chunk 3: end of push_rtsp() — sws_scale RGB->YUV, avcodec_encode_video2 into
// avpkt, and fwrite of the encoded packet to the open .h264 file (note the
// success test `u_size == 0` and a large commented-out imread demo).  Then the
// wire structs RecStruct/SendStcuct (1500/1000-byte payloads plus size/type/
// cam_no headers), callback globals, the per-camera CameraInfo map (ofstream +
// hour marker), the global client SOCKET, and is_begin_with() (prefix test:
// returns 1/0, -1 on bad input — doc comment says "判斷str1是否以str2開頭" =
// "check whether str1 starts with str2").
m_pRGBFrame->linesize[1] * (rtmp_tool->nHeight / 2 - 1); //m_pRGBFrame->linesize[1] *= -1; //m_pRGBFrame->data[2] += m_pRGBFrame->linesize[2] * (rtmp_tool->nHeight / 2 - 1); //m_pRGBFrame->linesize[2] *= -1; //將RGB轉化為YUV sws_scale(rtmp_tool->scxt,m_pRGBFrame->data,m_pRGBFrame->linesize,m_pYUVFrame->data,m_pYUVFrame->linesize); int got_packet_ptr = 0; av_init_packet(&avpkt); avpkt.data = rtmp_tool->outbuf; avpkt.size = outbuf_size; int u_size = avcodec_encode_video2(c,&avpkt,m_pYUVFrame,&got_packet_ptr); m_pYUVFrame->pts++; if (u_size == 0){ int res = fwrite(avpkt.data,1,avpkt.size,rtmp_tool->f); if (res == 0) { printf("000"); } else { printf("1253"); } } //-------end--------- //Mat mat; ////載入圖片 //mat = imread("bgs.jpg",CV_LOAD_IMAGE_COLOR); //printf("a %d %d",mat.rows,mat.cols); ////if (!mat.empty()) { //int m,n; //n = mat.cols * 3; //m = mat.rows; //unsigned char *data = (unsigned char*)malloc(sizeof(unsigned char) * m * n); //int p = 0; //for (int i = 0; i < m; i++) //{ // for (int j = 0; j < n; j++) // { // data[p] = mat.at<unsigned char>(i,j); // p++; // } //} //*plus1 = p; return NULL; //return (char*)data; } struct RecStruct //資料包 { int size; int data_type; int cam_no; int error_code; char recvbuf[1500]; }; struct SendStcuct { int size; int data_type; int cam_no; char sendbuf[1000]; }data_send; static ErrorCallBack g_errorcall = 0; static CamInfoCallBack g_caminfocall = 0; typedef struct CameraInfo { std::ofstream foutV; int timeInHour = -1; }caminfo; std::map<int,CameraInfo*> cameraMap; //static std::map<int,queue<RecStruct*>> namemap; static SOCKET g_sockClient; HANDLE hMutex; //char* deviceId; /**判斷str1是否以str2開頭 * 如果是返回1 * 不是返回0 * 出錯返回-1 * */ int is_begin_with(const char * str1,char *str2) { if (str1 == NULL || str2 == NULL) return -1; int len1 = strlen(str1); int len2 = strlen(str2); if ((len1 < len2) || (len1 == 0 || len2 == 0)) return -1; char *p = str2; int i = 0; while (*p != '\0') { if (*p != str1[i]) return 0; p++; i++; } return 1; } char*
// Chunk 4: Strcpy helper; sendcmd() — builds a SendStcuct and send()s it over
// g_sockClient (CORRUPTION: memset(data_send.sendbuf,1000) is missing the fill
// value argument); set_callback() / getcameralist() register the Python
// callbacks (getcameralist's memset/memcpy calls are also missing arguments
// and it never actually sends); then the start of RecvThread(): blocking
// recv() loop that reads whole RecStruct packets, dispatching on data_type
// (1 = camera list -> g_caminfocall; 3 = error; 2 = stream keep-alive).
Strcpy(char * a,const char * b) { int i = 0; while (*b) a[i++] = *b++; a[i] = 0; return a; } int sendcmd(char* data,int cam_no,int type,int size) { memset(data_send.sendbuf,1000); //data2send.sendbuf = new char[strlen(data)]; //memset(data2send.sendbuf,strlen(data)); data_send.size = size; data_send.data_type = type; data_send.cam_no = cam_no; memcpy(data_send.sendbuf,data,sizeof(char) * (size)); printf("data_send len %d\n",sizeof(data_send)); if (g_sockClient) send(g_sockClient,(char *)&data_send,sizeof(struct SendStcuct),0); return 0; } int set_callback(ErrorCallBack terrorcall(int error_type,int err_no,int msg_level,char* msg_txt,int spare)) { g_errorcall = (ErrorCallBack)terrorcall; return 0; } MYLIBDLL int getcameralist(int type,CamInfoCallBack caminfocall(int cam_no,char* cam_info,int cam_info_size)) { g_caminfocall = (CamInfoCallBack)caminfocall; SendStcuct data_send; memset(&data_send,sizeof(struct SendStcuct)); data_send.size = 20; data_send.data_type = 1; data_send.cam_no = 0; char* data = "getcameralist"; memcpy(data_send.sendbuf,sizeof(char) * (20)); printf("data_send len %d\n",0); return 0; } DWORD WINAPI RecvThread(LPVOID lpParameter); DWORD WINAPI RecvThread(LPVOID lpParameter){ SOCKET sockClient = (SOCKET)lpParameter; while (1) { RecStruct data_recv; int ret; memset(&data_recv,'0',sizeof(struct RecStruct)); ret = recv(sockClient,(char *)&data_recv,sizeof(struct RecStruct),0); //第二個引數使用強制型別,為一個數據包 if (ret == 0) // server呼叫了close { printf("server close"); break; } else if (ret == SOCKET_ERROR) // 網路錯誤 { int err = WSAGetLastError(); printf("get message %d %d %d \n",ret,SOCKET_ERROR,err); if (err == WSAECONNRESET || err == WSAECONNABORTED) { printf("tcp error %d %d \n",err,SOCKET_ERROR); //int n = namemap.erase(deviceId);//如果刪除了會返回1,否則返回0 } break; } //printf("reve type %d %d",data_recv.data_type,data_recv.size); switch (data_recv.data_type) { case 1://攝像頭列表 { g_caminfocall(data_recv.cam_no,data_recv.recvbuf,data_recv.size); } break; case 3://異常資訊 { if
// Chunk 5: end of RecvThread() (error-callback dispatch with arguments
// missing in the scrape, and type-2 "00000" keep-alive filtering); tcpInit()
// — WSAStartup, non-blocking-ish socket setup, connect to ip:port, stash
// g_sockClient and spawn RecvThread (CORRUPTION: the CreateThread call is
// missing its stack-size/flags arguments); DeviceInfo (per-camera name/no/
// socket); start of read_buffer() — the AVIOContext read callback that pulls
// RecStruct packets off the camera socket (its recv() call lost its buffer
// and length arguments in the scrape).  "初始化Winsock失敗" = "Winsock init
// failed"; "連線失敗" = "connect failed".
(g_errorcall!=0) g_errorcall(1,data_recv.error_code,4,0); break; } case 2: { char* recemsg = data_recv.recvbuf; int is_null = is_begin_with(recemsg,"00000"); if (is_null == 1) { printf("recv type 2 00000"); continue; } break; } default: break; } if (ret < 0) { printf("WSAStartup() failed!\n"); return -1; } Sleep(20); } return 0; } int tcpInit(char* ip,int port) { av_log_set_level(AV_LOG_PANIC); WSADATA wsaData; if (WSAStartup(MAKEWORD(2,2),&wsaData) != 0) { printf("初始化Winsock失敗"); return -1; } SOCKADDR_IN addrSrv; addrSrv.sin_family = AF_INET; addrSrv.sin_port = htons(port); SOCKET sockClient = socket(AF_INET,SOCK_STREAM,0); int nRecvBuf = 0;//設定為32K setsockopt(sockClient,SOL_SOCKET,SO_RCVBUF,(const char*)&nRecvBuf,sizeof(int)); //setsockopt(sockClient,(char *)&nZero,sizeof(nZero)); inet_pton(AF_INET,ip,&addrSrv.sin_addr.s_addr); if (connect(sockClient,(struct sockaddr*)&addrSrv,sizeof(addrSrv)) == -1) return -2; //throw "連線失敗"; if (SOCKET_ERROR == sockClient) { printf("Socket() error:%d",WSAGetLastError()); return -3; } g_sockClient = sockClient; HANDLE h_thread = CreateThread(NULL,RecvThread,(LPVOID)sockClient,NULL); CloseHandle(h_thread); return 0; } class DeviceInfo { public: string cam_name; int cam_no; SOCKET sockClient; }; //Callback int read_buffer(void *opaque,uint8_t *buf,int buf_size) { DeviceInfo deviceInfo = *((DeviceInfo *)opaque); int null_count=0; int display_count = 0; while (1) { RecStruct data_recv; int ret; memset(&data_recv,sizeof(struct RecStruct)); ret = recv(deviceInfo.sockClient,0); //第二個引數使用強制型別,為一個數據包 if (ret == 0) // server呼叫了close { printf("server close"); break; } else if (ret == SOCKET_ERROR) // 網路錯誤 { printf("get message %d %d \n",SOCKET_ERROR); int err = WSAGetLastError(); if (g_errorcall != 0) g_errorcall(1,deviceInfo.cam_no,"socket err",0); //if (err == WSAECONNRESET || err == WSAECONNABORTED) { // printf("server break %s",deviceId); // //int n = namemap.erase(deviceId);//如果刪除了會返回1,否則返回0 //} break; } if (data_recv.size == 0) {
// Chunk 6: end of read_buffer() — throttled logging of empty packets,
// oversize-packet rejection (>1500), error-code forwarding, copy of the
// payload into the AVIO buffer (CORRUPTION: memcpy(buf,buf_size) lost its
// source argument), and per-hour rotation of the raw .dat capture file keyed
// by cameraMap (localtime; "這句返回的只是一個時間cuo" = "this only returns a
// timestamp [typo]").  Then the start of send_cmd() — per-camera command
// packet (its memset is missing the fill value).
null_count++; if (null_count %1000==0) { if (g_errorcall != 0) g_errorcall(1,2,"data_recv 0",11); printf("reve len=0 type %d\n",data_recv.data_type); null_count = 0; } Sleep(2); continue; } else if (data_recv.size >1500) { if (g_errorcall != 0) g_errorcall(1,"data_recv too long",data_recv.size); printf("reve data too long %d\n",data_recv.size); continue; } if (data_recv.data_type == 3) { if (g_errorcall) { char err_str[10]; _itoa(data_recv.error_code,err_str,10); //正確解法一 g_errorcall(1,0); } } else if (data_recv.data_type == 2) { null_count=0; display_count++; char* recemsg = data_recv.recvbuf; int is_null = is_begin_with(recemsg,"00000"); if (is_null == 1) { printf("recv 00000"); continue; } //printf("cam_no %d",data_recv.cam_no); //int cam_no = data_recv.cam_no; buf_size = data_recv.size; memcpy(buf,buf_size); if (g_errorcall && buf_size>1000 && display_count%20==0) { g_errorcall(2,"rece data",1);//err_type,column,spare display_count = 0; } //儲存流資料並分小時儲存 time_t tt = time(NULL);//這句返回的只是一個時間cuo tm* t = localtime(&tt); auto iter = cameraMap.find(deviceInfo.cam_no); if (iter != cameraMap.end()){ iter->second->foutV.write(data_recv.recvbuf,data_recv.size); if (t->tm_min == 0 && (iter->second->timeInHour != t->tm_hour)) { //判斷間隔一小時 iter->second->timeInHour = t->tm_hour; iter->second->foutV.close(); time_t tt = time(NULL);//這句返回的只是一個時間cuo tm* t = localtime(&tt); char ctmBegin[20]; strftime(ctmBegin,20,"/%Y%m%d%H%M",t); char str3[80]; sprintf(str3,"create data:%s%s%s",deviceInfo.cam_name,ctmBegin,".dat"); SLOG1(str3); printf("%s",deviceInfo.cam_name + std::string(ctmBegin) + ".dat"); iter->second->foutV.open(deviceInfo.cam_name + std::string(ctmBegin) + ".dat",ios::binary); } } return buf_size; } if (ret < 0) { printf("WSAStartup() failed!\n"); continue; //return 0; } } return 0; } int send_cmd(int cam_no,int size,int datatype,char* cam_name,SOCKET& sockClient) { SendStcuct data_send;  memset(&data_send,sizeof(struct SendStcuct)); data_send.size = size;
// Chunk 7: end of send_cmd() (send() call missing buffer/len/flags), and the
// start of tcp_recv_conn() — the main per-camera streaming entry point:
// winsock connect, .dat capture file creation with hour marker, FFmpeg
// AVFormatContext fed by a custom AVIOContext over read_buffer().
// CORRUPTION IS SEVERE HERE: WSAStartup/socket/setsockopt/inet_pton lost
// arguments and whole statements (the cam_no/cam_name parameters used below
// do not even appear in the surviving signature) — this chunk cannot be
// reconstructed without the original article.
data_send.data_type = datatype; data_send.cam_no = cam_no; memcpy(data_send.sendbuf,cam_name,sizeof(data_send)); send(sockClient,0); return 0; } int tcp_recv_conn(char* ip,int port,FrameFunc tcallback(char* a,int height,int width)) { WSADATA wsaData; if (WSAStartup(MAKEWORD(2,&wsaData) != 0) { printf("初始化Winsock失敗"); return -1; } SOCKADDR_IN addrSrv; addrSrv.sin_family = AF_INET; addrSrv.sin_port = htons(port); SOCKET sockClient = socket(AF_INET,sizeof(int)); setsockopt(sockClient,SO_SNDBUF,(char *)&nRecvBuf,sizeof(int)); inet_pton(AF_INET,WSAGetLastError()); return -3; } DeviceInfo deviceInfo; deviceInfo.cam_no = cam_no; deviceInfo.sockClient = sockClient; av_register_all(); unsigned version = avcodec_version(); //printf("FFmpeg version: %d\n",version); //初始化流檔案狀態 time_t tt = time(NULL);//這句返回的只是一個時間cuo tm* t = localtime(&tt); char ctmBegin[20]; strftime(ctmBegin,t); caminfo cinfoInstance; deviceInfo.cam_name = cam_name; //std::string dataName = cam_name; cinfoInstance.foutV.open(cam_name + std::string(ctmBegin) + ".dat",ios::binary); //判斷間隔一小時 cinfoInstance.timeInHour = t->tm_hour; cameraMap[cam_no] = &cinfoInstance; char str3[20]; sprintf(str3,"camno: %d start",cam_no); SLOG1(str3); AVFormatContext *pFormatCtx; int i,videoindex; AVCodecContext *pCodecCtx; AVCodec *pCodec; char filepath[] = "video.264"; //av_register_all(); avformat_network_init(); pFormatCtx = avformat_alloc_context(); //patha = "C:\\Users\\sbd01\\Pictures\\ffmpegtest\\Debug\\video.dat"; //fp_open = fopen(patha.c_str(),"rb+"); unsigned char *aviobuffer = (unsigned char *)av_malloc(1512); send_cmd(cam_no,sockClient); AVIOContext *avio = avio_alloc_context(aviobuffer,1512,&deviceInfo,read_buffer,NULL); pFormatCtx->pb = avio; //if (avformat_open_input(&pFormatCtx,patha.c_str(),NULL) != 0) { if (avformat_open_input(&pFormatCtx,NULL) != 0) { printf("Couldn't open input stream %d\n",cam_no); return -1; } printf("camno %d find stream\n",cam_no); pFormatCtx->probesize = 1000 * 1024;
// Chunk 8: tcp_recv_conn() stream-open retry loop — find stream info, locate
// the video stream index, find/open the decoder, sanity-check the decoded
// dimensions (reject >3000x2000), and on any failure goto restart_stream:
// tear down and re-allocate the AVFormatContext + AVIOContext and try again
// (several avio_alloc_context / avformat_open_input calls here are missing
// arguments from the scrape).
pFormatCtx->max_analyze_duration = 10 * AV_TIME_BASE; pCodec = NULL; while (pCodec == NULL) { printf("%d start find stream info \n",cam_no); if (avformat_find_stream_info(pFormatCtx,NULL) < 0) { printf("Couldn't find stream info %d\n",cam_no); goto restart_stream; continue; } videoindex = -1; for (i = 0; i < pFormatCtx->nb_streams; i++) if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { if (videoindex == -1) { videoindex = i; } //break; } if (videoindex == -1) { printf("%d Didn't find a video stream.\n",cam_no); goto restart_stream; } pCodecCtx = pFormatCtx->streams[videoindex]->codec; //pCodec = avcodec_find_decoder(AV_CODEC_ID_H264); pCodec = avcodec_find_decoder(pCodecCtx->codec_id); if (pCodec == NULL) { printf("%d Codec not found \n",cam_no); goto restart_stream; //return -1; } if (avcodec_open2(pCodecCtx,pCodec,NULL) < 0) { printf("%d Could not open codec.\n",cam_no); goto restart_stream; continue; //return -1; } if (pCodecCtx->width <= 0 || pCodecCtx->height <= 0 || pCodecCtx->height >2000 || pCodecCtx->width >3000) { printf("cam %d pCodecCtx error 1 width %d height %d ",pCodecCtx->width,pCodecCtx->height); goto restart_stream; } goto ok; restart_stream: printf("%d restart 1 ",cam_no); avformat_free_context(pFormatCtx); printf("restart 2 "); //avformat_close_input(&pFormatCtx); pFormatCtx = NULL; pFormatCtx = avformat_alloc_context(); printf("restart 3 "); //av_freep(aviobuffer); //printf("restart 4"); aviobuffer = (unsigned char *)av_malloc(1512); printf("restart 4 "); AVIOContext *avio2 = avio_alloc_context(aviobuffer,NULL); pFormatCtx->pb = avio2; pFormatCtx->probesize = 1000 * 1024; pFormatCtx->max_analyze_duration = 10 * AV_TIME_BASE; if (avformat_open_input(&pFormatCtx,NULL) != 0) { printf("2Couldn't open input stream %d\n",cam_no); //return -1; } printf("restart 5\n"); pCodec = NULL; continue; ok: break; } printf("camno:%d code name :%s width %d height %d\n",pCodec->name,pCodecCtx->height); AVFrame *pFrame,*pFrameYUV; pFrame =
// Chunk 9: tcp_recv_conn() decode loop — allocate frames, RGB24 SwsContext
// and output buffer (sws_getContext/avpicture_fill calls missing arguments in
// the scrape), then av_read_frame + avcodec_decode_video2; on >2 consecutive
// decode errors the codec is closed and reopened in place ("restart ffmpeg").
av_frame_alloc(); pFrameYUV = av_frame_alloc(); int ret,got_picture; if (g_errorcall) { char* cc; int length = strlen(pCodec->name); cc = new char[length + 1]; strcpy(cc,pCodec->name); g_errorcall(0,pCodecCtx->height,cc,11); } AVPacket *packet = (AVPacket *)av_malloc(sizeof(AVPacket)); struct SwsContext *img_convert_ctx; img_convert_ctx = sws_getContext(pCodecCtx->width,pCodecCtx->pix_fmt,SWS_BICUBIC,NULL); uint8_t *out_buffer; printf("cam %d ready decode 2",cam_no); out_buffer = new uint8_t[avpicture_get_size(AV_PIX_FMT_RGB24,pCodecCtx->height)]; avpicture_fill((AVPicture *)pFrameYUV,out_buffer,pCodecCtx->height); //av_image_fill_arrays(pFrameYUV->data,pFrameYUV->linesize,1); printf("cam %d ready decode 3",cam_no); int dec_error_count = 0; int tmp_test = 0; while (av_read_frame(pFormatCtx,packet) >= 0) { if (packet->stream_index == videoindex) { //tmp_test++; if (packet->size < 50) { av_free_packet(packet); //printf("cam:%d packet is too small %d\n",packet->size); Sleep(3); continue; } if (g_errorcall != 0) g_errorcall(2,"start decode",3); char str_decode[40]; sprintf(str_decode,"cam %d start decode",cam_no); SLOG1(str_decode); ret = avcodec_decode_video2(pCodecCtx,pFrame,&got_picture,packet); if (ret < 0) { dec_error_count++; char str3[80]; sprintf(str3,"%d%s decode_error:%d error_count %d"," Decode Error",dec_error_count); SLOG1(str3); if (g_errorcall != 0) g_errorcall(1,str3,80); printf("cam:%d Decode Error got_picture %d decode_error_num %d\n",got_picture,dec_error_count); if (dec_error_count > 2) { dec_error_count = 0; // restart ffmpeg av_free_packet(packet); Sleep(50); sws_freeContext(img_convert_ctx); img_convert_ctx = NULL; printf("cam %d sws_freeContext 1\n",cam_no); //av_free(out_buffer); //av_free(pFrameYUV); avcodec_close(pCodecCtx); //pCodecCtx = NULL; if (avcodec_open2(pCodecCtx,NULL)<0) { printf("Could not open codec.\n"); return -1; } /*pFrame = av_frame_alloc(); pFrameYUV = av_frame_alloc();*/ //packet = (AVPacket *)av_malloc(sizeof(AVPacket));
// Chunk 10: end of tcp_recv_conn() — on got_picture, sws_scale to RGB and
// hand the frame buffer to the Python tcallback(data, h*w*3, width); then
// cleanup.  tcp_init() is a thin exported wrapper over tcpInit().  Then the
// start of ffmpeg_recv() — an older file/stream decode variant; its very
// signature is corrupted (`int ffmpeg_recv(int cam_no,int width))`) and its
// avio_alloc_context call is missing arguments.
printf("cam_no %d avcodec_open2 ok width:%d height:%d\n",pCodecCtx->height); img_convert_ctx = sws_getContext(pCodecCtx->width,NULL); char str3[40]; sprintf(str3,"ffmpeg restart cam %d ",cam_no); SLOG1(str3); continue; } } if (got_picture) { if (g_errorcall != 0) g_errorcall(2,"got_picture",4); char str3[40]; sprintf(str3,"cam %d got_picture",cam_no); SLOG1(str3); sws_scale(img_convert_ctx,(const uint8_t* const*)pFrame->data,pFrame->linesize,pFrameYUV->data,pFrameYUV->linesize); /*fwrite(pFrameYUV->data[0],(pCodecCtx->width)*(pCodecCtx->height) * 3,output);*/ tcallback((char*)pFrameYUV->data[0],pCodecCtx->height * pCodecCtx->width * 3,pCodecCtx->width); } } av_free_packet(packet); Sleep(10); } sws_freeContext(img_convert_ctx); //av_free(out_buffer); av_free(pFrameYUV); avcodec_close(pCodecCtx); avformat_close_input(&pFormatCtx); return 0; } int tcp_init(char* ip,int port) { int res = tcpInit(ip,port); //printf("conn server\t%d\n",res); return res; } int ffmpeg_recv(int cam_no,int width)) { av_register_all(); unsigned version = avcodec_version(); printf("FFmpeg version: %d\n",version); AVFormatContext *pFormatCtx; int i,videoindex; AVCodecContext *pCodecCtx; AVCodec *pCodec; char filepath[] = "video.264"; avformat_network_init(); pFormatCtx = avformat_alloc_context(); //string patha = "C:\\Users\\sbd01\\Videos\\video.264"; //fp_open = fopen(patha.c_str(),"rb+"); unsigned char *aviobuffer = (unsigned char *)av_malloc(1512); AVIOContext *avio = avio_alloc_context(aviobuffer,&cam_no,NULL); pFormatCtx->pb = avio; if (avformat_open_input(&pFormatCtx,NULL) != 0) { printf("Couldn't open input stream.\n"); return -1; } if (avformat_find_stream_info(pFormatCtx,NULL)<0) { printf("Couldn't find stream information.\n"); return -1; } videoindex = -1; for (i = 0; i<pFormatCtx->nb_streams; i++) if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { videoindex = i; break; } if (videoindex == -1) { printf("Didn't find a video stream.\n"); return -1; } pCodecCtx =
// Chunk 11: ffmpeg_recv() decode loop (same shape as tcp_recv_conn's, with
// many dropped arguments, an unbalanced avpicture_get_size bracket, and a
// stray `output);*/` from a half-deleted fwrite); file_buffer() — AVIO read
// callback over a plain FILE* (fread lost its element-size argument); and the
// start of play_file(), whose parameter list is fused with the local
// declarations of a different function — irrecoverable without the original.
pFormatCtx->streams[videoindex]->codec; pCodec = avcodec_find_decoder(pCodecCtx->codec_id); if (pCodec == NULL) { printf("Codec not found.\n"); return -1; } if (avcodec_open2(pCodecCtx,NULL)<0) { printf("Could not open codec.\n"); return -1; } AVFrame *pFrame,*pFrameYUV; pFrame = av_frame_alloc(); pFrameYUV = av_frame_alloc(); int ret,got_picture; AVPacket *packet = (AVPacket *)av_malloc(sizeof(AVPacket)); struct SwsContext *img_convert_ctx; img_convert_ctx = sws_getContext(pCodecCtx->width,NULL); uint8_t *out_buffer; out_buffer = new uint8_t[avpicture_get_size(AV_PIX_FMT_RGB24,pCodecCtx->height); while (av_read_frame(pFormatCtx,packet) >= 0) { if (packet->stream_index == videoindex) { ret = avcodec_decode_video2(pCodecCtx,packet); if (ret < 0) { printf("Decode Error.\n"); return -1; } if (got_picture) { sws_scale(img_convert_ctx,output);*/ tcallback((char*)pFrameYUV->data[0],pCodecCtx->width); } } av_free_packet(packet); } sws_freeContext(img_convert_ctx); //fclose(fp_open); //SDL_Quit(); //av_free(out_buffer); av_free(pFrameYUV); avcodec_close(pCodecCtx); avformat_close_input(&pFormatCtx); return 0; } //Callback int file_buffer(void *opaque,int buf_size) { FILE *fp_open = (FILE *)opaque; if (!feof(fp_open)) { int true_size = fread(buf,buf_size,fp_open); return true_size; } else { return -1; } } int play_file(char* file_name,int num,videoindex; AVCodecContext *pCodecCtx; AVCodec *pCodec; char filepath[] = "video.264"; //av_register_all(); avformat_network_init(); pFormatCtx = avformat_alloc_context(); string patha = "C:\\Users\\sbd01\\Videos\\video.264"; //patha = "C:\\Users\\sbd01\\Pictures\\ffmpegtest\\Debug\\video.dat"; FILE *fp_open = fopen(file_name,"rb+"); unsigned char *aviobuffer = (unsigned char *)av_malloc(32768); //printf("avio_alloc_context %d\n",cam_no); AVIOContext *avio = avio_alloc_context(aviobuffer,32768,(void*)fp_open,file_buffer,*pFrameYUV; pFrame = av_frame_alloc(); pFrameYUV = av_frame_alloc(); /*if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO |
// Chunk 12: tail of play_file() — a large commented-out SDL1 display path
// (SDL_SetVideoMode / SDL_CreateYUVOverlay), then a truncated decode loop in
// which everything between `int ret,` and `pCodecCtx->width);` was lost by
// the scraper, followed by the standard FFmpeg cleanup sequence.
SDL_INIT_TIMER)) { printf("Could not initialize SDL - %s\n",SDL_GetError()); return -1; }*/ /*int screen_w = 0,screen_h = 0; SDL_Surface *screen; screen_w = pCodecCtx->width; screen_h = pCodecCtx->height; screen = SDL_SetVideoMode(screen_w,screen_h,0); if (!screen) { printf("SDL: could not set video mode - exiting:%s\n",SDL_GetError()); return -1; } SDL_Overlay *bmp; bmp = SDL_CreateYUVOverlay(pCodecCtx->width,SDL_YV12_OVERLAY,screen); SDL_Rect rect; rect.x = 0; rect.y = 0; rect.w = screen_w; rect.h = screen_h;*/ //SDL End------------------------ int ret,pCodecCtx->width); } } av_free_packet(packet); } sws_freeContext(img_convert_ctx); //fclose(fp_open); //SDL_Quit(); //av_free(out_buffer); av_free(pFrameYUV); avcodec_close(pCodecCtx); avformat_close_input(&pFormatCtx); return 0; }
以上這篇python opencv圖片編碼為h264檔案的例項就是小編分享給大家的全部內容了,希望能給大家一個參考,也希望大家多多支援我們。