Decoding an H.264 stream with the FFmpeg SDK (from the FFmpeg Project Group)

Posted 2011-12-27 11:59:48
Link: http://www.ffmpeg.com.cn/index.php/%E8%BF%90%E7%94%A8SDK%E8%A7%A3264%E7%A0%81%E6%B5%81

Method 1: The best starting points are the two examples that ship with ffmpeg, output_example.c and apiexample.c, or read the ffmpeg and ffplay sources directly if you need to use ffmpeg in a heavyweight way.
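For reference, here is a condensed sketch of the apiexample.c-style decode loop (reconstructed from memory of the pre-0.6 libavcodec API; the names c, picture, f, inbuf and INBUF_SIZE follow apiexample.c's conventions and are assumptions here):

   uint8_t inbuf[INBUF_SIZE], *inbuf_ptr;
   int size, len, got_picture;
   // Feed raw chunks of the file to the decoder; avcodec_decode_video
   // consumes as many bytes as it can and reports completed frames.
   while (!feof(f)) {
       size = fread(inbuf, 1, INBUF_SIZE, f);
       inbuf_ptr = inbuf;
       while (size > 0) {
           len = avcodec_decode_video(c, picture, &got_picture,
                                      inbuf_ptr, size);
           if (len < 0)
               break; // decode error, skip the rest of this chunk
           if (got_picture) {
               // a complete frame is now in picture->data[]
           }
           size -= len;
           inbuf_ptr += len;
       }
   }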
Method 2 (somewhat more involved): the following sample program from the web, http://www.inb.uni-luebeck.de/~boehme/avcodec_sample.cpp, is reproduced in full below for convenience:

#include "avcodec.h"
#include "avformat.h"
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
/* new types: minimal bool emulation for pre-C99 compilers */
enum bool {false = 0, true};
typedef enum bool bool;
static bool GetNextFrame(AVFormatContext *pFormatCtx, AVCodecContext *pCodecCtx,int videoStream, AVFrame *pFrame)
{
   static AVPacket packet;
   static int      bytesRemaining=0;
   static uint8_t  *rawData;
   static bool     fFirstTime=true;
   int             bytesDecoded;
   int             frameFinished;
   // First time we're called, set packet.data to NULL to indicate it
   // doesn't have to be freed
   if (fFirstTime){
       fFirstTime = false;
       packet.data = NULL;
   }
   // Decode packets until we have decoded a complete frame
   while (true)
   {
       // Work on the current packet until we have decoded all of it
       while (bytesRemaining > 0)
       {
           // Decode the next chunk of data
           bytesDecoded = avcodec_decode_video(pCodecCtx, pFrame,
               &frameFinished, rawData, bytesRemaining);
           // Was there an error?
           if (bytesDecoded < 0){
               fprintf(stderr, "Error while decoding frame\n");
               return false;
           }
           bytesRemaining -= bytesDecoded;
           rawData += bytesDecoded;
           // Did we finish the current frame? Then we can return
           if (frameFinished)
               return true;
       }
        // Read the next packet, skipping all packets that aren't for this
        // stream
       do{
           // Free old packet
           if(packet.data != NULL)
               av_free_packet(&packet);
           // Read new packet
           if(av_read_packet(pFormatCtx, &packet) < 0)
               goto loop_exit;
       } while(packet.stream_index != videoStream);
       bytesRemaining = packet.size;
       rawData = packet.data;
   }
loop_exit:
   // Decode the rest of the last frame
   bytesDecoded = avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
               rawData, bytesRemaining);
   // Free last packet
   if(packet.data != NULL)
       av_free_packet(&packet);
   return frameFinished != 0;
}
int main()
{
   AVFormatContext *pFormatCtx;
   int             i, videoStream;
   AVCodecContext  *pCodecCtx;
   AVCodec         *pCodec;
   AVFrame         *pFrame;
   AVFrame         *pFrameYUV;
   clock_t         t;
   double          fps;
   int                y_size, i_frame=0;
   int                numBytes;
   uint8_t         *buffer;        
   char* infile="test.264";
   char* outfile="out.yuv";
   FILE* fp=fopen(outfile, "wb");
   if (fp==NULL){
            fprintf(stderr, "\nCan't open file %s!", outfile);
           return -1;
    }
   // Register all formats and codecs
   av_register_all();
   // Open video file
   if (av_open_input_file(&pFormatCtx, infile, NULL, 0, NULL) != 0)
        return -1; // Couldn't open file
    // Retrieve stream information
   if (av_find_stream_info(pFormatCtx) < 0)
        return -1; // Couldn't find stream information
    // Dump information about file onto standard error
   dump_format(pFormatCtx, 0, infile, false);
    t = clock();      
   // Find the first video stream
   videoStream = -1;
   for (i=0; i<pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO){
           videoStream=i;
           break;
       }
   if (videoStream == -1)
        return -1; // Didn't find a video stream
    // Get a pointer to the codec context for the video stream
   pCodecCtx = pFormatCtx->streams[videoStream]->codec;
   // Find the decoder for the video stream
   pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
   if (pCodec == NULL)
        return -1; // Codec not found
    // Inform the codec that we can handle truncated bitstreams -- i.e.,
    // bitstreams where frame boundaries can fall in the middle of packets
   if(pCodec->capabilities & CODEC_CAP_TRUNCATED)
       pCodecCtx->flags|=CODEC_FLAG_TRUNCATED;
   // Open codec
   if (avcodec_open(pCodecCtx, pCodec) < 0)
        return -1; // Could not open codec
    // Allocate video frame
   pFrame = avcodec_alloc_frame();
  // Allocate an AVFrame structure
   pFrameYUV=avcodec_alloc_frame();
   if(pFrameYUV == NULL)
       return -1;        
   // Determine required buffer size and allocate buffer
   numBytes=avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width,
       pCodecCtx->height);
   buffer = (uint8_t*)malloc(numBytes);        
    // Assign appropriate parts of buffer to image planes in pFrameYUV
   avpicture_fill((AVPicture *)pFrameYUV, buffer, PIX_FMT_YUV420P,
       pCodecCtx->width, pCodecCtx->height);
   // Read frames
   while(GetNextFrame(pFormatCtx, pCodecCtx, videoStream, pFrame))
     {        
           img_convert((AVPicture *)pFrameYUV, PIX_FMT_YUV420P, (AVPicture*)pFrame,
                               pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);               
            i_frame++;
            y_size = pCodecCtx->width * pCodecCtx->height;               
#if 1
            if (i_frame==1) // only print the line sizes once
           {
       printf("\n:lolpFrame->linesize[0]=%d, pFrame->linesize[1]=%d, pFrame->linesize[2]=%d!\n",
                pFrame->linesize[0], pFrame->linesize[1], pFrame->linesize[2]);
       printf("\n:lolpFrameYUV->linesize[0]=%d, pFrameYUV->linesize[1]=%d, pFrameYUV->linesize[2]=%d!",
               pFrameYUV->linesize[0], pFrameYUV->linesize[1], pFrameYUV->linesize[2]);
            }
#endif               
            fwrite(pFrameYUV->data[0], 1, y_size, fp);
            fwrite(pFrameYUV->data[1], 1, (y_size/4), fp);
            fwrite(pFrameYUV->data[2], 1, (y_size/4), fp);
      }
    //calculate decode rate
    fclose(fp);
    t = clock() - t;
    fps = (double)(t) / CLOCKS_PER_SEC;
    fps = i_frame / fps;
    printf("\n==>Decode rate %.4f fps!\n", fps);   
   // Free the YUV image
   free(buffer);
   av_free(pFrameYUV);
   // Free the YUV frame
   av_free(pFrame);
   // Close the codec
   avcodec_close(pCodecCtx);
   // Close the video file
   av_close_input_file(pFormatCtx);
   return 0;
}

Make the following changes to the example above:

1. In GetNextFrame, change av_read_packet to av_read_frame.
2. To save a decoded YUV frame, you can use fastreaming's method (described below), or define a new YUV420P frame and call img_convert to convert the decoded frame into it. The latter is slightly slower, but the interface is cleaner, easier to wrap, and can be used directly for display and so on.
3. The last decoded frame must be written out once more after the while loop body; otherwise the output will be one frame short. See apiexample.c for reference, and the flush sketch after this list.
4. If you keep this fragment:
      if(g_ffmpeg_pCodec->capabilities & CODEC_CAP_TRUNCATED)
          g_ffmpeg_pCodecCtx->flags |= CODEC_FLAG_TRUNCATED;
   it must be removed when you use av_read_frame, or data is lost badly. When I used av_read_frame without removing those two lines, a video file of 380-odd frames decoded to only 190-odd frames, and many of them were corrupted, like the smeared picture of a skipping low-quality VCD. After removing them everything was OK.
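For item 3, here is a minimal sketch of the flush in the style of apiexample.c, reusing the variable names of the main program above (bytesDecoded and frameFinished would have to be declared in main; apiexample.c uses exactly this empty-input call to drain MPEG-style delayed frames, but the behavior is codec-dependent):

    // After GetNextFrame() returns false, the decoder may still hold one
    // delayed frame; decode with an empty buffer to flush it out.
    bytesDecoded = avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, NULL, 0);
    if (frameFinished) {
        img_convert((AVPicture *)pFrameYUV, PIX_FMT_YUV420P, (AVPicture *)pFrame,
                    pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
        fwrite(pFrameYUV->data[0], 1, y_size, fp);
        fwrite(pFrameYUV->data[1], 1, y_size/4, fp);
        fwrite(pFrameYUV->data[2], 1, y_size/4, fp);
    }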
fastreaming's method:

source: 352x288
internal: (16+352+16) x (16+288+16)
FFmpeg pads each of the four edges of a frame with 16 pixels purely to help MC/ME (motion compensation/estimation).
As a result you get:
linesize[0] = 16+352+16 = 384
linesize[1] = linesize[0]/2
linesize[2] = linesize[0]/2
However, data[0], data[1], data[2] point directly at the first valid YUV pixel of each plane, and the valid data length per line is only 352, 176, and 176 bytes respectively; data[0] has 288 valid lines, and data[1] and data[2] have 144 each.
From this you can work out the layout of a YUV frame produced by the FFmpeg decoder.
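In other words, the byte address of the luma sample at (x, y) is data[0] + y*linesize[0] + x, with the chroma planes subsampled by two in both directions. A tiny helper to illustrate (sample_addr is hypothetical, not from the original post):

/* Hypothetical helper: address of one sample inside a padded AVPicture.
   Plane 0 is Y at full resolution; planes 1 and 2 are U and V at half
   resolution, so luma coordinates are halved for them. */
static uint8_t *sample_addr(AVPicture *pic, int plane, int x, int y)
{
    int shift = (plane == 0) ? 0 : 1;   /* 4:2:0 chroma subsampling */
    return pic->data[plane]
         + (y >> shift) * pic->linesize[plane]
         + (x >> shift);
}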
Refer to the following function to dump the YUV data:

int g_yuv_index = 1;
void smartAV_dump_yuv(char *file_name, AVPicture *pic, int width, int height)
{
   FILE *fp = 0;
   char filename[128], index_name[32];
   int i, j, shift;
   uint8_t *yuv_factor;
   strcpy(filename, file_name);
   sprintf(index_name, "new_yuv_dump_%d.yuv", g_yuv_index);
   strcat(filename, index_name);
   fp = fopen(filename, "wb");
   if (fp) {
       for (i = 0; i < 3; i++) {
           shift = (i == 0 ? 0 : 1);      // chroma planes are half size
           yuv_factor = pic->data[i];     // start of this plane's valid pixels
           for (j = 0; j < (height >> shift); j++) {
               fwrite(yuv_factor, (width >> shift), 1, fp);  // one valid line
               yuv_factor += pic->linesize[i];               // step over the padding
           }
       }
       fclose(fp);
       g_yuv_index++;
   }
}
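A hypothetical call site, placed inside the read loop of the main program above (the "frame_" filename prefix is an assumption; each call writes one numbered .yuv file):

      // Dump the current padded decoder frame; only the valid pixels are
      // written, because smartAV_dump_yuv advances by pic->linesize[i].
      smartAV_dump_yuv("frame_", (AVPicture *)pFrame,
                       pCodecCtx->width, pCodecCtx->height);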



For a discussion of this topic, see the related thread on the FFmpeg Project Group forum: "Discussion on decoding H.264 streams with the SDK".