最近接了一个做ffmpeg解码的项目,客户的视频都是在服务器上编码的(H264编码),编码后的数据通过rtp封装发送到客户端,客户端负责解码显示。

前期准备:
下载ffmpeg编译ios版本

查看ffmpeg的示例代码,结果发现都是基于读文件的例子。相信很多没有做过的朋友肯定很着急,呵呵,现在本人给大家发个福利,我把我的代码贴上来,
免得后面涉及到这方面的同学走弯路。

头文件如下:
//
//  H264Decoder.h
//  MICloudPub
//
//  Created by chenjianjun on 14-6-3.
//  Copyright (c) 2014年 hy. All rights reserved.
//

#ifndef __MICloudPub___H264Decoder__
#define __MICloudPub___H264Decoder__

// BUGFIX: the header used uint8_t and memcpy without including their
// headers, so it did not compile standalone.
#include <stdint.h>
#include <string.h>

// Opaque decoder handle: the implementation returns a heap pointer cast to
// long; values <= 0 are initialization error codes.
// NOTE(review): assumes sizeof(long) >= sizeof(void*) (true on iOS/LP64) —
// confirm before porting to LLP64 platforms such as 64-bit Windows.
#define X264_DECODER_H long

// Thin wrapper around FFmpeg's H.264 decoder.
// Usage: h = X264Decoder_Init(); X264Decoder_Decode(h, ...); X264Decoder_UnInit(h);
class H264Decoder
{
public:
    H264Decoder(){}
    ~H264Decoder(){}
public:
    // Initialize the H.264 decoder.
    // Returns a handle (> 0) on success, or a negative error code.
    X264_DECODER_H X264Decoder_Init();
    
    /*
     Decode H.264 data; the output is planar YUV420 (I420).
     dwHandle: handle returned by X264Decoder_Init()
     pDataIn:  encoded input (must contain complete NAL units / a full frame)
     nInSize:  length of the input data in bytes
     pDataOut: output buffer allocated by the caller
               (needs at least width*height*3/2 bytes)
     nOutSize: size of the output buffer in bytes
     nWidth:   receives the decoded image width
     nHeight:  receives the decoded image height
     Returns 0 on success, negative on error.
     */
    int X264Decoder_Decode(X264_DECODER_H dwHandle, uint8_t *pDataIn, int nInSize, uint8_t *pDataOut, int nOutSize, int *nWidth, int *nHeight);
    
    /*
     Destroy the decoder and release all resources.
     dwHandle: handle returned by X264Decoder_Init()
     */
    void X264Decoder_UnInit(X264_DECODER_H dwHandle);
    
protected:
    // Copy a plane of ysize rows, xsize bytes each, out of a frame buffer
    // whose rows are 'wrap' bytes apart (FFmpeg linesize), packing the rows
    // tightly into pDataOut.
    void pgm_save2(unsigned char *buf, int wrap, int xsize, int ysize, uint8_t *pDataOut)
    {
        for (int i = 0; i < ysize; i++)
        {
            memcpy(pDataOut + i * xsize, buf + i * wrap, xsize);
        }
    }
};

#endif /* defined(__MICloudPub___H264Decoder__) */


cpp文件如下:
//
//  H264Decoder.cpp
//  MICloudPub
//
//  Created by chenjianjun on 14-6-3.
//  Copyright (c) 2014年 hy. All rights reserved.
//

#include "H264Decoder.h"

extern "C"
{
#include "libavformat/avformat.h"
#include "libswresample/swresample.h"
#include "libavutil/opt.h"
#include "libavutil/channel_layout.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/fifo.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavfilter/avcodec.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersrc.h"
#include "libavfilter/buffersink.h"
}

// Per-decoder state, allocated in X264Decoder_Init() and handed back to the
// caller as an opaque X264_DECODER_H (the pointer value cast to long).
typedef struct
{
    struct AVCodec *codec;// H.264 decoder (owned by libavcodec, never freed)
    struct AVCodecContext *c;// codec context (released in X264Decoder_UnInit)
    int frame_count;// set to 0 in Init; not updated in the visible code
    struct AVFrame *picture;// reusable decode target (released in X264Decoder_UnInit)
    AVPacket avpkt;// packet wrapper around the caller's input buffer
    
    int iWidth;// NOTE(review): never written in the visible code — confirm use
    int iHeight;// NOTE(review): never written in the visible code — confirm use
    int comsumedSize;// bytes consumed by the last avcodec_decode_video2() call
    int got_picture;// nonzero when the last decode call produced a full frame
} X264_Decoder_Handle;


/*
 Initialize the H.264 decoder.
 Returns a handle (> 0, the state pointer cast to long) on success, or a
 negative error code:
   -1 out of memory, -2 no H.264 decoder in this FFmpeg build,
   -3 context allocation failed, -4 avcodec_open2 failed,
   -5 frame allocation failed.
 BUGFIX: the original leaked pHandle (and, later, the codec context) on
 every error path; each failure now releases everything acquired so far.
 */
X264_DECODER_H H264Decoder::X264Decoder_Init()
{
    X264_Decoder_Handle *pHandle = (X264_Decoder_Handle *)malloc(sizeof(X264_Decoder_Handle));
    if (pHandle == NULL)
    {
        return -1;
    }
    
    avcodec_register_all();
    
    av_init_packet(&(pHandle->avpkt));
    
    pHandle->codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!pHandle->codec)
    {
        free(pHandle);
        return -2;
    }
    
    pHandle->c = avcodec_alloc_context3(pHandle->codec);
    if (!pHandle->c)
    {
        free(pHandle);
        return -3;
    }
    
    pHandle->c->codec_type = AVMEDIA_TYPE_VIDEO;
    pHandle->c->pix_fmt = PIX_FMT_YUV420P;
    
    if (avcodec_open2(pHandle->c, pHandle->codec, NULL) < 0)
    {
        av_free(pHandle->c);
        free(pHandle);
        return -4;
    }
    
    pHandle->picture = av_frame_alloc();
    if (!pHandle->picture)
    {
        avcodec_close(pHandle->c);
        av_free(pHandle->c);
        free(pHandle);
        return -5;
    }
    
    pHandle->frame_count = 0;
    
    return (X264_DECODER_H)pHandle;
}

/*
 Tear down a decoder created by X264Decoder_Init(): releases the decode
 frame, closes and frees the codec context, then frees the state struct
 itself. A non-positive handle (i.e. an Init error code) is ignored.
 */
void H264Decoder::X264Decoder_UnInit(X264_DECODER_H dwHandle)
{
    if (dwHandle <= 0)
        return;
    
    X264_Decoder_Handle *pDecoder = (X264_Decoder_Handle *)dwHandle;
    
    av_frame_free(&pDecoder->picture);
    avcodec_close(pDecoder->c);
    av_free(pDecoder->c);
    
    free(pDecoder);
}

/*
 Decode H.264 data into tightly-packed I420 (Y plane, then U, then V).
 dwHandle: handle returned by X264Decoder_Init()
 pDataIn:  encoded input; must contain complete NAL units
 nInSize:  input length in bytes
 pDataOut: caller-allocated output buffer (>= width*height*3/2 bytes)
 nOutSize: output buffer size in bytes
 nWidth/nHeight: receive the decoded dimensions (only written when a
                 picture is produced)
 Returns 0 on success, -1 bad arguments, -2 decode error,
 -3 output buffer too small.
 */
int H264Decoder::X264Decoder_Decode(X264_DECODER_H dwHandle, uint8_t *pDataIn, int nInSize, uint8_t *pDataOut, int nOutSize, int *nWidth, int *nHeight)
{
    if (dwHandle <= 0 || pDataIn == NULL || nInSize <= 0 || pDataOut == NULL)
    {
        return -1;
    }
    
    X264_Decoder_Handle *pHandle = (X264_Decoder_Handle *)dwHandle;
    
    av_init_packet(&(pHandle->avpkt));
    pHandle->avpkt.size = nInSize;
    pHandle->avpkt.data = pDataIn;
    
    // One input buffer may hold several NAL units, so keep feeding the
    // decoder until everything has been consumed.
    while (pHandle->avpkt.size > 0)
    {
        pHandle->comsumedSize = avcodec_decode_video2(pHandle->c, pHandle->picture, &pHandle->got_picture, &(pHandle->avpkt));
        if (pHandle->comsumedSize < 0)
        {
            return -2;
        }
        
        if (pHandle->got_picture)
        {
            int w = pHandle->c->width;
            int h = pHandle->c->height;
            
            // BUGFIX: verify the caller's buffer can hold a full I420 frame
            // (w*h luma + 2 * (w/2)*(h/2) chroma) before writing into it;
            // the original wrote unconditionally and could overflow pDataOut.
            if (nOutSize < w * h * 3 / 2)
            {
                return -3;
            }
            
            *nWidth = w;
            *nHeight = h;
            
            // Pack the three (possibly padded) FFmpeg planes tightly.
            pgm_save2(pHandle->picture->data[0], pHandle->picture->linesize[0], w, h, pDataOut);
            pgm_save2(pHandle->picture->data[1], pHandle->picture->linesize[1], w / 2, h / 2, pDataOut + w * h);
            pgm_save2(pHandle->picture->data[2], pHandle->picture->linesize[2], w / 2, h / 2, pDataOut + w * h * 5 / 4);
        }
        
        // BUGFIX: a return of 0 with data still pending used to spin this
        // loop forever; treat "nothing consumed" as end of usable input.
        if (pHandle->comsumedSize == 0)
        {
            break;
        }
        
        pHandle->avpkt.size -= pHandle->comsumedSize;
        pHandle->avpkt.data += pHandle->comsumedSize;
    }
    
    return 0;
}

主要给大家讲讲 X264Decoder_Decode 这个接口的参数说明:
dwHandle:其实就是我们初始化时返回的那个结构体指针。
pDataIn:你接收到的网络数据包,这个数据包必须是完整的帧(如果一帧数据太大,服务器会分包发送,这时就需要客户端缓存重组),不然会解码失败;我在做的过程中,如果重组后的包不完整,h264 里面会报很多莫名其妙的错误日志。
nInSize:接收到的网络数据包长度。
pDataOut:解码后的数据存放区。
nOutSize:解码后的数据存放区大小。
nWidth:解码后的视频宽度。
nHeight:解码后的视频高度。
函数里面有一个循环解码的过程。其实你可以写成只解码一次,这样写是因为解码是按 NAL 单元来进行的,有可能一帧数据里包含几个 NAL 头。解码后的数据通过 OpenGL 渲染,效果还不错:iPhone5 解码 720P 的数据,每秒 20 到 25 帧,CPU 占用率在 85% 左右。