The previous section showed how the new API decodes serialized, complete H.264 frames and saves the resulting YUV data.

The old-style API instead obtains the decoding setup by opening the file and probing its stream information, then reads frame data straight from the file, so no serialization is needed; the drawback is that it cannot read data from memory.

The decode-and-scale pipeline is shown below:

[Figure: decode-and-scale flowchart]

The demo decodes the video, scales it with sws_scale, and saves the result as raw YUV data.

It converts the input 480x272 H.264 file to 360x204.
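For reference, a YUV420P frame stores a full-resolution Y plane plus quarter-resolution U and V planes, so each output frame written to tmp.yuv occupies 360 x 204 x 3/2 = 110160 bytes (Y: 73440; U and V: 18360 each). The size of tmp.yuv should therefore be a multiple of 110160, which makes a quick sanity check on the result.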

scaling_video.c

#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
// Decode a video file, scale it, and save the raw YUV data
FILE *g_fileYuv = NULL;
// Append one YUV420P frame to tmp.yuv. The planes are copied row by row
// because linesize (the stride) can be wider than the visible width due to
// alignment padding, while a raw .yuv file must be tightly packed.
static void saveYuv(AVFrame *frame)
{
    if (g_fileYuv == NULL)
    {
        g_fileYuv = fopen("tmp.yuv", "wb+");
        if (!g_fileYuv)
        {
            printf("fopen error\n");
        }
    }
    if (g_fileYuv)
    {
        // Y plane: full resolution
        for (int i = 0; i < frame->height; i++)
        {
            fwrite(frame->data[0] + i * frame->linesize[0], frame->width, 1, g_fileYuv);
        }
        // U and V planes: half resolution in both dimensions for YUV420P
        int loop = frame->height / 2;
        int len_uv = frame->width / 2;
        for (int i = 0; i < loop; i++)
        {
            fwrite(frame->data[1] + i * frame->linesize[1], len_uv, 1, g_fileYuv);
        }
        for (int i = 0; i < loop; i++)
        {
            fwrite(frame->data[2] + i * frame->linesize[2], len_uv, 1, g_fileYuv);
        }
    }
}
int main(int argc, char *argv[])
{
    AVFormatContext *pFormatCtx = NULL; // demuxer context: holds the container/stream parameters
    AVCodecContext *pDecoderCtx;        // decoder context
    AVCodec *pDecoder;                  // AVCodec describes the codec implementation
    AVFrame *frame, *sw_frame;          // AVFrame holds raw, uncompressed data (YUV/RGB for video, PCM for audio)
    AVPacket *packet;                   // a packet of compressed data read from the stream
    struct SwsContext *img_convert_ctx = NULL;
    uint8_t *out_buffer;
    int videoindex = -1;
    int ret;
    char filepath[] = "in.h264";
    // A1. Open the input file: read the header and store the format info in pFormatCtx
    if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0)
    {
        printf("Couldn't open input stream.\n");
        return -1;
    }
    // A2. Probe the stream info: read and partially decode some data to fill pFormatCtx->streams
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
    {
        printf("Couldn't find stream information.\n");
        return -1;
    }
    // A3. Find the first video stream
    for (int i = 0; i < pFormatCtx->nb_streams; i++)
    {
        AVStream *st = pFormatCtx->streams[i];
        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            videoindex = i;
            break;
        }
    }
    if (videoindex == -1)
    {
        printf("Didn't find a video stream.\n");
        return -1;
    }
    // A5.1 Get the codec parameters of the video stream
    //      (streams[i]->codec is deprecated; use codecpar instead)
    AVCodecParameters *par = pFormatCtx->streams[videoindex]->codecpar;
    printf("codec:%d\n", par->codec_id);
    // A5.2 Find the FFmpeg decoder matching the codec ID
    pDecoder = avcodec_find_decoder(par->codec_id);
    if (pDecoder == NULL)
    {
        printf("Codec not found.\n");
        return -1;
    }
    // A5.3 Allocate a decoder context, copy the stream parameters into it, and open it
    pDecoderCtx = avcodec_alloc_context3(pDecoder);
    if (avcodec_parameters_to_context(pDecoderCtx, par) < 0)
    {
        printf("Could not copy codec parameters.\n");
        return -1;
    }
    if (avcodec_open2(pDecoderCtx, pDecoder, NULL) < 0)
    {
        printf("Could not open codec.\n");
        return -1;
    }
    // Allocate the packet and the frames
    packet = av_packet_alloc();
    frame = av_frame_alloc();
    sw_frame = av_frame_alloc();
    // Describe the scaled output frame: 480x272 in -> 360x204 out
    sw_frame->format = AV_PIX_FMT_YUV420P;
    sw_frame->width = 360;
    sw_frame->height = 204;
    // NOTE: the buffer must be sized for the *output* resolution
    out_buffer = (uint8_t *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, sw_frame->width, sw_frame->height, 1));
    // Attach the buffer to the frame: av_image_fill_arrays() points
    // sw_frame->data[0..2] at the Y, U and V planes inside out_buffer and
    // fills sw_frame->linesize (avpicture_fill() is the deprecated equivalent)
    av_image_fill_arrays(sw_frame->data, sw_frame->linesize, out_buffer,
                         AV_PIX_FMT_YUV420P, sw_frame->width, sw_frame->height, 1);
    // Output Info  -----------------------------
    printf("--------------- File Information ----------------\n");
    av_dump_format(pFormatCtx, 0, filepath, 0);
    printf("-------------------------------------------------\n");
    // Create the SwsContext used for scaling. Arguments: source width/height/pixel
    // format, destination width/height/pixel format, and the scaling algorithm (bicubic here)
    img_convert_ctx = sws_getContext(pDecoderCtx->width, pDecoderCtx->height, pDecoderCtx->pix_fmt,
                                     sw_frame->width, sw_frame->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
    if (!img_convert_ctx)
    {
        printf("sws_getContext failed.\n");
        return -1;
    }
    while (av_read_frame(pFormatCtx, packet) >= 0)
    {
        if (packet->stream_index == videoindex)
        {
            ret = avcodec_send_packet(pDecoderCtx, packet);
            if (ret < 0)
            {
                fprintf(stderr, "Error during decoding\n");
                return ret;
            }
            while (ret >= 0)
            {
                ret = avcodec_receive_frame(pDecoderCtx, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                    break;
                else if (ret < 0)
                {
                    fprintf(stderr, "Error during decoding\n");
                    return ret;
                }
                // Scale the decoded frame into sw_frame
                sws_scale(img_convert_ctx, (const uint8_t *const *)frame->data, frame->linesize,
                          0, pDecoderCtx->height,
                          sw_frame->data, sw_frame->linesize);
                saveYuv(sw_frame);
            }
        }
        av_packet_unref(packet);
    }
    
    printf("Succeed to decode frame!\n");
    
    // Flush the decoder: sending a NULL packet drains any buffered frames
    ret = avcodec_send_packet(pDecoderCtx, NULL);
    if (ret < 0)
    {
        fprintf(stderr, "Error during decoding\n");
        return ret;
    }
    while (ret >= 0)
    {
        ret = avcodec_receive_frame(pDecoderCtx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;
        else if (ret < 0)
        {
            fprintf(stderr, "Error during decoding\n");
            return ret;
        }
        // Scale the decoded frame into sw_frame
        sws_scale(img_convert_ctx, (const uint8_t *const *)frame->data, frame->linesize,
                  0, pDecoderCtx->height,
                  sw_frame->data, sw_frame->linesize);
        saveYuv(sw_frame);
        printf("Succeed to decode all frame!\n");
    }
    if(img_convert_ctx)
        sws_freeContext(img_convert_ctx);
    if(g_fileYuv){
        fclose(g_fileYuv);
    }
    av_frame_free(&sw_frame);
    av_frame_free(&frame);
    av_free(out_buffer);
    av_packet_free(&packet);
    avcodec_free_context(&pDecoderCtx);
    avformat_close_input(&pFormatCtx);
    return 0;
}
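As a side note, instead of allocating out_buffer by hand and wiring it into the frame with av_image_fill_arrays(), you can let FFmpeg own the output planes. A minimal sketch of that alternative (it would replace the out_buffer lines in main above; same format and size):

// Sketch: have FFmpeg allocate the Y/U/V planes itself.
sw_frame->format = AV_PIX_FMT_YUV420P;
sw_frame->width  = 360;
sw_frame->height = 204;
if (av_frame_get_buffer(sw_frame, 0) < 0)   // 0 = pick a default alignment
{
    printf("Could not allocate output frame buffers.\n");
    return -1;
}
// The planes are then released together with the frame by av_frame_free().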

Build

example:scaling_video.c
    gcc -o scaling_video scaling_video.c \
        -I /home/workspace/dengzr/ffmpeg-4.4-x86_64/include/ \
        -L /home/workspace/dengzr/ffmpeg-4.4-x86_64/lib/ \
        -I /home/workspace/dengzr/linux-x64/include/ \
        -L /home/workspace/dengzr/linux-x64/lib \
        -lavformat -lavcodec -lavfilter -lswscale -lswresample -lavutil -lpostproc \
        -lx264 -lx265 -lvpx -lfdk-aac -lvorbis -lvorbisenc \
        -lva -lva-drm -lva-x11 -ldrm -lmfx -lOpenCL -lX11 -lXext -lSDL2 \
        -lstdc++ -lm -lz -lpthread -ldl
.PHONY:clean
clean:
    rm -f scaling_video
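If your FFmpeg build installs pkg-config files (the .pc files under lib/pkgconfig), the long library list can be generated instead of maintained by hand, for example:

gcc -o scaling_video scaling_video.c $(pkg-config --cflags --libs libavformat libavcodec libswscale libavutil)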

Run

./scaling_video

Verify

ffplay -video_size 360x204 -i tmp.yuv
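ffplay recognizes the .yuv extension as raw video and defaults to yuv420p, which is why the short command above works. If your build does not, the fully explicit form is:

ffplay -f rawvideo -pixel_format yuv420p -video_size 360x204 tmp.yuv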