Contents

  • Defining the watermark filter
  • Defining input and output
  • Full project code
  • Results



To assert a video's originality and keep others from plagiarizing or re-uploading it, we often add a watermark to video assets for copyright protection. FFmpeg's filter module supports adding watermarks to video, and this post shows how to add an image watermark to a video using the FFmpeg libraries. The overall workflow is shown in the figure below:


[Figure: video watermarking workflow]


First we demultiplex the original video file into a video stream and an audio stream. The audio stream is written to the output untouched. The video stream is decoded, and a filter graph combines each decoded frame with the image data before the result is re-encoded. Muxing the resulting audio and video streams yields an output file that carries the image watermark.
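Before walking through the library code, it is worth seeing the same pipeline expressed with the ffmpeg command-line tool. This is only an orientation sketch; input.mp4, logo.png and out.flv are placeholder names:

ffmpeg -i input.mp4 -i logo.png \
    -filter_complex "[1:v]scale=50:50[scl];[0:v][scl]overlay=15:15" \
    -c:v libx264 -c:a copy out.flv

The program below builds essentially this pipeline by hand with the FFmpeg APIs.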

Defining the watermark filter

The watermark filter does two things:
1. scale the source image to the desired size;
2. overlay the scaled image onto the original video frame.
The corresponding filter setup is shown below:

//The filter first scales the image to 50x50,
//then overlays the scaled image at position (15,15) of the original frame
const char *filter_descr = "[in]scale=50:50[scl];[in1][scl]overlay=15:15";
static int init_filters(const char *filters_descr)
{
	char args[512];
	char logoargs[512];
	int ret = 0;
	
	//Look up the filters we need
	const AVFilter *buffersrc = avfilter_get_by_name("buffer");
	const AVFilter *buffersrc1 = avfilter_get_by_name("buffer");
	const AVFilter *buffersink = avfilter_get_by_name("buffersink");

	AVFilterInOut *outputs = avfilter_inout_alloc();
	AVFilterInOut *overlayFrameInOut = avfilter_inout_alloc();
	AVFilterInOut *inputs = avfilter_inout_alloc();
	AVRational time_base = inputFormatContext->streams[videoStreamIndex]->time_base;
	enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };

	filterGraph = avfilter_graph_alloc();
	if (!outputs || !inputs || !filterGraph) {
		ret = AVERROR(ENOMEM);
		goto end;
	}

	//Format of the original video frames
	snprintf(args, sizeof(args),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		outputVideoCodecContext->width, outputVideoCodecContext->height, outputVideoCodecContext->pix_fmt,
		time_base.num, time_base.den,
		outputVideoCodecContext->sample_aspect_ratio.num, outputVideoCodecContext->sample_aspect_ratio.den);

	//Format of the logo image
	snprintf(logoargs, sizeof(logoargs),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		logoDecoderContext->width, logoDecoderContext->height, logoDecoderContext->pix_fmt,
		time_base.num, time_base.den,
		outputVideoCodecContext->sample_aspect_ratio.num, outputVideoCodecContext->sample_aspect_ratio.den);

	//Input of the scale filter (the logo image)
	ret = avfilter_graph_create_filter(&bufferSourceContext, buffersrc, "in",
		logoargs, NULL, filterGraph);
	if (ret < 0) 
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
		goto end;
	}

	//Input of the overlay filter (the original video frames)
	ret = avfilter_graph_create_filter(&bufferSource1Context, buffersrc1, "in1",
		args, NULL, filterGraph);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
		goto end;
	}

	//Create the filter sink
	ret = avfilter_graph_create_filter(&bufferSinkContext, buffersink, "out",
		NULL, NULL, filterGraph);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
		goto end;
	}

	//Set the sink's accepted output pixel formats
	ret = av_opt_set_int_list(bufferSinkContext, "pix_fmts", pix_fmts,
		AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
		goto end;
	}

	//Wire up the endpoints of the filter graph.
	//The 'outputs' list describes the output pads of the buffer sources: each
	//source output must connect to a matching labelled input of filters_descr.
	//The logo source is named "in" and the video source "in1", matching the
	//[in] and [in1] labels in the filter description.
	outputs->name = av_strdup("in");
	outputs->filter_ctx = bufferSourceContext;
	outputs->pad_idx = 0;
	outputs->next = overlayFrameInOut;

	overlayFrameInOut->name = av_strdup("in1");
	overlayFrameInOut->filter_ctx = bufferSource1Context;
	overlayFrameInOut->pad_idx = 0;
	overlayFrameInOut->next = NULL;

	//The 'inputs' list describes the input pad of bufferSinkContext: the sink
	//must connect to the last output of filters_descr. That output carries no
	//label, so it defaults to "out", which is the name used here.
	inputs->name = av_strdup("out");
	inputs->filter_ctx = bufferSinkContext;
	inputs->pad_idx = 0;
	inputs->next = NULL;

	// In the filters_descr string an unlabelled first input defaults to "in"
	// and an unlabelled last output defaults to "out". The arguments are:
	// @filterGraph   the filter graph container
	// @filters_descr the string describing the filter chain
	// @inputs        linked list of the graph's open inputs
	// @outputs       linked list of the graph's open outputs
	// Before the call, filterGraph holds three filters: two buffer sources and
	// one buffersink. After the call, the graph described by filters_descr has
	// been parsed and inserted into filterGraph, the buffer sources are linked
	// to its labelled inputs and its output is linked to the buffersink. The
	// connection between filters_descr and filterGraph is established through
	// the AVFilterInOut lists 'inputs' and 'outputs'.
	if ((ret = avfilter_graph_parse_ptr(filterGraph, filters_descr,
		&inputs, &outputs, NULL)) < 0)
		goto end;

	if ((ret = avfilter_graph_config(filterGraph, NULL)) < 0)
		goto end;

	//Dump the resulting filter graph (debug aid); braced so the goto above
	//does not jump over the initialization of graph_str
	{
		char *graph_str = avfilter_graph_dump(filterGraph, NULL);
		printf("%s", graph_str);
		av_free(graph_str);
	}

end:
	avfilter_inout_free(&inputs);
	avfilter_inout_free(&outputs);

	return ret;
}

The resulting filter pipeline is shown in the figure below:

[Figure: filter pipeline: logo ([in]) -> scale=50:50 -> [scl]; video ([in1]) + [scl] -> overlay=15:15 -> [out]]

Defining input and output

1. Define the video input

static int open_meida_file(std::string file_path)
{
	int ret;
	AVCodec *dec;
	//Open the media file
	if ((ret = avformat_open_input(&inputFormatContext, file_path.c_str(), NULL, NULL)) < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
		return ret;
	}
	if ((ret = avformat_find_stream_info(inputFormatContext, NULL)) < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
		return ret;
	}

	//Locate the video and audio streams
	ret = av_find_best_stream(inputFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
		return ret;
	}
	videoStreamIndex = ret;

	ret = av_find_best_stream(inputFormatContext, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot find a audio stream in the input file\n");
		return ret;
	}
	audioStreamIndex = ret;

	//Output video encoder context
	outputVideoCodecContext = avcodec_alloc_context3(outputVideoCodec);
	if (!outputVideoCodecContext)
		return AVERROR(ENOMEM);
	//Configure the output encoder from the input stream parameters
	outputVideoCodecContext->time_base = inputFormatContext->streams[videoStreamIndex]->time_base;
	outputVideoCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
	outputVideoCodecContext->width = inputFormatContext->streams[videoStreamIndex]->codecpar->width;
	outputVideoCodecContext->height = inputFormatContext->streams[videoStreamIndex]->codecpar->height;
	outputVideoCodecContext->bit_rate = inputFormatContext->streams[videoStreamIndex]->codecpar->bit_rate;
	outputVideoCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
	outputVideoCodecContext->profile = FF_PROFILE_H264_MAIN;
	outputVideoCodecContext->level = 41;
	outputVideoCodecContext->thread_count = 8;

	//Video decoder context
	videoDecoderContext = avcodec_alloc_context3(dec);
	if (!videoDecoderContext)
		return AVERROR(ENOMEM);
	avcodec_parameters_to_context(videoDecoderContext, inputFormatContext->streams[videoStreamIndex]->codecpar);

	//Open the decoder
	if ((ret = avcodec_open2(videoDecoderContext, dec, NULL)) < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
		return ret;
	}
	return 0;
}

2. Define the logo input

//Open the logo resource
static int open_logo_file(std::string logo_path)
{
	int ret = -1;
	AVCodec *logoDec;
	//Open the demuxer for the logo file
	if ((ret = avformat_open_input(&logoFormatContext, logo_path.c_str(), NULL, NULL)) < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot open logo input file\n");
		return ret;
	}
	if ((ret = avformat_find_stream_info(logoFormatContext, NULL)) < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot find logo stream information\n");
		return ret;
	}

	ret = av_find_best_stream(logoFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, &logoDec, 0);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the logo input file\n");
		return ret;
	}
	//Logo decoder context
	logoDecoderContext = avcodec_alloc_context3(logoDec);
	if (!logoDecoderContext)
		return AVERROR(ENOMEM);

	avcodec_parameters_to_context(logoDecoderContext, logoFormatContext->streams[0]->codecpar);

	//Open the decoder
	if ((ret = avcodec_open2(logoDecoderContext, logoDec, NULL)) < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
		return ret;
	}
	return 0;
}

3. Define the video output

//Initialize the output streams
static int initializeOutputFile(std::string output_path)
{
	int error;
	//Create the output format context (FLV container)
	avformat_alloc_output_context2(&outputFormatContext, NULL, "flv", output_path.c_str());
	if (!outputFormatContext)
	{
		av_log(NULL, AV_LOG_ERROR, "Could not create output context!");
		return -1;
	}

	videoStream = avformat_new_stream(outputFormatContext, outputVideoCodec);
	if (!videoStream)
	{
		av_log(NULL, AV_LOG_ERROR, "[ERROR] Could not create Video Stream");
		return -1;
	}

	avcodec_parameters_from_context(videoStream->codecpar,outputVideoCodecContext);
	videoStream->codecpar->codec_tag = 0;

	//Audio stream
	audioStream = avformat_new_stream(outputFormatContext, NULL);
	if (!audioStream)
	{
		av_log(NULL, AV_LOG_ERROR, "[ERROR] Could not create Live Audio Stream");
		return -1;
	}
	avcodec_parameters_copy(audioStream->codecpar, inputFormatContext->streams[audioStreamIndex]->codecpar);
	audioStream->codecpar->codec_tag = 0;

	//Open the output I/O
	error = avio_open(&outputFormatContext->pb, output_path.c_str(), AVIO_FLAG_WRITE);
	if (error < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "[ERROR] Could not open output url");
		return -2;
	}

	//Write the container header
	error = avformat_write_header(outputFormatContext, NULL);
	if (error < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "[ERROR] Could not write header to output format context");
		return -1;
	}
	return 0;
}
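Note that the container is hard-coded to FLV here. Passing NULL as the format name instead lets FFmpeg pick the container from the output file's extension; a minimal sketch (out.mp4 is a placeholder name):

	//NULL format name: the muxer is guessed from the ".mp4" extension
	avformat_alloc_output_context2(&outputFormatContext, NULL, NULL, "out.mp4");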

//Encode the watermarked frames and write them to the output
static int output_frame(AVFrame *frame, AVRational time_base)
{
	int code;

	AVPacket packet = { 0 };
	av_init_packet(&packet);

	int ret = avcodec_send_frame(outputVideoCodecContext, frame);
	if (ret < 0) {
		printf("Error sending a frame for encoding\n");
		return -1;
	}
	while (ret >= 0) 
	{
		ret = avcodec_receive_packet(outputVideoCodecContext, &packet);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) 
		{
			return (ret == AVERROR(EAGAIN)) ? 0 : 1;
		}
		else if (ret < 0) {
			printf("Error during encoding\n");
			exit(1);
		}

		//Pace the output against the wall clock; this matters mainly when
		//pushing to a live stream rather than writing a local file
		AVRational avTimeBaseQ = { 1, AV_TIME_BASE };
		int64_t ptsTime = av_rescale_q(frame->pts, inputFormatContext->streams[videoStreamIndex]->time_base, avTimeBaseQ);
		int64_t nowTime = av_gettime() - startTime;
		if (ptsTime > nowTime)
			av_usleep(ptsTime - nowTime);

		//Rescale the packet timestamps into the output stream's time base and write it out
		packet.pts = av_rescale_q_rnd(packet.pts, time_base, videoStream->time_base, (AVRounding)(AV_ROUND_INF | AV_ROUND_PASS_MINMAX));
		packet.dts = av_rescale_q_rnd(packet.dts, time_base, videoStream->time_base, (AVRounding)(AV_ROUND_INF | AV_ROUND_PASS_MINMAX));
		packet.stream_index = videoStream->index;
		code = av_interleaved_write_frame(outputFormatContext, &packet);
		av_packet_unref(&packet);
		if (code < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "[ERROR] Writing Live Stream Interleaved Frame");
		}
	}
	return 0;
}
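A note on the timestamp handling above: av_rescale_q converts a timestamp between two time bases, i.e. pts_out = pts_in * tb_in / tb_out. A small worked example (values chosen for illustration; the FLV muxer uses a millisecond time base):

	//900 ticks at 1/90000 s per tick is 10 ms, i.e. 10 ticks at 1/1000:
	//AVRational src = { 1, 90000 }, dst = { 1, 1000 };
	//av_rescale_q(900, src, dst) == 10

The av_usleep call only paces encoding against the wall clock, which is useful when the output is a live stream; for pure file output it can be dropped.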

Full project code

The complete program is listed below:

extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/time.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
#include <stdio.h>
#include <stdlib.h>
}

#include <string>
#include <thread>

//The watermark is scaled to 50x50 pixels
//and overlaid at position (15,15)
const char *filter_descr = "[in]scale=50:50[scl];[in1][scl]overlay=15:15";

static AVFormatContext *outputFormatContext;
static AVFormatContext *inputFormatContext;   //input media file context
static AVFormatContext *logoFormatContext;    //logo file context

static AVCodecContext *videoDecoderContext;   //video decoder context
static AVCodecContext *logoDecoderContext;
static AVCodecContext *outputVideoCodecContext;

static AVCodec *outputVideoCodec;
static AVCodec *outputAudioCodec;

//Output audio and video streams
static AVStream *audioStream;
static AVStream *videoStream;

AVFilterContext *bufferSinkContext;
AVFilterContext *bufferSourceContext;
AVFilterContext *bufferSource1Context;
AVFilterGraph *filterGraph;

//Video and audio stream indices
static int videoStreamIndex = -1;
static int audioStreamIndex = -1;
static int64_t lastPts = AV_NOPTS_VALUE;
static int64_t startTime; //wall-clock start time

static std::string input_meida_path;  //path of the input media file
static std::string logo_path;         //path of the logo file
static std::string output_meida_path; //path of the output media file

//Thread that decodes the logo frame
static std::thread decodeTask;

//Watermark frame and filtered frame
static AVFrame* logoFrame;
static AVFrame *filt_frame;

//Open the logo decoder
static int open_logo_file(std::string logo_path)
{
	int ret = -1;
	AVCodec *logoDec;
	if ((ret = avformat_open_input(&logoFormatContext, logo_path.c_str(), NULL, NULL)) < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot open logo input file\n");
		return ret;
	}

	if ((ret = avformat_find_stream_info(logoFormatContext, NULL)) < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot find logo stream information\n");
		return ret;
	}

	ret = av_find_best_stream(logoFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, &logoDec, 0);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the logo input file\n");
		return ret;
	}

	//Logo decoder context
	logoDecoderContext = avcodec_alloc_context3(logoDec);
	if (!logoDecoderContext)
		return AVERROR(ENOMEM);

	avcodec_parameters_to_context(logoDecoderContext, logoFormatContext->streams[0]->codecpar);

	//Open the decoder
	if ((ret = avcodec_open2(logoDecoderContext, logoDec, NULL)) < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
		return ret;
	}
	return 0;
}

static int open_meida_file(std::string file_path)
{
	int ret;
	AVCodec *dec;
	
	if ((ret = avformat_open_input(&inputFormatContext, file_path.c_str(), NULL, NULL)) < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
		return ret;
	}
	if ((ret = avformat_find_stream_info(inputFormatContext, NULL)) < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
		return ret;
	}

	//Locate the video and audio streams
	ret = av_find_best_stream(inputFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
		return ret;
	}
	videoStreamIndex = ret;

	ret = av_find_best_stream(inputFormatContext, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot find a audio stream in the input file\n");
		return ret;
	}
	audioStreamIndex = ret;

	//Output video encoder context
	outputVideoCodecContext = avcodec_alloc_context3(outputVideoCodec);
	if (!outputVideoCodecContext)
		return AVERROR(ENOMEM);
	//Configure the output encoder from the input stream parameters
	outputVideoCodecContext->time_base = inputFormatContext->streams[videoStreamIndex]->time_base;
	outputVideoCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
	outputVideoCodecContext->width = inputFormatContext->streams[videoStreamIndex]->codecpar->width;
	outputVideoCodecContext->height = inputFormatContext->streams[videoStreamIndex]->codecpar->height;
	outputVideoCodecContext->bit_rate = inputFormatContext->streams[videoStreamIndex]->codecpar->bit_rate;
	outputVideoCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
	outputVideoCodecContext->profile = FF_PROFILE_H264_MAIN;
	outputVideoCodecContext->level = 41;
	outputVideoCodecContext->thread_count = 8;

	//Video decoder context
	videoDecoderContext = avcodec_alloc_context3(dec);
	if (!videoDecoderContext)
		return AVERROR(ENOMEM);
	avcodec_parameters_to_context(videoDecoderContext, inputFormatContext->streams[videoStreamIndex]->codecpar);

	//Open the decoder
	if ((ret = avcodec_open2(videoDecoderContext, dec, NULL)) < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
		return ret;
	}
	return 0;
}

static int init_filters(const char *filters_descr)
{
	char args[512];
	char logoargs[512];
	int ret = 0;
	
	//Look up the filters we need
	const AVFilter *buffersrc = avfilter_get_by_name("buffer");
	const AVFilter *buffersrc1 = avfilter_get_by_name("buffer");
	const AVFilter *buffersink = avfilter_get_by_name("buffersink");

	AVFilterInOut *outputs = avfilter_inout_alloc();
	AVFilterInOut *overlayFrameInOut = avfilter_inout_alloc();
	AVFilterInOut *inputs = avfilter_inout_alloc();
	AVRational time_base = inputFormatContext->streams[videoStreamIndex]->time_base;
	enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };

	filterGraph = avfilter_graph_alloc();
	if (!outputs || !inputs || !filterGraph) {
		ret = AVERROR(ENOMEM);
		goto end;
	}

	//Format of the original video frames
	snprintf(args, sizeof(args),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		outputVideoCodecContext->width, outputVideoCodecContext->height, outputVideoCodecContext->pix_fmt,
		time_base.num, time_base.den,
		outputVideoCodecContext->sample_aspect_ratio.num, outputVideoCodecContext->sample_aspect_ratio.den);

	//Format of the logo image
	snprintf(logoargs, sizeof(logoargs),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		logoDecoderContext->width, logoDecoderContext->height, logoDecoderContext->pix_fmt,
		time_base.num, time_base.den,
		outputVideoCodecContext->sample_aspect_ratio.num, outputVideoCodecContext->sample_aspect_ratio.den);

	//Input of the scale filter (the logo image)
	ret = avfilter_graph_create_filter(&bufferSourceContext, buffersrc, "in",
		logoargs, NULL, filterGraph);
	if (ret < 0) 
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
		goto end;
	}

	//Input of the overlay filter (the original video frames)
	ret = avfilter_graph_create_filter(&bufferSource1Context, buffersrc1, "in1",
		args, NULL, filterGraph);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
		goto end;
	}

	//Create the filter sink
	ret = avfilter_graph_create_filter(&bufferSinkContext, buffersink, "out",
		NULL, NULL, filterGraph);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
		goto end;
	}

	//Set the sink's accepted output pixel formats
	ret = av_opt_set_int_list(bufferSinkContext, "pix_fmts", pix_fmts,
		AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
		goto end;
	}

	//Wire up the endpoints of the filter graph.
	//The 'outputs' list describes the output pads of the buffer sources: each
	//source output must connect to a matching labelled input of filters_descr.
	//The logo source is named "in" and the video source "in1", matching the
	//[in] and [in1] labels in the filter description.
	outputs->name = av_strdup("in");
	outputs->filter_ctx = bufferSourceContext;
	outputs->pad_idx = 0;
	outputs->next = overlayFrameInOut;

	overlayFrameInOut->name = av_strdup("in1");
	overlayFrameInOut->filter_ctx = bufferSource1Context;
	overlayFrameInOut->pad_idx = 0;
	overlayFrameInOut->next = NULL;

	//The 'inputs' list describes the input pad of bufferSinkContext: the sink
	//must connect to the last output of filters_descr. That output carries no
	//label, so it defaults to "out", which is the name used here.
	inputs->name = av_strdup("out");
	inputs->filter_ctx = bufferSinkContext;
	inputs->pad_idx = 0;
	inputs->next = NULL;

	// In the filters_descr string an unlabelled first input defaults to "in"
	// and an unlabelled last output defaults to "out". The arguments are:
	// @filterGraph   the filter graph container
	// @filters_descr the string describing the filter chain
	// @inputs        linked list of the graph's open inputs
	// @outputs       linked list of the graph's open outputs
	// Before the call, filterGraph holds three filters: two buffer sources and
	// one buffersink. After the call, the graph described by filters_descr has
	// been parsed and inserted into filterGraph, the buffer sources are linked
	// to its labelled inputs and its output is linked to the buffersink. The
	// connection between filters_descr and filterGraph is established through
	// the AVFilterInOut lists 'inputs' and 'outputs'.
	if ((ret = avfilter_graph_parse_ptr(filterGraph, filters_descr,
		&inputs, &outputs, NULL)) < 0)
		goto end;

	if ((ret = avfilter_graph_config(filterGraph, NULL)) < 0)
		goto end;

	//Dump the resulting filter graph (debug aid); braced so the goto above
	//does not jump over the initialization of graph_str
	{
		char *graph_str = avfilter_graph_dump(filterGraph, NULL);
		printf("%s", graph_str);
		av_free(graph_str);
	}

end:
	avfilter_inout_free(&inputs);
	avfilter_inout_free(&outputs);

	return ret;
}

//Encode the watermarked frames and write them to the output
static int output_frame(AVFrame *frame, AVRational time_base)
{
	int code;

	AVPacket packet = { 0 };
	av_init_packet(&packet);

	int ret = avcodec_send_frame(outputVideoCodecContext, frame);
	if (ret < 0) {
		printf("Error sending a frame for encoding\n");
		return -1;
	}
	while (ret >= 0) 
	{
		ret = avcodec_receive_packet(outputVideoCodecContext, &packet);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) 
		{
			return (ret == AVERROR(EAGAIN)) ? 0 : 1;
		}
		else if (ret < 0) {
			printf("Error during encoding\n");
			exit(1);
		}

		//Pace the output against the wall clock; this matters mainly when
		//pushing to a live stream rather than writing a local file
		AVRational avTimeBaseQ = { 1, AV_TIME_BASE };
		int64_t ptsTime = av_rescale_q(frame->pts, inputFormatContext->streams[videoStreamIndex]->time_base, avTimeBaseQ);
		int64_t nowTime = av_gettime() - startTime;
		if (ptsTime > nowTime)
			av_usleep(ptsTime - nowTime);

		//Rescale the packet timestamps into the output stream's time base and write it out
		packet.pts = av_rescale_q_rnd(packet.pts, time_base, videoStream->time_base, (AVRounding)(AV_ROUND_INF | AV_ROUND_PASS_MINMAX));
		packet.dts = av_rescale_q_rnd(packet.dts, time_base, videoStream->time_base, (AVRounding)(AV_ROUND_INF | AV_ROUND_PASS_MINMAX));
		packet.stream_index = videoStream->index;
		code = av_interleaved_write_frame(outputFormatContext, &packet);
		av_packet_unref(&packet);
		if (code < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "[ERROR] Writing Live Stream Interleaved Frame");
		}
	}
	return 0;
}

static int initializeOutputFile(std::string output_path)
{
	int error;
	//Create the output format context (FLV container)
	avformat_alloc_output_context2(&outputFormatContext, NULL, "flv", output_path.c_str());
	if (!outputFormatContext)
	{
		av_log(NULL, AV_LOG_ERROR, "Could not create output context!");
		return -1;
	}

	videoStream = avformat_new_stream(outputFormatContext, outputVideoCodec);
	if (!videoStream)
	{
		av_log(NULL, AV_LOG_ERROR, "[ERROR] Could not create Video Stream");
		return -1;
	}

	avcodec_parameters_from_context(videoStream->codecpar,outputVideoCodecContext);
	videoStream->codecpar->codec_tag = 0;

	//Audio stream
	audioStream = avformat_new_stream(outputFormatContext, NULL);
	if (!audioStream)
	{
		av_log(NULL, AV_LOG_ERROR, "[ERROR] Could not create Live Audio Stream");
		return -1;
	}
	avcodec_parameters_copy(audioStream->codecpar, inputFormatContext->streams[audioStreamIndex]->codecpar);
	audioStream->codecpar->codec_tag = 0;

	//Open the output I/O
	error = avio_open(&outputFormatContext->pb, output_path.c_str(), AVIO_FLAG_WRITE);
	if (error < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "[ERROR] Could not open output url");
		return -2;
	}

	//Write the container header
	error = avformat_write_header(outputFormatContext, NULL);
	if (error < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "[ERROR] Could not write header to output format context");
		return -1;
	}
	return 0;
}



int main(int argc, char **argv)
{
	int ret;
	AVPacket packet;
	AVFrame *frame;
	
	int error;

	avformat_network_init();

	if (argc != 4) 
	{
		printf("usage argv[1] input file path argv[2] logo path argv[3] outputpath");
		exit(1);
	}
	//Paths of the media file, the logo and the output
	input_meida_path = std::string(argv[1]);
	logo_path = std::string(argv[2]);
	output_meida_path = std::string(argv[3]);

	//Find the H.264 video encoder for the output
	outputVideoCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
	if (!outputVideoCodec)
	{
		printf("Can't find h264 encoder");
		exit(1);
	}
	//Find the AAC audio encoder for the output
	outputAudioCodec = avcodec_find_encoder(AV_CODEC_ID_AAC);
	if (!outputAudioCodec)
	{
		printf("Can't find aac encoder");
		exit(1);
	}

	//Frames for decoded data, filter output and the logo
	frame = av_frame_alloc();
	filt_frame = av_frame_alloc();
	logoFrame = av_frame_alloc();

	if (!frame || !filt_frame || !logoFrame) {
		printf("Could not allocate frame or filter frame");
		exit(1);
	}

	//Open the media file
	if ((ret = open_meida_file(input_meida_path)) < 0)
		goto end;

	//Open the logo file
	if ((ret = open_logo_file(logo_path)) < 0)
		goto end;

	//Initialize the filter graph
	if ((ret = init_filters(filter_descr)) < 0)
		goto end;

	//Set encoder options, then open the output encoder
	//(private options such as "preset" must be set before avcodec_open2)
	error = av_opt_set(outputVideoCodecContext->priv_data, "preset", "ultrafast", 0);
	if (error < 0)
	{
		printf("[ERROR] av_opt_set preset");
		exit(1);
	}

	error = avcodec_open2(outputVideoCodecContext, outputVideoCodec, NULL);
	if (error < 0)
	{
		printf("[ERROR] avcodec_open2");
		exit(1);
	}

	//Initialize the output media file
	ret = initializeOutputFile(output_meida_path);
	if (ret < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "[ERROR] initializeOutputFile()\n");
		exit(ret);
	}

	startTime = av_gettime();

	decodeTask.swap(std::thread([&] {
		bool ret = true;
		while (ret)
		{
			AVPacket avPacket;
			av_init_packet(&avPacket);
			av_read_frame(logoFormatContext, &avPacket);
			ret = avcodec_send_packet(logoDecoderContext, &avPacket);
			ret = avcodec_receive_frame(logoDecoderContext, logoFrame);
			if (ret) break;
		}
	}));
	decodeTask.join();

	//Allocate a frame for the format-converted logo
	AVFrame* target_logo_frame = av_frame_alloc();
	target_logo_frame->format = videoDecoderContext->pix_fmt;
	target_logo_frame->width = logoFrame->width;
	target_logo_frame->height = logoFrame->height;
	target_logo_frame->pts = 0;

	ret = av_image_alloc(
		target_logo_frame->data, target_logo_frame->linesize, target_logo_frame->width, target_logo_frame->height,
		videoDecoderContext->pix_fmt, 32);

	//Create a conversion context (pixel-format conversion only, same dimensions)
	struct SwsContext *sws_ctx = sws_getContext(
		logoFrame->width, logoFrame->height, logoDecoderContext->pix_fmt, logoFrame->width, logoFrame->height,
		videoDecoderContext->pix_fmt, SWS_BILINEAR, NULL, NULL, NULL);

	//Convert the decoded logo into the video's pixel format
	sws_scale(
		sws_ctx, (const uint8_t *const *)logoFrame->data, logoFrame->linesize, 0,
		logoFrame->height, target_logo_frame->data, target_logo_frame->linesize);

	//Read packets from the input file
	while (1) 
	{
		if ((ret = av_read_frame(inputFormatContext, &packet)) < 0)
			break;

		//Video packets: decode, filter and re-encode
		if (packet.stream_index == videoStreamIndex) 
		{ 
			ret = avcodec_send_packet(videoDecoderContext, &packet);
			if (ret < 0) 
			{
				av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
				break;
			}
			while (ret >= 0) {
				ret = avcodec_receive_frame(videoDecoderContext, frame);
				if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) 
				{
					break;
				}
				else if (ret < 0) 
				{
					av_log(NULL, AV_LOG_ERROR, "Error while receiving a frame from the decoder\n");
					goto end;
				}

				frame->pts = frame->best_effort_timestamp;
				logoFrame->pts = frame->best_effort_timestamp;

				//Feed the logo frame into the scale branch of the graph ("in")
				if (av_buffersrc_add_frame_flags(bufferSourceContext, logoFrame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0)
				{
					av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
					break;
				}
				//Feed the video frame into the overlay branch ("in1")
				if (av_buffersrc_add_frame_flags(bufferSource1Context, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
					av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph1\n");
					break;
				}

				/* pull filtered frames from the filtergraph */
				while (1) {
					ret = av_buffersink_get_frame(bufferSinkContext, filt_frame);
					if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
						break;
					if (ret < 0)
						goto end;
					output_frame(filt_frame, bufferSinkContext->inputs[0]->time_base);
					av_frame_unref(filt_frame);
				}
				av_frame_unref(frame);
			}
		}
		//Audio packets are remuxed directly
		else 
		{
			packet.pts = av_rescale_q_rnd(packet.pts, inputFormatContext->streams[audioStreamIndex]->time_base, audioStream->time_base, (AVRounding)(AV_ROUND_INF | AV_ROUND_PASS_MINMAX));
			packet.dts = av_rescale_q_rnd(packet.dts, inputFormatContext->streams[audioStreamIndex]->time_base, audioStream->time_base, (AVRounding)(AV_ROUND_INF | AV_ROUND_PASS_MINMAX));
			packet.stream_index = audioStream->index;
			av_interleaved_write_frame(outputFormatContext, &packet);
		}
		av_packet_unref(&packet);
	}
end:
	//Write the trailer to finalize the file and flush buffered data
	error = av_write_trailer(outputFormatContext);
	avfilter_graph_free(&filterGraph);
	avcodec_free_context(&videoDecoderContext);
	avcodec_free_context(&logoDecoderContext);
	avformat_close_input(&inputFormatContext);
	av_frame_free(&frame);
	av_frame_free(&filt_frame);
	av_frame_free(&logoFrame);

	if (ret < 0 && ret != AVERROR_EOF) {
		exit(1);
	}
	exit(0);
}
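For reference, a program like this can be built on Linux with pkg-config; a sketch, assuming the source file is named watermark.cpp:

g++ -o watermark watermark.cpp \
    $(pkg-config --cflags --libs libavformat libavfilter libavcodec libswscale libavutil) \
    -lpthread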

Results

The original input video:

[Figure: original input video]


The video after the watermark is added:

[Figure: watermarked output video]