libavcodec API usage: use new video encoding API

avcodec_encode_video() has been replaced with avcodec_encode_video2()
in new libavcodec versions.
This commit is contained in:
Anton Khirnov 2014-01-16 15:40:37 +01:00 committed by Sergey Sharybin
parent 8c3b27ce27
commit b7f8bfef25
3 changed files with 75 additions and 71 deletions

@ -357,4 +357,35 @@ int64_t av_get_pts_from_frame(AVFormatContext *avctx, AVFrame * picture)
# define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
#endif
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54, 1, 0)
/* Compatibility shim: provide avcodec_encode_video2() on top of the
 * deprecated avcodec_encode_video() for libavcodec older than 54.1.0.
 *
 * On success returns 0 and sets *got_output to 1 (packet filled) or 0
 * (no output; packet freed).  On failure returns the negative error. */
FFMPEG_INLINE
int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *pkt,
                          const AVFrame *frame, int *got_output)
{
	/* Worst-case output buffer: 7 bytes per pixel plus header slack,
	 * matching the QTRLE sizing used by the callers of this shim. */
	const int bufsize = avctx->width * avctx->height * 7 + 10000;
	int encoded;
	int err;

	err = av_new_packet(pkt, bufsize);
	if (err < 0) {
		return err;
	}

	encoded = avcodec_encode_video(avctx, pkt->data, pkt->size, frame);
	if (encoded <= 0) {
		/* No data produced (delayed frame drained, or an error):
		 * release the packet buffer we just allocated. */
		*got_output = 0;
		av_free_packet(pkt);
	}
	else {
		*got_output = 1;
		av_shrink_packet(pkt, encoded);
		/* Propagate timing/keyframe info the new API reports via the
		 * packet; the old API leaves it on avctx->coded_frame. */
		if (avctx->coded_frame) {
			pkt->pts = avctx->coded_frame->pts;
			if (avctx->coded_frame->key_frame) {
				pkt->flags |= AV_PKT_FLAG_KEY;
			}
		}
	}

	return encoded >= 0 ? 0 : encoded;
}
#endif
#endif

@ -84,9 +84,6 @@ static AVStream *audio_stream = 0;
static AVFrame *current_frame = 0;
static struct SwsContext *img_convert_ctx = 0;
static uint8_t *video_buffer = 0;
static int video_buffersize = 0;
static uint8_t *audio_input_buffer = 0;
static uint8_t *audio_deinterleave_buffer = 0;
static int audio_input_samples = 0;
@ -309,9 +306,12 @@ static const char **get_file_extensions(int format)
/* Write a frame to the output file */
static int write_video_frame(RenderData *rd, int cfra, AVFrame *frame, ReportList *reports)
{
int outsize = 0;
int got_output;
int ret, success = 1;
AVCodecContext *c = video_stream->codec;
AVPacket packet = { 0 };
av_init_packet(&packet);
frame->pts = cfra;
@ -319,28 +319,28 @@ static int write_video_frame(RenderData *rd, int cfra, AVFrame *frame, ReportLis
frame->top_field_first = ((rd->mode & R_ODDFIELD) != 0);
}
outsize = avcodec_encode_video(c, video_buffer, video_buffersize, frame);
ret = avcodec_encode_video2(c, &packet, frame, &got_output);
if (outsize > 0) {
AVPacket packet;
av_init_packet(&packet);
if (c->coded_frame->pts != AV_NOPTS_VALUE) {
packet.pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_stream->time_base);
if (ret >= 0 && got_output) {
if (packet.pts != AV_NOPTS_VALUE) {
packet.pts = av_rescale_q(packet.pts, c->time_base, video_stream->time_base);
PRINT("Video Frame PTS: %d\n", (int)packet.pts);
}
else {
PRINT("Video Frame PTS: not set\n");
}
if (c->coded_frame->key_frame)
packet.flags |= AV_PKT_FLAG_KEY;
if (packet.dts != AV_NOPTS_VALUE) {
packet.dts = av_rescale_q(packet.dts, c->time_base, video_stream->time_base);
PRINT("Video Frame DTS: %d\n", (int)packet.dts);
} else {
PRINT("Video Frame DTS: not set\n");
}
packet.stream_index = video_stream->index;
packet.data = video_buffer;
packet.size = outsize;
ret = av_interleaved_write_frame(outfile, &packet);
success = (ret == 0);
}
else if (outsize < 0) {
else if (ret < 0) {
success = 0;
}
@ -638,21 +638,6 @@ static AVStream *alloc_video_stream(RenderData *rd, int codec_id, AVFormatContex
return NULL;
}
if (codec_id == AV_CODEC_ID_QTRLE) {
/* normally it should be enough to have buffer with actual image size,
* but some codecs like QTRLE might store extra information in this buffer,
* so it should be a way larger */
/* maximum video buffer size is 6-bytes per pixel, plus DPX header size (1664)
* (from FFmpeg sources) */
int size = c->width * c->height;
video_buffersize = 7 * size + 10000;
}
else
video_buffersize = avpicture_get_size(c->pix_fmt, c->width, c->height);
video_buffer = (uint8_t *)MEM_mallocN(video_buffersize * sizeof(uint8_t), "FFMPEG video buffer");
current_frame = alloc_picture(c->pix_fmt, c->width, c->height);
img_convert_ctx = sws_getContext(c->width, c->height, PIX_FMT_BGR32, c->width, c->height, c->pix_fmt, SWS_BICUBIC,
@ -964,36 +949,38 @@ static int start_ffmpeg_impl(struct RenderData *rd, int rectx, int recty, Report
*/
static void flush_ffmpeg(void)
{
int outsize = 0;
int ret = 0;
AVCodecContext *c = video_stream->codec;
/* get the delayed frames */
while (1) {
AVPacket packet;
int got_output;
AVPacket packet = { 0 };
av_init_packet(&packet);
outsize = avcodec_encode_video(c, video_buffer, video_buffersize, NULL);
if (outsize < 0) {
fprintf(stderr, "Error encoding delayed frame %d\n", outsize);
ret = avcodec_encode_video2(c, &packet, NULL, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding delayed frame %d\n", ret);
break;
}
if (outsize == 0) {
if (!got_output) {
break;
}
if (c->coded_frame->pts != AV_NOPTS_VALUE) {
packet.pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_stream->time_base);
if (packet.pts != AV_NOPTS_VALUE) {
packet.pts = av_rescale_q(packet.pts, c->time_base, video_stream->time_base);
PRINT("Video Frame PTS: %d\n", (int) packet.pts);
}
else {
PRINT("Video Frame PTS: not set\n");
}
if (c->coded_frame->key_frame) {
packet.flags |= AV_PKT_FLAG_KEY;
if (packet.dts != AV_NOPTS_VALUE) {
packet.dts = av_rescale_q(packet.dts, c->time_base, video_stream->time_base);
PRINT("Video Frame DTS: %d\n", (int) packet.dts);
} else {
PRINT("Video Frame DTS: not set\n");
}
packet.stream_index = video_stream->index;
packet.data = video_buffer;
packet.size = outsize;
ret = av_interleaved_write_frame(outfile, &packet);
if (ret != 0) {
fprintf(stderr, "Error writing delayed frame %d\n", ret);
@ -1208,10 +1195,6 @@ static void end_ffmpeg_impl(int is_autosplit)
av_free(outfile);
outfile = 0;
}
if (video_buffer) {
MEM_freeN(video_buffer);
video_buffer = 0;
}
if (audio_input_buffer) {
av_free(audio_input_buffer);
audio_input_buffer = 0;

@ -456,8 +456,6 @@ struct proxy_output_ctx {
AVCodec *codec;
struct SwsContext *sws_ctx;
AVFrame *frame;
uint8_t *video_buffer;
int video_buffersize;
int cfra;
int proxy_size;
int orig_height;
@ -552,10 +550,6 @@ static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
avcodec_open2(rv->c, rv->codec, NULL);
rv->video_buffersize = 2000000;
rv->video_buffer = (uint8_t *)MEM_mallocN(
rv->video_buffersize, "FFMPEG video buffer");
rv->orig_height = av_get_cropped_height_from_codec(st->codec);
if (st->codec->width != width || st->codec->height != height ||
@ -592,7 +586,10 @@ static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
static int add_to_proxy_output_ffmpeg(
struct proxy_output_ctx *ctx, AVFrame *frame)
{
int outsize = 0;
AVPacket packet = { 0 };
int ret, got_output;
av_init_packet(&packet);
if (!ctx) {
return 0;
@ -613,31 +610,26 @@ static int add_to_proxy_output_ffmpeg(
frame->pts = ctx->cfra++;
}
outsize = avcodec_encode_video(
ctx->c, ctx->video_buffer, ctx->video_buffersize,
frame);
if (outsize < 0) {
ret = avcodec_encode_video2(ctx->c, &packet, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding proxy frame %d for '%s'\n",
ctx->cfra - 1, ctx->of->filename);
return 0;
}
if (outsize != 0) {
AVPacket packet;
av_init_packet(&packet);
if (ctx->c->coded_frame->pts != AV_NOPTS_VALUE) {
packet.pts = av_rescale_q(ctx->c->coded_frame->pts,
if (got_output) {
if (packet.pts != AV_NOPTS_VALUE) {
packet.pts = av_rescale_q(packet.pts,
ctx->c->time_base,
ctx->st->time_base);
}
if (packet.dts != AV_NOPTS_VALUE) {
packet.dts = av_rescale_q(packet.dts,
ctx->c->time_base,
ctx->st->time_base);
}
if (ctx->c->coded_frame->key_frame)
packet.flags |= AV_PKT_FLAG_KEY;
packet.stream_index = ctx->st->index;
packet.data = ctx->video_buffer;
packet.size = outsize;
if (av_interleaved_write_frame(ctx->of, &packet) != 0) {
fprintf(stderr, "Error writing proxy frame %d "
@ -680,8 +672,6 @@ static void free_proxy_output_ffmpeg(struct proxy_output_ctx *ctx,
}
avformat_free_context(ctx->of);
MEM_freeN(ctx->video_buffer);
if (ctx->sws_ctx) {
sws_freeContext(ctx->sws_ctx);