FFMPEG: adding support for ffmpeg planar formats also to writeffmpeg.c.

Author: Joerg Mueller
Date: 2013-04-01 15:31:27 +00:00
parent c35dec60b5
commit 8588fec935
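
What "planar" means here, for readers of the patch: packed formats such as AV_SAMPLE_FMT_S16 interleave the channels (L R L R ...), while their planar counterparts such as AV_SAMPLE_FMT_S16P keep one contiguous plane per channel (L L ... R R ...), which is what several newer FFmpeg encoders expect. A minimal stand-alone sketch of that conversion; the helper name and buffers are illustrative only and do not appear in the patch:

#include <stdint.h>

/* --- illustration only, not part of the commit --- */
/* Copy packed S16 stereo (L R L R ...) into per-channel planes (L L ... R R ...). */
static void deinterleave_s16(const int16_t *packed, int16_t *planar,
                             int channels, int nb_samples)
{
	int channel, i;
	for (channel = 0; channel < channels; channel++)
		for (i = 0; i < nb_samples; i++)
			planar[channel * nb_samples + i] = packed[i * channels + channel];
}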

@@ -88,10 +88,11 @@ static uint8_t *video_buffer = 0;
static int video_buffersize = 0;
static uint8_t *audio_input_buffer = 0;
static uint8_t *audio_deinterleave_buffer = 0;
static int audio_input_samples = 0;
static uint8_t *audio_output_buffer = 0;
static int audio_outbuf_size = 0;
static double audio_time = 0.0f;
static bool audio_deinterleave = false;
static int audio_sample_size = 0;
#ifdef WITH_AUDASPACE
static AUD_Device *audio_mixdown_device = 0;
@@ -122,24 +123,48 @@ static int write_audio_frame(void)
{
AVCodecContext *c = NULL;
AVPacket pkt;
AVFrame* frame;
int got_output = 0;
c = audio_stream->codec;
av_init_packet(&pkt);
pkt.size = 0;
pkt.data = NULL;
frame = avcodec_alloc_frame();
frame->nb_samples = audio_input_samples;
frame->format = c->sample_fmt;
frame->channel_layout = c->channel_layout;
AUD_readDevice(audio_mixdown_device, audio_input_buffer, audio_input_samples);
audio_time += (double) audio_input_samples / (double) c->sample_rate;
/* the pre-planar path encoded the interleaved buffer directly with the old API:
 *   pkt.size = avcodec_encode_audio(c, audio_output_buffer, audio_outbuf_size, (short *) audio_input_buffer);
 *   if (pkt.size < 0) { ... }
 *   pkt.data = audio_output_buffer;
 * planar sample formats first need the channels split into separate planes */
if(audio_deinterleave) {
int channel, i;
uint8_t* temp;

for(channel = 0; channel < c->channels; channel++) {
for(i = 0; i < frame->nb_samples; i++) {
memcpy(audio_deinterleave_buffer + (i + channel * frame->nb_samples) * audio_sample_size,
audio_input_buffer + (c->channels * i + channel) * audio_sample_size, audio_sample_size);
}
}

temp = audio_deinterleave_buffer;
audio_deinterleave_buffer = audio_input_buffer;
audio_input_buffer = temp;
}

avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt, audio_input_buffer,
audio_input_samples * c->channels * audio_sample_size, 0);

if(avcodec_encode_audio2(c, &pkt, frame, &got_output) < 0) {
// XXX error("Error writing audio packet");
return -1;
}
if(got_output) {
if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE) {
pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, audio_stream->time_base);
PRINT("Audio Frame PTS: %d\n", (int) pkt.pts);
@@ -153,6 +178,12 @@ static int write_audio_frame(void)
fprintf(stderr, "Error writing audio packet!\n");
return -1;
}
av_free_packet(&pkt);
}
avcodec_free_frame(&frame);
return 0;
}
#endif // #ifdef WITH_AUDASPACE
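
Why the deinterleave loop above writes channel number `channel` at offset channel * nb_samples * audio_sample_size: when avcodec_fill_audio_frame() is handed one contiguous buffer for a planar format, it slices that buffer into equal per-channel planes in exactly that order. A small self-contained check of that assumption against the FFmpeg API of this era; the sizes and format are illustrative, not taken from the commit:

#include <stdint.h>
#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>

/* --- illustration only, not part of the commit --- */
int main(void)
{
	enum AVSampleFormat fmt = AV_SAMPLE_FMT_FLTP;    /* planar float */
	int channels = 2, nb_samples = 1024;
	int sample_size = av_get_bytes_per_sample(fmt);  /* 4 bytes */
	static uint8_t buf[2 * 1024 * 4];
	AVFrame *frame = avcodec_alloc_frame();

	frame->nb_samples = nb_samples;
	frame->format = fmt;

	avcodec_fill_audio_frame(frame, channels, fmt, buf,
	                         channels * nb_samples * sample_size, 0);

	/* for these sizes: frame->data[0] == buf (first channel plane),
	 * frame->data[1] == buf + nb_samples * sample_size (second channel plane) */

	avcodec_free_frame(&frame);
	return 0;
}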
@@ -608,8 +639,6 @@ static AVStream *alloc_video_stream(RenderData *rd, int codec_id, AVFormatContex
return st;
}
/* Prepare an audio stream for the output file */
static AVStream *alloc_audio_stream(RenderData *rd, int codec_id, AVFormatContext *of, char *error, int error_size)
{
AVStream *st;
@@ -659,11 +688,6 @@ static AVStream *alloc_audio_stream(RenderData *rd, int codec_id, AVFormatContex
}
}
/* the earlier rejection of planar float is dropped:
 *   if (c->sample_fmt == AV_SAMPLE_FMT_FLTP) {
 *       BLI_strncpy(error, "Requested audio codec requires planar float sample format, which is not supported yet", error_size);
 *       return NULL;
 *   }
 * planar sample formats are now handled by deinterleaving in write_audio_frame() */
if (codec->supported_samplerates) {
const int *p = codec->supported_samplerates;
int best = 0;
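
Context for why the planar-float rejection above could go: several encoders in the FFmpeg/Libav builds of this period advertise only planar formats in codec->sample_fmts, so refusing FLTP effectively refused the codec altogether. A sketch of how that list can be inspected, using the registration and lookup calls of that era (now deprecated); the choice of CODEC_ID_AAC is only an example:

#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>

/* --- illustration only, not part of the commit --- */
int main(void)
{
	const AVCodec *codec;
	const enum AVSampleFormat *p;

	avcodec_register_all();                 /* needed in FFmpeg of this era */
	codec = avcodec_find_encoder(CODEC_ID_AAC);
	if (!codec)
		return 1;
	for (p = codec->sample_fmts; p && *p != AV_SAMPLE_FMT_NONE; p++)
		printf("%s\n", av_get_sample_fmt_name(*p));
	return 0;
}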
@@ -692,24 +716,23 @@ static AVStream *alloc_audio_stream(RenderData *rd, int codec_id, AVFormatContex
st->codec->time_base.num = 1;
st->codec->time_base.den = st->codec->sample_rate;
audio_outbuf_size = FF_MIN_BUFFER_SIZE;
/* removed here: the PCM codec-id range check quoted in the comment below,
 * which computed audio_input_samples from audio_outbuf_size instead of FF_MIN_BUFFER_SIZE */
if (c->frame_size == 0)
// used to be if((c->codec_id >= CODEC_ID_PCM_S16LE) && (c->codec_id <= CODEC_ID_PCM_DVD))
// not sure if that is needed anymore, so let's try out if there are any
// complaints regarding some ffmpeg versions users might have
audio_input_samples = FF_MIN_BUFFER_SIZE * 8 / c->bits_per_coded_sample / c->channels;
else {
audio_input_samples = c->frame_size;
if (c->frame_size * c->channels * sizeof(int16_t) * 4 > audio_outbuf_size)
audio_outbuf_size = c->frame_size * c->channels * sizeof(int16_t) * 4;
}
audio_output_buffer = (uint8_t *) av_malloc(audio_outbuf_size);
audio_deinterleave = av_sample_fmt_is_planar(c->sample_fmt);
/* the per-format allocation
 *   if (c->sample_fmt == AV_SAMPLE_FMT_FLT)
 *       audio_input_buffer = (uint8_t *) av_malloc(audio_input_samples * c->channels * sizeof(float));
 *   else
 *       audio_input_buffer = (uint8_t *) av_malloc(audio_input_samples * c->channels * sizeof(int16_t));
 * becomes a single allocation based on the codec's actual bytes per sample */
audio_sample_size = av_get_bytes_per_sample(c->sample_fmt);
audio_input_buffer = (uint8_t *) av_malloc(audio_input_samples * c->channels * audio_sample_size);
if(audio_deinterleave)
audio_deinterleave_buffer = (uint8_t *) av_malloc(audio_input_samples * c->channels * audio_sample_size);
audio_time = 0.0f;
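
The two properties the allocation above keys off, planarity and bytes per sample, come straight from libavutil. A tiny stand-alone probe of a few formats; the format list is arbitrary and only illustrative:

#include <stdio.h>
#include <libavutil/samplefmt.h>

/* --- illustration only, not part of the commit --- */
int main(void)
{
	const enum AVSampleFormat fmts[] = {
		AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
		AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
	};
	int i;

	for (i = 0; i < 4; i++)
		printf("%-5s planar=%d bytes_per_sample=%d\n",
		       av_get_sample_fmt_name(fmts[i]),
		       av_sample_fmt_is_planar(fmts[i]),
		       av_get_bytes_per_sample(fmts[i]));
	return 0;
}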
@@ -1010,12 +1033,28 @@ int BKE_ffmpeg_start(struct Scene *scene, RenderData *rd, int rectx, int recty,
AVCodecContext *c = audio_stream->codec;
AUD_DeviceSpecs specs;
specs.channels = c->channels;
/* the old two-way check
 *   if (c->sample_fmt == AV_SAMPLE_FMT_FLT) {
 *       specs.format = AUD_FORMAT_FLOAT32;
 *   }
 *   else { ... }
 * is replaced by a switch over the packed sample format */
switch(av_get_packed_sample_fmt(c->sample_fmt))
{
case AV_SAMPLE_FMT_U8:
specs.format = AUD_FORMAT_U8;
break;
case AV_SAMPLE_FMT_S16:
specs.format = AUD_FORMAT_S16;
break;
case AV_SAMPLE_FMT_S32:
specs.format = AUD_FORMAT_S32;
break;
case AV_SAMPLE_FMT_FLT:
specs.format = AUD_FORMAT_FLOAT32;
break;
case AV_SAMPLE_FMT_DBL:
specs.format = AUD_FORMAT_FLOAT64;
break;
default:
return -31415;
}
specs.rate = rd->ffcodecdata.audio_mixrate;
audio_mixdown_device = sound_mixdown(scene, specs, rd->sfra, rd->ffcodecdata.audio_volume);
#ifdef FFMPEG_CODEC_TIME_BASE
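
The switch above only needs to name the packed formats because av_get_packed_sample_fmt() maps each planar variant back onto its packed counterpart and leaves packed formats untouched. A couple of stand-alone spot checks of that behaviour (illustrative only):

#include <assert.h>
#include <libavutil/samplefmt.h>

/* --- illustration only, not part of the commit --- */
int main(void)
{
	assert(av_get_packed_sample_fmt(AV_SAMPLE_FMT_FLTP) == AV_SAMPLE_FMT_FLT);
	assert(av_get_packed_sample_fmt(AV_SAMPLE_FMT_S16P) == AV_SAMPLE_FMT_S16);
	assert(av_get_packed_sample_fmt(AV_SAMPLE_FMT_S16) == AV_SAMPLE_FMT_S16);
	return 0;
}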
@@ -1138,15 +1177,16 @@ static void end_ffmpeg_impl(int is_autosplit)
MEM_freeN(video_buffer);
video_buffer = 0;
}
if (audio_output_buffer) {
av_free(audio_output_buffer);
audio_output_buffer = 0;
}
if (audio_input_buffer) {
av_free(audio_input_buffer);
audio_input_buffer = 0;
}
if (audio_deinterleave_buffer) {
av_free(audio_deinterleave_buffer);
audio_deinterleave_buffer = 0;
}
if (img_convert_ctx) {
sws_freeContext(img_convert_ctx);
img_convert_ctx = 0;