
Merge commit '722ec3eb35bc152ce91d0a4502eca0df1c0086d0'

* commit '722ec3eb35bc152ce91d0a4502eca0df1c0086d0':
  avconv: decouple configuring filtergraphs and setting input parameters

Merged-by: Hendrik Leppkes <h.leppkes@gmail.com>
Hendrik Leppkes 2016-11-03 14:53:58 +01:00
commit b6422902d8
4 changed files with 136 additions and 22 deletions
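The merged change makes avconv record each filter input's parameters (format, dimensions, sample aspect ratio, sample rate, channel layout, hw frames context) on the InputFilter itself, either from a decoded frame via ifilter_parameters_from_frame() or from the decoder context via ifilter_parameters_from_decoder(), and has configure_filtergraph() read only those stored values instead of reaching into ist->dec_ctx. For complex graphs configured before any frame is decoded, the ffmpeg_opt.c hunk seeds the same fields from the decoder context first. The standalone C sketch below illustrates that ordering with simplified stand-in types; MiniFilter, MiniFrame, mini_params_from_frame and mini_configure are invented names for illustration and do not appear in the diff.

/*
 * Minimal, hypothetical sketch of the ordering this commit establishes:
 * input parameters are captured on the filter input first, and graph
 * configuration reads only those captured values. The real code uses
 * InputFilter, AVFrame, ifilter_parameters_from_frame() and
 * configure_filtergraph().
 */
#include <stdio.h>

typedef struct MiniFilter {
    int format;            /* pixel/sample format, -1 = not yet set */
    int width, height;     /* video parameters */
    int sample_rate;       /* audio parameter */
} MiniFilter;

typedef struct MiniFrame {
    int format, width, height, sample_rate;
} MiniFrame;

/* Step 1: copy the decoded frame's properties onto the filter input,
 * analogous to ifilter_parameters_from_frame() in the diff. */
static int mini_params_from_frame(MiniFilter *f, const MiniFrame *frame)
{
    f->format      = frame->format;
    f->width       = frame->width;
    f->height      = frame->height;
    f->sample_rate = frame->sample_rate;
    return 0;
}

/* Step 2: configure the graph from the stored parameters only,
 * analogous to configure_filtergraph() no longer reading ist->dec_ctx. */
static int mini_configure(const MiniFilter *f)
{
    if (f->format < 0)
        return -1;  /* parameters were never provided */
    printf("buffersrc args: video_size=%dx%d:pix_fmt=%d\n",
           f->width, f->height, f->format);
    return 0;
}

int main(void)
{
    MiniFilter ifilter = { .format = -1 };       /* like init_simple_filtergraph() */
    MiniFrame  decoded = { 0, 1280, 720, 0 };    /* format 0 standing in for a pix_fmt */

    mini_params_from_frame(&ifilter, &decoded);  /* record input parameters */
    return mini_configure(&ifilter);             /* then configure the graph */
}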

ffmpeg.c

@@ -471,6 +471,7 @@ static void ffmpeg_cleanup(int ret)
FilterGraph *fg = filtergraphs[i];
avfilter_graph_free(&fg->graph);
for (j = 0; j < fg->nb_inputs; j++) {
av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
av_freep(&fg->inputs[j]->name);
av_freep(&fg->inputs[j]);
}
@@ -2128,6 +2129,16 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
ist->resample_channel_layout = decoded_frame->channel_layout;
ist->resample_channels = avctx->channels;
for (i = 0; i < ist->nb_filters; i++) {
err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
if (err < 0) {
av_log(NULL, AV_LOG_ERROR,
"Error reconfiguring input stream %d:%d filter %d\n",
ist->file_index, ist->st->index, i);
goto fail;
}
}
for (i = 0; i < nb_filtergraphs; i++)
if (ist_in_filtergraph(filtergraphs[i], ist)) {
FilterGraph *fg = filtergraphs[i];
@@ -2169,6 +2180,7 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
}
decoded_frame->pts = AV_NOPTS_VALUE;
fail:
av_frame_unref(ist->filter_frame);
av_frame_unref(decoded_frame);
return err < 0 ? err : ret;
@@ -2307,6 +2319,16 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eo
ist->resample_height = decoded_frame->height;
ist->resample_pix_fmt = decoded_frame->format;
for (i = 0; i < ist->nb_filters; i++) {
err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
if (err < 0) {
av_log(NULL, AV_LOG_ERROR,
"Error reconfiguring input stream %d:%d filter %d\n",
ist->file_index, ist->st->index, i);
goto fail;
}
}
for (i = 0; i < nb_filtergraphs; i++) {
if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
configure_filtergraph(filtergraphs[i]) < 0) {
@@ -3305,6 +3327,16 @@ static int transcode_init(void)
enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
filtergraph_is_simple(ost->filter->graph)) {
FilterGraph *fg = ost->filter->graph;
if (dec_ctx) {
ret = ifilter_parameters_from_decoder(fg->inputs[0],
dec_ctx);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "Error initializing filter input\n");
exit_program(1);
}
}
if (configure_filtergraph(fg)) {
av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
exit_program(1);

ffmpeg.h

@@ -231,6 +231,18 @@ typedef struct InputFilter {
struct InputStream *ist;
struct FilterGraph *graph;
uint8_t *name;
// parameters configured for this input
int format;
int width, height;
AVRational sample_aspect_ratio;
int sample_rate;
int channels;
uint64_t channel_layout;
AVBufferRef *hw_frames_ctx;
} InputFilter;
typedef struct OutputFilter {
@@ -600,6 +612,9 @@ int filtergraph_is_simple(FilterGraph *fg);
int init_simple_filtergraph(InputStream *ist, OutputStream *ost);
int init_complex_filtergraph(FilterGraph *fg);
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame);
int ifilter_parameters_from_decoder(InputFilter *ifilter, const AVCodecContext *avctx);
int ffmpeg_parse_options(int argc, char **argv);
int vdpau_init(AVCodecContext *s);

ffmpeg_filter.c

@@ -214,6 +214,7 @@ int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
exit_program(1);
fg->inputs[0]->ist = ist;
fg->inputs[0]->graph = fg;
fg->inputs[0]->format = -1;
GROW_ARRAY(ist->filters, ist->nb_filters);
ist->filters[ist->nb_filters - 1] = fg->inputs[0];
@@ -292,6 +293,7 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
exit_program(1);
fg->inputs[fg->nb_inputs - 1]->ist = ist;
fg->inputs[fg->nb_inputs - 1]->graph = fg;
fg->inputs[fg->nb_inputs - 1]->format = -1;
GROW_ARRAY(ist->filters, ist->nb_filters);
ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
@@ -667,7 +669,7 @@ int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOu
}
}
static int sub2video_prepare(InputStream *ist)
static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
{
AVFormatContext *avf = input_files[ist->file_index]->ctx;
int i, w, h;
@@ -675,8 +677,8 @@ static int sub2video_prepare(InputStream *ist)
/* Compute the size of the canvas for the subtitles stream.
If the subtitles codecpar has set a size, use it. Otherwise use the
maximum dimensions of the video streams in the same file. */
w = ist->dec_ctx->width;
h = ist->dec_ctx->height;
w = ifilter->width;
h = ifilter->height;
if (!(w && h)) {
for (i = 0; i < avf->nb_streams; i++) {
if (avf->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
@@ -695,7 +697,7 @@ static int sub2video_prepare(InputStream *ist)
/* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
palettes for all rectangles are identical or compatible */
ist->resample_pix_fmt = ist->dec_ctx->pix_fmt = AV_PIX_FMT_RGB32;
ist->resample_pix_fmt = ifilter->format = AV_PIX_FMT_RGB32;
ist->sub2video.frame = av_frame_alloc();
if (!ist->sub2video.frame)
@@ -736,22 +738,19 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
fr = av_guess_frame_rate(input_files[ist->file_index]->ctx, ist->st, NULL);
if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
ret = sub2video_prepare(ist);
ret = sub2video_prepare(ist, ifilter);
if (ret < 0)
goto fail;
}
sar = ist->st->sample_aspect_ratio.num ?
ist->st->sample_aspect_ratio :
ist->dec_ctx->sample_aspect_ratio;
sar = ifilter->sample_aspect_ratio;
if(!sar.den)
sar = (AVRational){0,1};
av_bprint_init(&args, 0, 1);
av_bprintf(&args,
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
"pixel_aspect=%d/%d:sws_param=flags=%d", ist->resample_width,
ist->resample_height,
ist->hwaccel_retrieve_data ? ist->hwaccel_retrieved_pix_fmt : ist->resample_pix_fmt,
"pixel_aspect=%d/%d:sws_param=flags=%d",
ifilter->width, ifilter->height, ifilter->format,
tb.num, tb.den, sar.num, sar.den,
SWS_BILINEAR + ((ist->dec_ctx->flags&AV_CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
if (fr.num && fr.den)
@@ -763,7 +762,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
if ((ret = avfilter_graph_create_filter(&ifilter->filter, buffer_filt, name,
args.str, NULL, fg->graph)) < 0)
goto fail;
par->hw_frames_ctx = ist->hw_frames_ctx;
par->hw_frames_ctx = ifilter->hw_frames_ctx;
ret = av_buffersrc_parameters_set(ifilter->filter, par);
if (ret < 0)
goto fail;
@@ -866,14 +865,14 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1, ist->dec_ctx->sample_rate,
ist->dec_ctx->sample_rate,
av_get_sample_fmt_name(ist->dec_ctx->sample_fmt));
if (ist->dec_ctx->channel_layout)
1, ifilter->sample_rate,
ifilter->sample_rate,
av_get_sample_fmt_name(ifilter->format));
if (ifilter->channel_layout)
av_bprintf(&args, ":channel_layout=0x%"PRIx64,
ist->dec_ctx->channel_layout);
ifilter->channel_layout);
else
av_bprintf(&args, ":channels=%d", ist->dec_ctx->channels);
av_bprintf(&args, ":channels=%d", ifilter->channels);
snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
ist->file_index, ist->st->index);
@@ -1093,6 +1092,58 @@ int configure_filtergraph(FilterGraph *fg)
return 0;
}
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
{
av_buffer_unref(&ifilter->hw_frames_ctx);
ifilter->format = frame->format;
ifilter->width = frame->width;
ifilter->height = frame->height;
ifilter->sample_aspect_ratio = frame->sample_aspect_ratio;
ifilter->sample_rate = frame->sample_rate;
ifilter->channels = av_frame_get_channels(frame);
ifilter->channel_layout = frame->channel_layout;
if (frame->hw_frames_ctx) {
ifilter->hw_frames_ctx = av_buffer_ref(frame->hw_frames_ctx);
if (!ifilter->hw_frames_ctx)
return AVERROR(ENOMEM);
}
return 0;
}
int ifilter_parameters_from_decoder(InputFilter *ifilter, const AVCodecContext *avctx)
{
av_buffer_unref(&ifilter->hw_frames_ctx);
if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
ifilter->format = avctx->pix_fmt;
else
ifilter->format = avctx->sample_fmt;
ifilter->width = avctx->width;
ifilter->height = avctx->height;
if (ifilter->ist && ifilter->ist->st && ifilter->ist->st->sample_aspect_ratio.num)
ifilter->sample_aspect_ratio = ifilter->ist->st->sample_aspect_ratio;
else
ifilter->sample_aspect_ratio = avctx->sample_aspect_ratio;
ifilter->sample_rate = avctx->sample_rate;
ifilter->channels = avctx->channels;
ifilter->channel_layout = avctx->channel_layout;
if (avctx->hw_frames_ctx) {
ifilter->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
if (!ifilter->hw_frames_ctx)
return AVERROR(ENOMEM);
}
return 0;
}
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
{
int i;

ffmpeg_opt.c

@@ -2008,12 +2008,28 @@ static int init_complex_filters(void)
static int configure_complex_filters(void)
{
int i, ret = 0;
int i, j, ret = 0;
for (i = 0; i < nb_filtergraphs; i++)
if (!filtergraph_is_simple(filtergraphs[i]) &&
(ret = configure_filtergraph(filtergraphs[i])) < 0)
for (i = 0; i < nb_filtergraphs; i++) {
FilterGraph *fg = filtergraphs[i];
if (filtergraph_is_simple(fg))
continue;
for (j = 0; j < fg->nb_inputs; j++) {
ret = ifilter_parameters_from_decoder(fg->inputs[j],
fg->inputs[j]->ist->dec_ctx);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR,
"Error initializing filtergraph %d input %d\n", i, j);
return ret;
}
}
ret = configure_filtergraph(filtergraphs[i]);
if (ret < 0)
return ret;
}
return 0;
}