
Merge commit '755cd4197d53946208e042f095b930dca18d9430'

* commit '755cd4197d53946208e042f095b930dca18d9430':
  mov: enable parsing for VC-1.
  lavfi: Add fps filter.
  lavfi: initialize pts to AV_NOPTS_VALUE when creating new buffer refs.
  avconv: add support for audio in complex filtergraphs.

Conflicts:
	ffmpeg.c
	libavfilter/version.h
	libavformat/mov.c
	tests/ref/fate/vc1-ism

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer 2012-05-19 20:31:24 +02:00
commit 040a796dab
10 changed files with 429 additions and 15 deletions

Changelog

@@ -31,6 +31,7 @@ version next:
- Avid Meridien (AVUI) decoder
- accept + prefix to -pix_fmt option to disable automatic conversions.
- audio filters support in libavfilter and avconv
- add fps filter
version 0.10:

doc/filters.texi

@@ -1730,6 +1730,19 @@ format=yuv420p
format=yuv420p:yuv444p:yuv410p
@end example
@section fps
Convert the video to specified constant framerate by duplicating or dropping
frames as necessary.
This filter accepts the following named parameters:
@table @option
@item fps
Desired output framerate.
@end table
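As a usage sketch (an editorial illustration, not text from this commit; it assumes only the named fps parameter documented above), forcing the output to 30 frames per second in a filtergraph would be written as:

@example
fps=fps=30
@end example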
@anchor{frei0r}
@section frei0r

ffmpeg.c

@@ -249,7 +249,7 @@ typedef struct InputStream {
int dr1;
/* decoded data from this stream goes into all those filters
* currently video only */
* currently video and audio only */
InputFilter **filters;
int nb_filters;
} InputStream;
@@ -1109,8 +1109,9 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
int i;
// TODO: support other filter types
if (type != AVMEDIA_TYPE_VIDEO) {
av_log(NULL, AV_LOG_FATAL, "Only video filters supported currently.\n");
if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
"currently.\n");
exit_program(1);
}
@@ -1171,7 +1172,7 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
}
static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
char *pix_fmts;
AVCodecContext *codec = ofilter->ost->st->codec;
@@ -1233,6 +1234,104 @@ static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFil
return 0;
}
static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
OutputStream *ost = ofilter->ost;
AVCodecContext *codec = ost->st->codec;
AVFilterContext *last_filter = out->filter_ctx;
int pad_idx = out->pad_idx;
char *sample_fmts, *sample_rates, *channel_layouts;
int ret;
ret = avfilter_graph_create_filter(&ofilter->filter,
avfilter_get_by_name("abuffersink"),
"out", NULL, NULL, fg->graph);
if (ret < 0)
return ret;
if (codec->channels && !codec->channel_layout)
codec->channel_layout = av_get_default_channel_layout(codec->channels);
sample_fmts = choose_sample_fmts(ost);
sample_rates = choose_sample_rates(ost);
channel_layouts = choose_channel_layouts(ost);
if (sample_fmts || sample_rates || channel_layouts) {
AVFilterContext *format;
char args[256];
int len = 0;
if (sample_fmts)
len += snprintf(args + len, sizeof(args) - len, "sample_fmts=%s:",
sample_fmts);
if (sample_rates)
len += snprintf(args + len, sizeof(args) - len, "sample_rates=%s:",
sample_rates);
if (channel_layouts)
len += snprintf(args + len, sizeof(args) - len, "channel_layouts=%s:",
channel_layouts);
args[len - 1] = 0;
av_freep(&sample_fmts);
av_freep(&sample_rates);
av_freep(&channel_layouts);
ret = avfilter_graph_create_filter(&format,
avfilter_get_by_name("aformat"),
"aformat", args, NULL, fg->graph);
if (ret < 0)
return ret;
ret = avfilter_link(last_filter, pad_idx, format, 0);
if (ret < 0)
return ret;
last_filter = format;
pad_idx = 0;
}
if (audio_sync_method > 0) {
AVFilterContext *async;
char args[256];
int len = 0;
av_log(NULL, AV_LOG_WARNING, "-async has been deprecated. Used the "
"asyncts audio filter instead.\n");
if (audio_sync_method > 1)
len += snprintf(args + len, sizeof(args) - len, "compensate=1:"
"max_comp=%d:", audio_sync_method);
snprintf(args + len, sizeof(args) - len, "min_delta=%f",
audio_drift_threshold);
ret = avfilter_graph_create_filter(&async,
avfilter_get_by_name("asyncts"),
"async", args, NULL, fg->graph);
if (ret < 0)
return ret;
ret = avfilter_link(last_filter, pad_idx, async, 0);
if (ret < 0)
return ret;
last_filter = async;
pad_idx = 0;
}
if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
return ret;
return 0;
}
static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
switch (out->filter_ctx->output_pads[out->pad_idx].type) {
case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
default: av_assert0(0);
}
}
static int configure_complex_filter(FilterGraph *fg)
{
AVFilterInOut *inputs, *outputs, *cur;
@@ -1252,16 +1351,34 @@ static int configure_complex_filter(FilterGraph *fg)
InputFilter *ifilter = fg->inputs[i];
InputStream *ist = ifilter->ist;
AVRational sar;
AVFilter *filter;
char args[255];
sar = ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
switch (cur->filter_ctx->input_pads[cur->pad_idx].type) {
case AVMEDIA_TYPE_VIDEO:
sar = ist->st->sample_aspect_ratio.num ?
ist->st->sample_aspect_ratio :
ist->st->codec->sample_aspect_ratio;
snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
sar.num, sar.den);
filter = avfilter_get_by_name("buffer");
break;
case AVMEDIA_TYPE_AUDIO:
snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:"
"sample_fmt=%s:channel_layout=0x%"PRIx64,
ist->st->time_base.num, ist->st->time_base.den,
ist->st->codec->sample_rate,
av_get_sample_fmt_name(ist->st->codec->sample_fmt),
ist->st->codec->channel_layout);
filter = avfilter_get_by_name("abuffer");
break;
default:
av_assert0(0);
}
if ((ret = avfilter_graph_create_filter(&ifilter->filter,
avfilter_get_by_name("buffer"), cur->name,
filter, cur->name,
args, NULL, fg->graph)) < 0)
return ret;
if ((ret = avfilter_link(ifilter->filter, 0,
@@ -4814,12 +4931,15 @@ static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
{
OutputStream *ost;
if (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type != AVMEDIA_TYPE_VIDEO) {
av_log(NULL, AV_LOG_FATAL, "Only video filters are supported currently.\n");
switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) {
case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc, -1); break;
case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc, -1); break;
default:
av_log(NULL, AV_LOG_FATAL, "Only video and audio filters are supported "
"currently.\n");
exit_program(1);
}
ost = new_video_stream(o, oc, -1);
ost->source_index = -1;
ost->filter = ofilter;

libavfilter/Makefile

@@ -83,6 +83,7 @@ OBJS-$(CONFIG_FADE_FILTER) += vf_fade.o
OBJS-$(CONFIG_FIELDORDER_FILTER) += vf_fieldorder.o
OBJS-$(CONFIG_FIFO_FILTER) += vf_fifo.o
OBJS-$(CONFIG_FORMAT_FILTER) += vf_format.o
OBJS-$(CONFIG_FPS_FILTER) += vf_fps.o
OBJS-$(CONFIG_FREI0R_FILTER) += vf_frei0r.o
OBJS-$(CONFIG_GRADFUN_FILTER) += vf_gradfun.o
OBJS-$(CONFIG_HFLIP_FILTER) += vf_hflip.o

libavfilter/allfilters.c

@@ -73,6 +73,7 @@ void avfilter_register_all(void)
REGISTER_FILTER (FIELDORDER, fieldorder, vf);
REGISTER_FILTER (FIFO, fifo, vf);
REGISTER_FILTER (FORMAT, format, vf);
REGISTER_FILTER (FPS, fps, vf);
REGISTER_FILTER (FREI0R, frei0r, vf);
REGISTER_FILTER (GRADFUN, gradfun, vf);
REGISTER_FILTER (HFLIP, hflip, vf);


@@ -133,6 +133,8 @@ AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
samplesref->extended_data = samplesref->data;
}
samplesref->pts = AV_NOPTS_VALUE;
return samplesref;
fail:

libavfilter/version.h

@@ -29,8 +29,8 @@
#include "libavutil/avutil.h"
#define LIBAVFILTER_VERSION_MAJOR 2
#define LIBAVFILTER_VERSION_MINOR 74
#define LIBAVFILTER_VERSION_MICRO 102
#define LIBAVFILTER_VERSION_MINOR 75
#define LIBAVFILTER_VERSION_MICRO 100
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
LIBAVFILTER_VERSION_MINOR, \

libavfilter/vf_fps.c (new file)

@@ -0,0 +1,271 @@
/*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* a filter enforcing given constant framerate
*/
#include "libavutil/fifo.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
typedef struct FPSContext {
const AVClass *class;
AVFifoBuffer *fifo; ///< store frames until we get two successive timestamps
/* timestamps in input timebase */
int64_t first_pts; ///< pts of the first frame that arrived on this filter
int64_t pts; ///< pts of the first frame currently in the fifo
AVRational framerate; ///< target framerate
char *fps; ///< a string describing target framerate
/* statistics */
int frames_in; ///< number of frames on input
int frames_out; ///< number of frames on output
int dup; ///< number of frames duplicated
int drop; ///< number of frames dropped
} FPSContext;
#define OFFSET(x) offsetof(FPSContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
static const AVOption options[] = {
{ "fps", "A string describing desired output framerate", OFFSET(fps), AV_OPT_TYPE_STRING, { .str = "25" }, .flags = V },
{ NULL },
};
static const AVClass class = {
.class_name = "FPS filter",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
{
FPSContext *s = ctx->priv;
int ret;
s->class = &class;
av_opt_set_defaults(s);
if ((ret = av_set_options_string(s, args, "=", ":")) < 0) {
av_log(ctx, AV_LOG_ERROR, "Error parsing the options string %s.\n",
args);
return ret;
}
if ((ret = av_parse_video_rate(&s->framerate, s->fps)) < 0) {
av_log(ctx, AV_LOG_ERROR, "Error parsing framerate %s.\n", s->fps);
return ret;
}
av_opt_free(s);
if (!(s->fifo = av_fifo_alloc(2*sizeof(AVFilterBufferRef*))))
return AVERROR(ENOMEM);
av_log(ctx, AV_LOG_VERBOSE, "fps=%d/%d\n", s->framerate.num, s->framerate.den);
return 0;
}
static void flush_fifo(AVFifoBuffer *fifo)
{
while (av_fifo_size(fifo)) {
AVFilterBufferRef *tmp;
av_fifo_generic_read(fifo, &tmp, sizeof(tmp), NULL);
avfilter_unref_buffer(tmp);
}
}
static av_cold void uninit(AVFilterContext *ctx)
{
FPSContext *s = ctx->priv;
if (s->fifo) {
flush_fifo(s->fifo);
av_fifo_free(s->fifo);
}
av_log(ctx, AV_LOG_VERBOSE, "%d frames in, %d frames out; %d frames dropped, "
"%d frames duplicated.\n", s->frames_in, s->frames_out, s->drop, s->dup);
}
static int config_props(AVFilterLink* link)
{
FPSContext *s = link->src->priv;
link->time_base = (AVRational){ s->framerate.den, s->framerate.num };
link->w = link->src->inputs[0]->w;
link->h = link->src->inputs[0]->h;
s->pts = AV_NOPTS_VALUE;
return 0;
}
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
FPSContext *s = ctx->priv;
int frames_out = s->frames_out;
int ret = 0;
while (ret >= 0 && s->frames_out == frames_out)
ret = avfilter_request_frame(ctx->inputs[0]);
/* flush the fifo */
if (ret == AVERROR_EOF && av_fifo_size(s->fifo)) {
int i;
for (i = 0; av_fifo_size(s->fifo); i++) {
AVFilterBufferRef *buf;
av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
buf->pts = av_rescale_q(s->first_pts, ctx->inputs[0]->time_base,
outlink->time_base) + s->frames_out;
avfilter_start_frame(outlink, buf);
avfilter_draw_slice(outlink, 0, outlink->h, 1);
avfilter_end_frame(outlink);
s->frames_out++;
}
return 0;
}
return ret;
}
static int write_to_fifo(AVFifoBuffer *fifo, AVFilterBufferRef *buf)
{
int ret;
if (!av_fifo_space(fifo) &&
(ret = av_fifo_realloc2(fifo, 2*av_fifo_size(fifo))))
return ret;
av_fifo_generic_write(fifo, &buf, sizeof(buf), NULL);
return 0;
}
static void end_frame(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
FPSContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFilterBufferRef *buf = inlink->cur_buf;
int64_t delta;
int i;
s->frames_in++;
/* discard frames until we get the first timestamp */
if (s->pts == AV_NOPTS_VALUE) {
if (buf->pts != AV_NOPTS_VALUE) {
write_to_fifo(s->fifo, buf);
s->first_pts = s->pts = buf->pts;
} else {
av_log(ctx, AV_LOG_WARNING, "Discarding initial frame(s) with no "
"timestamp.\n");
avfilter_unref_buffer(buf);
s->drop++;
}
return;
}
/* now wait for the next timestamp */
if (buf->pts == AV_NOPTS_VALUE) {
write_to_fifo(s->fifo, buf);
return;
}
/* number of output frames */
delta = av_rescale_q(buf->pts - s->pts, inlink->time_base,
outlink->time_base);
if (delta < 1) {
/* drop the frame and everything buffered except the first */
AVFilterBufferRef *tmp;
int drop = av_fifo_size(s->fifo)/sizeof(AVFilterBufferRef*);
av_log(ctx, AV_LOG_DEBUG, "Dropping %d frame(s).\n", drop);
s->drop += drop;
av_fifo_generic_read(s->fifo, &tmp, sizeof(tmp), NULL);
flush_fifo(s->fifo);
write_to_fifo(s->fifo, tmp);
avfilter_unref_buffer(buf);
return;
}
/* can output >= 1 frames */
for (i = 0; i < delta; i++) {
AVFilterBufferRef *buf_out;
av_fifo_generic_read(s->fifo, &buf_out, sizeof(buf_out), NULL);
/* duplicate the frame if needed */
if (!av_fifo_size(s->fifo) && i < delta - 1) {
av_log(ctx, AV_LOG_DEBUG, "Duplicating frame.\n");
write_to_fifo(s->fifo, avfilter_ref_buffer(buf_out, AV_PERM_READ));
s->dup++;
}
buf_out->pts = av_rescale_q(s->first_pts, inlink->time_base,
outlink->time_base) + s->frames_out;
avfilter_start_frame(outlink, buf_out);
avfilter_draw_slice(outlink, 0, outlink->h, 1);
avfilter_end_frame(outlink);
s->frames_out++;
}
flush_fifo(s->fifo);
write_to_fifo(s->fifo, buf);
s->pts = s->first_pts + av_rescale_q(s->frames_out, outlink->time_base, inlink->time_base);
}
static void null_start_frame(AVFilterLink *link, AVFilterBufferRef *buf)
{
}
static void null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
{
}
AVFilter avfilter_vf_fps = {
.name = "fps",
.description = NULL_IF_CONFIG_SMALL("Force constant framerate"),
.init = init,
.uninit = uninit,
.priv_size = sizeof(FPSContext),
.inputs = (AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.start_frame = null_start_frame,
.draw_slice = null_draw_slice,
.end_frame = end_frame, },
{ .name = NULL}},
.outputs = (AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
.config_props = config_props},
{ .name = NULL}},
};


@@ -150,6 +150,8 @@ avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int lin
pic-> extended_data = pic->data;
picref->extended_data = picref->data;
picref->pts = AV_NOPTS_VALUE;
return picref;
fail:

libavformat/mov.c

@@ -1554,6 +1554,9 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
case CODEC_ID_MPEG1VIDEO:
st->need_parsing = AVSTREAM_PARSE_FULL;
break;
case CODEC_ID_VC1:
st->need_parsing = AVSTREAM_PARSE_FULL;
break;
default:
break;
}