
lavdev: add libavfilter virtual input device

This input device is still to be considered experimental; only video
output is supported.
Stefano Sabatini 2011-06-11 15:40:08 +02:00
parent e72657f83d
commit 214357341a
7 changed files with 342 additions and 1 deletion

Changelog
@@ -7,6 +7,7 @@ version next:
- boxblur filter added
- BWF muxer
- Flash Screen Video 2 decoder
- lavfi input device added
version 0.8:

configure
@@ -1472,6 +1472,7 @@ dshow_indev_extralibs="-lpsapi -lole32 -lstrmiids -luuid"
dv1394_indev_deps="dv1394 dv_demuxer"
fbdev_indev_deps="linux_fb_h"
jack_indev_deps="jack_jack_h sem_timedwait"
lavfi_indev_deps="avfilter"
libdc1394_indev_deps="libdc1394"
openal_indev_deps="openal"
oss_indev_deps_any="soundcard_h sys_soundcard_h"

doc/indevs.texi
@@ -133,6 +133,60 @@ $ jack_connect metro:120_bpm ffmpeg:input_1
For more information read:
@url{http://jackaudio.org/}
@section lavfi
Libavfilter input virtual device.
This input device reads data from the open output pads of a libavfilter
filtergraph.
For each open output of the filtergraph, the input device creates a
corresponding stream mapped to that output. Currently only video data
is supported. The filtergraph is specified through the option
@option{graph}.
To enable this input device, you need to configure your build with
@code{--enable-libavfilter}.
@subsection Options
@table @option
@item graph
Specify the filtergraph to use as input. Each open video output must be
labelled by a unique string of the form "out@var{N}", where @var{N} is a
number starting from 0 corresponding to the mapped input stream
generated by the device.
The first unlabelled output is automatically assigned to the "out0"
label, but all the others need to be specified explicitly.
If not specified, it defaults to the filename specified for the input
device.
@end table
@subsection Examples
@itemize
@item
Create a color video stream and play it back with @file{ffplay}:
@example
ffplay -f lavfi -graph "color=pink [out0]" dummy
@end example
@item
As in the previous example, but use the filename to specify the graph
description, and omit the "out0" label:
@example
ffplay -f lavfi color=pink
@end example
@item
Create three different filtered video test sources and play them:
@example
ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
@end example
@end itemize
@section libdc1394
IIDC1394 input device, based on libdc1394 and libraw1394.
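
For reference, a minimal C consumer of the new lavfi device (the programmatic counterpart of the ffplay examples above) might look like the sketch below. It is not part of this commit: it uses the AVDictionary-based avformat_open_input()/avformat_close_input() calls, which were introduced in libavformat 53 shortly after this change, and era-appropriate helpers such as av_register_all() and av_free_packet() that newer FFmpeg releases have since replaced, so treat it as an illustration rather than era-exact code.

#include <stdio.h>
#include <inttypes.h>
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

int main(void)
{
    AVFormatContext *fmt_ctx = NULL;
    AVDictionary *opts = NULL;
    AVPacket pkt;
    int i;

    av_register_all();
    avdevice_register_all();

    /* same graph as in the documentation examples above */
    av_dict_set(&opts, "graph", "testsrc [out0]", 0);

    if (avformat_open_input(&fmt_ctx, "dummy",
                            av_find_input_format("lavfi"), &opts) < 0) {
        fprintf(stderr, "could not open the lavfi device\n");
        return 1;
    }

    for (i = 0; i < 10; i++) {          /* pull a handful of frames */
        if (av_read_frame(fmt_ctx, &pkt) < 0)
            break;
        printf("stream %d pts %"PRId64" size %d\n",
               pkt.stream_index, pkt.pts, pkt.size);
        av_free_packet(&pkt);
    }

    av_dict_free(&opts);
    avformat_close_input(&fmt_ctx);
    return 0;
}

Each packet returned by av_read_frame() carries one rawvideo frame from the corresponding filtergraph output, laid out by avpicture_layout() in lavfi_read_packet() in the new libavdevice/lavfi.c below.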

libavdevice/Makefile
@@ -2,6 +2,7 @@ include $(SUBDIR)../config.mak
NAME = avdevice
FFLIBS = avformat avcodec avutil
FFLIBS-$(CONFIG_LAVFI_INDEV) += avfilter
HEADERS = avdevice.h
@@ -19,6 +20,7 @@ OBJS-$(CONFIG_DSHOW_INDEV) += dshow.o dshow_enummediatypes.o \
OBJS-$(CONFIG_DV1394_INDEV) += dv1394.o
OBJS-$(CONFIG_FBDEV_INDEV) += fbdev.o
OBJS-$(CONFIG_JACK_INDEV) += jack_audio.o
OBJS-$(CONFIG_LAVFI_INDEV) += lavfi.o
OBJS-$(CONFIG_OPENAL_INDEV) += openal-dec.o
OBJS-$(CONFIG_OSS_INDEV) += oss_audio.o
OBJS-$(CONFIG_OSS_OUTDEV) += oss_audio.o

libavdevice/alldevices.c
@@ -44,6 +44,7 @@ void avdevice_register_all(void)
REGISTER_INDEV (DV1394, dv1394);
REGISTER_INDEV (FBDEV, fbdev);
REGISTER_INDEV (JACK, jack);
REGISTER_INDEV (LAVFI, lavfi);
REGISTER_INDEV (OPENAL, openal);
REGISTER_INOUTDEV (OSS, oss);
REGISTER_OUTDEV (SDL, sdl);

libavdevice/avdevice.h
@@ -23,7 +23,7 @@
#include "libavformat/avformat.h"
#define LIBAVDEVICE_VERSION_MAJOR 53
-#define LIBAVDEVICE_VERSION_MINOR 2
+#define LIBAVDEVICE_VERSION_MINOR 3
#define LIBAVDEVICE_VERSION_MICRO 0
#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \

libavdevice/lavfi.c (new file)
@@ -0,0 +1,282 @@
/*
* Copyright (c) 2011 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* libavfilter virtual input device
*/
/* #define DEBUG */
#include "float.h" /* DBL_MIN, DBL_MAX */
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/vsink_buffer.h"
#include "avdevice.h"
typedef struct {
AVClass *class; ///< class for private options
char *graph_str;
AVFilterGraph *graph;
AVFilterContext **sinks;
int *sink_stream_map;
int *stream_sink_map;
} LavfiContext;
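/* Build a -1-terminated list of every non-hwaccel pixel format; it is
 * passed below to the buffersink filters so they accept any software format. */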
static int *create_all_formats(int n)
{
int i, j, *fmts, count = 0;
for (i = 0; i < n; i++)
if (!(av_pix_fmt_descriptors[i].flags & PIX_FMT_HWACCEL))
count++;
if (!(fmts = av_malloc((count+1) * sizeof(int))))
return NULL;
for (j = 0, i = 0; i < n; i++) {
if (!(av_pix_fmt_descriptors[i].flags & PIX_FMT_HWACCEL))
fmts[j++] = i;
}
fmts[j] = -1;
return fmts;
}
av_cold static int lavfi_read_close(AVFormatContext *avctx)
{
LavfiContext *lavfi = avctx->priv_data;
av_freep(&lavfi->sink_stream_map);
av_freep(&lavfi->stream_sink_map);
avfilter_graph_free(&lavfi->graph);
return 0;
}
av_cold static int lavfi_read_header(AVFormatContext *avctx,
AVFormatParameters *ap)
{
LavfiContext *lavfi = avctx->priv_data;
AVFilterInOut *input_links = NULL, *output_links = NULL, *inout;
AVFilter *buffersink;
int *pix_fmts = create_all_formats(PIX_FMT_NB);
int ret = 0, i, n;
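/* record the error code and jump to the common cleanup path below */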
#define FAIL(ERR) { ret = ERR; goto end; }
avfilter_register_all();
if (!(buffersink = avfilter_get_by_name("buffersink"))) {
av_log(avctx, AV_LOG_ERROR,
"Missing required buffersink filter, aborting.\n");
FAIL(AVERROR_FILTER_NOT_FOUND);
}
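/* if no 'graph' option was given, fall back to the filename, as documented */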
if (!lavfi->graph_str)
lavfi->graph_str = av_strdup(avctx->filename);
/* parse the graph, create a stream for each open output */
if (!(lavfi->graph = avfilter_graph_alloc()))
FAIL(AVERROR(ENOMEM));
if ((ret = avfilter_graph_parse(lavfi->graph, lavfi->graph_str,
&input_links, &output_links, avctx)) < 0)
FAIL(ret);
if (input_links) {
av_log(avctx, AV_LOG_ERROR,
"Open inputs in the filtergraph are not acceptable\n");
FAIL(AVERROR(EINVAL));
}
/* count the outputs */
for (n = 0, inout = output_links; inout; n++, inout = inout->next);
if (!(lavfi->sink_stream_map = av_malloc(sizeof(int) * n)))
FAIL(AVERROR(ENOMEM));
if (!(lavfi->stream_sink_map = av_malloc(sizeof(int) * n)))
FAIL(AVERROR(ENOMEM));
for (i = 0; i < n; i++)
lavfi->stream_sink_map[i] = -1;
/* parse the output link names - they need to be of the form out0, out1, ...
* create a mapping between them and the streams */
for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
int stream_idx;
if (!strcmp(inout->name, "out"))
stream_idx = 0;
else if (sscanf(inout->name, "out%d\n", &stream_idx) != 1) {
av_log(avctx, AV_LOG_ERROR,
"Invalid outpad name '%s'\n", inout->name);
FAIL(AVERROR(EINVAL));
}
if ((unsigned)stream_idx >= n) {
av_log(avctx, AV_LOG_ERROR,
"Invalid index was specified in output '%s', "
"must be a non-negative value < %d\n",
inout->name, n);
FAIL(AVERROR(EINVAL));
}
/* is a video output? */
if (inout->filter_ctx->output_pads[inout->pad_idx].type != AVMEDIA_TYPE_VIDEO) {
av_log(avctx, AV_LOG_ERROR,
"Output '%s' is not a video output, not yet supported", inout->name);
FAIL(AVERROR(EINVAL));
}
if (lavfi->stream_sink_map[stream_idx] != -1) {
av_log(avctx, AV_LOG_ERROR,
"An with stream index %d was already specified\n",
stream_idx);
FAIL(AVERROR(EINVAL));
}
lavfi->sink_stream_map[i] = stream_idx;
lavfi->stream_sink_map[stream_idx] = i;
}
/* for each open output create a corresponding stream */
for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
AVStream *st;
if (!(st = av_new_stream(avctx, i)))
FAIL(AVERROR(ENOMEM));
}
/* create a sink for each output and connect them to the graph */
lavfi->sinks = av_malloc(sizeof(AVFilterContext *) * avctx->nb_streams);
if (!lavfi->sinks)
FAIL(AVERROR(ENOMEM));
for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
AVFilterContext *sink;
if ((ret = avfilter_graph_create_filter(&sink, buffersink,
inout->name, NULL,
pix_fmts, lavfi->graph)) < 0)
FAIL(ret);
lavfi->sinks[i] = sink;
if ((ret = avfilter_link(inout->filter_ctx, 0, sink, 0)) < 0)
FAIL(ret);
}
/* configure the graph */
if ((ret = avfilter_graph_config(lavfi->graph, avctx)) < 0)
FAIL(ret);
/* fill each stream with the information in the corresponding sink */
for (i = 0; i < avctx->nb_streams; i++) {
AVFilterLink *link = lavfi->sinks[lavfi->stream_sink_map[i]]->inputs[0];
AVStream *st = avctx->streams[i];
st->codec->codec_type = link->type;
av_set_pts_info(st, 64, link->time_base.num, link->time_base.den);
if (link->type == AVMEDIA_TYPE_VIDEO) {
st->codec->codec_id = CODEC_ID_RAWVIDEO;
st->codec->pix_fmt = link->format;
st->codec->time_base = link->time_base;
st->codec->width = link->w;
st->codec->height = link->h;
}
}
end:
avfilter_inout_free(&input_links);
avfilter_inout_free(&output_links);
if (ret < 0)
lavfi_read_close(avctx);
return ret;
}
static int lavfi_read_packet(AVFormatContext *avctx, AVPacket *pkt)
{
LavfiContext *lavfi = avctx->priv_data;
double min_pts = DBL_MAX;
int min_pts_sink_idx = 0;
AVFilterBufferRef *picref;
AVPicture pict;
int ret, i, size;
/* iterate through all the graph sinks. Select the sink with the
* minimum PTS */
for (i = 0; i < avctx->nb_streams; i++) {
AVRational tb = lavfi->sinks[i]->inputs[0]->time_base;
double d;
int ret = av_vsink_buffer_get_video_buffer_ref(lavfi->sinks[i],
&picref, AV_VSINK_BUF_FLAG_PEEK);
if (ret < 0)
return ret;
d = av_rescale_q(picref->pts, tb, AV_TIME_BASE_Q);
if (d < min_pts) {
min_pts = d;
min_pts_sink_idx = i;
}
}
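/* dequeue the frame from the chosen sink (no PEEK flag this time) */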
av_vsink_buffer_get_video_buffer_ref(lavfi->sinks[min_pts_sink_idx],
&picref, 0);
size = avpicture_get_size(picref->format, picref->video->w, picref->video->h);
if ((ret = av_new_packet(pkt, size)) < 0)
return ret;
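/* wrap the buffer's plane pointers in an AVPicture so avpicture_layout()
 * can flatten the frame into the packet data */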
memcpy(pict.data, picref->data, 4*sizeof(picref->data[0]));
memcpy(pict.linesize, picref->linesize, 4*sizeof(picref->linesize[0]));
avpicture_layout(&pict, picref->format, picref->video->w,
picref->video->h, pkt->data, size);
pkt->stream_index = lavfi->sink_stream_map[min_pts_sink_idx];
pkt->pts = picref->pts;
pkt->size = size;
avfilter_unref_buffer(picref);
return size;
}
#define OFFSET(x) offsetof(LavfiContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
{ "graph", "Libavfilter graph", OFFSET(graph_str), FF_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC },
{ NULL },
};
static const AVClass lavfi_class = {
.class_name = "lavfi indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
AVInputFormat ff_lavfi_demuxer = {
.name = "lavfi",
.long_name = NULL_IF_CONFIG_SMALL("Libavfilter virtual input device"),
.priv_data_size = sizeof(LavfiContext),
.read_header = lavfi_read_header,
.read_packet = lavfi_read_packet,
.read_close = lavfi_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &lavfi_class,
};