Merge remote-tracking branch 'cigaes/master'
* cigaes/master:
  lavfi/vf_yadif: use standard options parsing.
  lavfi/vf_unsharp: use standard options parsing.
  lavfi/vf_transpose: use standard options parsing.
  lavfi/vf_pad: use standard options parsing.
  lavfi/vf_fps: use standard options parsing.
  lavfi/vf_fade: use standard options parsing.
  lavfi/vf_drawbox: use standard options parsing.
  lavfi/vf_delogo: use standard options parsing.
  lavfi/vf_decimate: use standard options parsing.
  lavfi/vf_crop: use standard options parsing.
  lavfi/af_volume: use standard options parsing.
  lavfi/vf_tile: use standard options parsing.
  lavfi/avf_concat: use standard options parsing.
  lavfi: add common code to handle options parsing.
  lavf/vobsub: free index pseudo-packet.
  ffmpeg: fix freeing of sub2video frame.
  lavfi: add sine audio source.
  lavu/opt: add AV_OPT_TYPE_DURATION.
  lavfi/concat: fix silence duration computation.
  lavf/concatdec: support seeking.

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 426ebdf923
Changelog
@@ -10,6 +10,7 @@ version <next>:
- perms and aperms filters
- audio filtering support in ffplay
- 10% faster aac encoding on x86 and MIPS
- sine audio filter source


version 1.2:
doc/APIchanges
@@ -15,6 +15,9 @@ libavutil: 2012-10-22

API changes, most recent first:

2013-03-20 - xxxxxxx - lavu 52.22.100 - opt.h
  Add AV_OPT_TYPE_DURATION value to AVOptionType enum.

2013-03-17 - xxxxxx - lavu 52.20.100 - opt.h
  Add AV_OPT_TYPE_VIDEO_RATE value to AVOptionType enum.
doc/demuxers.texi
@@ -78,6 +78,9 @@ Duration of the file. This information can be specified from the file;
specifying it here may be more efficient or help if the information from the
file is not available or accurate.

If the duration is set for all files, then it is possible to seek in the
whole concatenated video.

@end table
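For illustration only (not part of the patch; the file names are hypothetical), a script in which every file carries a @code{duration} directive, so that the whole concatenation becomes seekable:
@example
file intro.mkv
duration 20.0
file main.mkv
duration 1200.0
@end example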

@subsection Options
doc/filters.texi
@@ -1653,6 +1653,57 @@ ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.'

For more information about libflite, check:
@url{http://www.speech.cs.cmu.edu/flite/}

@section sine

Generate an audio signal made of a sine wave with amplitude 1/8.

The audio signal is bit-exact.

It accepts a list of options in the form of @var{key}=@var{value} pairs
separated by ":". If the option name is omitted, the first option is the
frequency and the second option is the beep factor.

The supported options are:

@table @option

@item frequency, f
Set the carrier frequency. Default is 440 Hz.

@item beep_factor, b
Enable a periodic beep every second with frequency @var{beep_factor} times
the carrier frequency. Default is 0, meaning the beep is disabled.

@item sample_rate, r
Specify the sample rate, default is 44100.

@item duration, d
Specify the duration of the generated audio stream.

@item samples_per_frame
Set the number of samples per output frame, default is 1024.
@end table

@subsection Examples

@itemize

@item
Generate a simple 440 Hz sine wave:
@example
sine
@end example

@item
Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds:
@example
sine=220:4:d=5
sine=f=220:b=4:d=5
sine=frequency=220:beep_factor=4:duration=5
@end example

@end itemize
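As a usage illustration (not part of the patch), the source can be auditioned directly through the lavfi device, in the same way as the flite example above:
@example
ffplay -f lavfi sine=frequency=220:beep_factor=4:duration=5
@end example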

@c man end AUDIO SOURCES

@chapter Audio Sinks
ffmpeg.c
@@ -482,7 +482,7 @@ static void exit_program(void)
        av_frame_free(&input_streams[i]->filter_frame);
        av_dict_free(&input_streams[i]->opts);
        avsubtitle_free(&input_streams[i]->prev_sub.subtitle);
        avcodec_free_frame(&input_streams[i]->sub2video.frame);
        av_frame_free(&input_streams[i]->sub2video.frame);
        av_freep(&input_streams[i]->filters);
        av_freep(&input_streams[i]);
    }
libavfilter/Makefile
@@ -90,6 +90,7 @@ OBJS-$(CONFIG_VOLUMEDETECT_FILTER) += af_volumedetect.o
OBJS-$(CONFIG_AEVALSRC_FILTER) += asrc_aevalsrc.o
OBJS-$(CONFIG_ANULLSRC_FILTER) += asrc_anullsrc.o
OBJS-$(CONFIG_FLITE_FILTER) += asrc_flite.o
OBJS-$(CONFIG_SINE_FILTER) += asrc_sine.o

OBJS-$(CONFIG_ANULLSINK_FILTER) += asink_anullsink.o
libavfilter/af_volume.c
@@ -59,14 +59,6 @@ AVFILTER_DEFINE_CLASS(volume);
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    VolumeContext *vol = ctx->priv;
    static const char *shorthand[] = { "volume", "precision", NULL };
    int ret;

    vol->class = &volume_class;
    av_opt_set_defaults(vol);

    if ((ret = av_opt_set_from_string(vol, args, shorthand, "=", ":")) < 0)
        return ret;

    if (vol->precision == PRECISION_FIXED) {
        vol->volume_i = (int)(vol->volume * 256 + 0.5);
@@ -79,8 +71,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
               precision_str[vol->precision]);
    }

    av_opt_free(vol);
    return ret;
    return 0;
}

static int query_formats(AVFilterContext *ctx)
@@ -299,6 +290,8 @@ static const AVFilterPad avfilter_af_volume_outputs[] = {
    { NULL }
};

static const char *const shorthand[] = { "volume", "precision", NULL };

AVFilter avfilter_af_volume = {
    .name           = "volume",
    .description    = NULL_IF_CONFIG_SMALL("Change input volume."),
@@ -308,4 +301,5 @@ AVFilter avfilter_af_volume = {
    .inputs         = avfilter_af_volume_inputs,
    .outputs        = avfilter_af_volume_outputs,
    .priv_class     = &volume_class,
    .shorthand      = shorthand,
};
libavfilter/allfilters.c
@@ -86,6 +86,7 @@ void avfilter_register_all(void)
    REGISTER_FILTER(AEVALSRC, aevalsrc, asrc);
    REGISTER_FILTER(ANULLSRC, anullsrc, asrc);
    REGISTER_FILTER(FLITE, flite, asrc);
    REGISTER_FILTER(SINE, sine, asrc);

    REGISTER_FILTER(ANULLSINK, anullsink, asink);
libavfilter/asrc_sine.c (new file, 228 lines)
@@ -0,0 +1,228 @@
/*
 * Copyright (c) 2013 Nicolas George
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"

typedef struct {
    const AVClass *class;
    double frequency;
    double beep_factor;
    int samples_per_frame;
    int sample_rate;
    int64_t duration;
    int16_t *sin;
    int64_t pts;
    uint32_t phi;       ///< current phase of the sine (2pi = 1<<32)
    uint32_t dphi;      ///< phase increment between two samples
    unsigned beep_period;
    unsigned beep_index;
    unsigned beep_length;
    uint32_t phi_beep;  ///< current phase of the beep
    uint32_t dphi_beep; ///< phase increment of the beep
} SineContext;

#define CONTEXT SineContext
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

#define OPT_GENERIC(name, field, def, min, max, descr, type, deffield, ...) \
    { name, descr, offsetof(CONTEXT, field), AV_OPT_TYPE_ ## type,          \
      { .deffield = def }, min, max, FLAGS, __VA_ARGS__ }

#define OPT_INT(name, field, def, min, max, descr, ...) \
    OPT_GENERIC(name, field, def, min, max, descr, INT, i64, __VA_ARGS__)

#define OPT_DBL(name, field, def, min, max, descr, ...) \
    OPT_GENERIC(name, field, def, min, max, descr, DOUBLE, dbl, __VA_ARGS__)

#define OPT_DUR(name, field, def, min, max, descr, ...) \
    OPT_GENERIC(name, field, def, min, max, descr, DURATION, str, __VA_ARGS__)

static const AVOption sine_options[] = {
    OPT_DBL("frequency",         frequency,          440, 0, INFINITY,  "set the sine frequency"),
    OPT_DBL("f",                 frequency,          440, 0, INFINITY,  "set the sine frequency"),
    OPT_DBL("beep_factor",       beep_factor,          0, 0, INFINITY,  "set the beep fequency factor"),
    OPT_DBL("b",                 beep_factor,          0, 0, INFINITY,  "set the beep fequency factor"),
    OPT_INT("sample_rate",       sample_rate,      44100, 1, INT_MAX,   "set the sample rate"),
    OPT_INT("r",                 sample_rate,      44100, 1, INT_MAX,   "set the sample rate"),
    OPT_DUR("duration",          duration,             0, 0, INT64_MAX, "set the audio duration"),
    OPT_DUR("d",                 duration,             0, 0, INT64_MAX, "set the audio duration"),
    OPT_INT("samples_per_frame", samples_per_frame, 1024, 0, INT_MAX,   "set the number of samples per frame"),
    {NULL},
};

AVFILTER_DEFINE_CLASS(sine);

#define LOG_PERIOD 15
#define AMPLITUDE 4095
#define AMPLITUDE_SHIFT 3

static void make_sin_table(int16_t *sin)
{
    unsigned half_pi = 1 << (LOG_PERIOD - 2);
    unsigned ampls = AMPLITUDE << AMPLITUDE_SHIFT;
    uint64_t unit2 = (uint64_t)(ampls * ampls) << 32;
    unsigned step, i, c, s, k, new_k, n2;

    /* Principle: if u = exp(i*a1) and v = exp(i*a2), then
       exp(i*(a1+a2)/2) = (u+v) / length(u+v) */
    sin[0] = 0;
    sin[half_pi] = ampls;
    for (step = half_pi; step > 1; step /= 2) {
        /* k = (1 << 16) * amplitude / length(u+v)
           In exact values, k is constant at a given step */
        k = 0x10000;
        for (i = 0; i < half_pi / 2; i += step) {
            s = sin[i] + sin[i + step];
            c = sin[half_pi - i] + sin[half_pi - i - step];
            n2 = s * s + c * c;
            /* Newton's method to solve n² * k² = unit² */
            while (1) {
                new_k = (k + unit2 / ((uint64_t)k * n2) + 1) >> 1;
                if (k == new_k)
                    break;
                k = new_k;
            }
            sin[i + step / 2] = (k * s + 0x7FFF) >> 16;
            sin[half_pi - i - step / 2] = (k * c + 0x8000) >> 16;
        }
    }
    /* Unshift amplitude */
    for (i = 0; i <= half_pi; i++)
        sin[i] = (sin[i] + (1 << (AMPLITUDE_SHIFT - 1))) >> AMPLITUDE_SHIFT;
    /* Use symmetries to fill the other three quarters */
    for (i = 0; i < half_pi; i++)
        sin[half_pi * 2 - i] = sin[i];
    for (i = 0; i < 2 * half_pi; i++)
        sin[i + 2 * half_pi] = -sin[i];
}

static av_cold int init(AVFilterContext *ctx, const char *args)
{
    SineContext *sine = ctx->priv;
    static const char *shorthand[] = { "frequency", "beep_factor", NULL };
    int ret;

    sine->class = &sine_class;
    av_opt_set_defaults(sine);

    if ((ret = av_opt_set_from_string(sine, args, shorthand, "=", ":")) < 0)
        return ret;
    if (!(sine->sin = av_malloc(sizeof(*sine->sin) << LOG_PERIOD)))
        return AVERROR(ENOMEM);
    sine->dphi = ldexp(sine->frequency, 32) / sine->sample_rate + 0.5;
    make_sin_table(sine->sin);

    if (sine->beep_factor) {
        sine->beep_period = sine->sample_rate;
        sine->beep_length = sine->beep_period / 25;
        sine->dphi_beep = ldexp(sine->beep_factor * sine->frequency, 32) /
                          sine->sample_rate + 0.5;
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SineContext *sine = ctx->priv;

    av_freep(&sine->sin);
}

static av_cold int query_formats(AVFilterContext *ctx)
{
    SineContext *sine = ctx->priv;
    static const int64_t chlayouts[] = { AV_CH_LAYOUT_MONO, -1 };
    int sample_rates[] = { sine->sample_rate, -1 };
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16,
                                                       AV_SAMPLE_FMT_NONE };

    ff_set_common_formats (ctx, ff_make_format_list(sample_fmts));
    ff_set_common_channel_layouts(ctx, avfilter_make_format64_list(chlayouts));
    ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates));
    return 0;
}

static av_cold int config_props(AVFilterLink *outlink)
{
    SineContext *sine = outlink->src->priv;
    sine->duration = av_rescale(sine->duration, sine->sample_rate, AV_TIME_BASE);
    return 0;
}

static int request_frame(AVFilterLink *outlink)
{
    SineContext *sine = outlink->src->priv;
    AVFrame *frame;
    int i, nb_samples = sine->samples_per_frame;
    int16_t *samples;

    if (sine->duration) {
        nb_samples = FFMIN(nb_samples, sine->duration - sine->pts);
        av_assert1(nb_samples >= 0);
        if (!nb_samples)
            return AVERROR_EOF;
    }
    if (!(frame = ff_get_audio_buffer(outlink, nb_samples)))
        return AVERROR(ENOMEM);
    samples = (int16_t *)frame->data[0];

    for (i = 0; i < nb_samples; i++) {
        samples[i] = sine->sin[sine->phi >> (32 - LOG_PERIOD)];
        sine->phi += sine->dphi;
        if (sine->beep_index < sine->beep_length) {
            samples[i] += sine->sin[sine->phi_beep >> (32 - LOG_PERIOD)] << 1;
            sine->phi_beep += sine->dphi_beep;
        }
        if (++sine->beep_index == sine->beep_period)
            sine->beep_index = 0;
    }

    frame->pts = sine->pts;
    sine->pts += nb_samples;
    return ff_filter_frame(outlink, frame);
}

static const AVFilterPad sine_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
        .config_props  = config_props,
    },
    { NULL }
};

AVFilter avfilter_asrc_sine = {
    .name          = "sine",
    .description   = NULL_IF_CONFIG_SMALL("Generate sine wave audio signal."),
    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,
    .priv_size     = sizeof(SineContext),
    .inputs        = NULL,
    .outputs       = sine_outputs,
    .priv_class    = &sine_class,
};
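For readers unfamiliar with the technique, a minimal standalone sketch of the same idea follows: a sine lookup table addressed by the top bits of a 32-bit phase accumulator that is advanced by a fixed per-sample increment. It is not FFmpeg code; unlike asrc_sine.c, which builds its table with integer arithmetic so the output is bit-exact, this sketch fills the table with sin() from libm and only illustrates the mechanism (build with: cc sketch.c -lm).

/* Illustrative sketch only: same phase-accumulator idea as asrc_sine.c,
 * but the table is built with libm, so it is NOT bit-exact. */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define LOG_PERIOD 15                 /* table has 2^15 entries            */
#define PERIOD     (1 << LOG_PERIOD)

static int16_t table[PERIOD];

int main(void)
{
    const double sample_rate = 44100.0, frequency = 440.0;
    /* phase increment: fraction of a full 2^32 turn advanced per sample */
    uint32_t phi  = 0;
    uint32_t dphi = (uint32_t)(frequency / sample_rate * 4294967296.0 + 0.5);
    int i;

    for (i = 0; i < PERIOD; i++)      /* amplitude 4095, as in the filter  */
        table[i] = (int16_t)lrint(4095.0 * sin(2.0 * 3.141592653589793 * i / PERIOD));

    for (i = 0; i < 8; i++) {         /* print the first few samples       */
        printf("%d\n", table[phi >> (32 - LOG_PERIOD)]);
        phi += dphi;                  /* wraps modulo 2^32 automatically   */
    }
    return 0;
}

The unsigned 32-bit phase wraps around on overflow, which keeps the oscillator periodic without any explicit modulo operation.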
libavfilter/avf_concat.c
@@ -232,7 +232,7 @@ static void close_input(AVFilterContext *ctx, unsigned in_no)
           ctx->input_pads[in_no].name, cat->nb_in_active);
}

static void find_next_delta_ts(AVFilterContext *ctx)
static void find_next_delta_ts(AVFilterContext *ctx, int64_t *seg_delta)
{
    ConcatContext *cat = ctx->priv;
    unsigned i = cat->cur_idx;
@@ -243,13 +243,15 @@ static void find_next_delta_ts(AVFilterContext *ctx)
    for (; i < imax; i++)
        pts = FFMAX(pts, cat->in[i].pts);
    cat->delta_ts += pts;
    *seg_delta = pts;
}

static int send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no)
static int send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no,
                        int64_t seg_delta)
{
    ConcatContext *cat = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[out_no];
    int64_t base_pts = cat->in[in_no].pts + cat->delta_ts;
    int64_t base_pts = cat->in[in_no].pts + cat->delta_ts - seg_delta;
    int64_t nb_samples, sent = 0;
    int frame_nb_samples, ret;
    AVRational rate_tb = { 1, ctx->inputs[in_no]->sample_rate };
@@ -258,7 +260,7 @@ static int send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no)

    if (!rate_tb.den)
        return AVERROR_BUG;
    nb_samples = av_rescale_q(cat->delta_ts - base_pts,
    nb_samples = av_rescale_q(seg_delta - cat->in[in_no].pts,
                              outlink->time_base, rate_tb);
    frame_nb_samples = FFMAX(9600, rate_tb.den / 5); /* arbitrary */
    while (nb_samples) {
@@ -283,8 +285,9 @@ static int flush_segment(AVFilterContext *ctx)
    int ret;
    ConcatContext *cat = ctx->priv;
    unsigned str, str_max;
    int64_t seg_delta;

    find_next_delta_ts(ctx);
    find_next_delta_ts(ctx, &seg_delta);
    cat->cur_idx += ctx->nb_outputs;
    cat->nb_in_active = ctx->nb_outputs;
    av_log(ctx, AV_LOG_VERBOSE, "Segment finished at pts=%"PRId64"\n",
@@ -295,7 +298,8 @@ static int flush_segment(AVFilterContext *ctx)
    str = cat->nb_streams[AVMEDIA_TYPE_VIDEO];
    str_max = str + cat->nb_streams[AVMEDIA_TYPE_AUDIO];
    for (; str < str_max; str++) {
        ret = send_silence(ctx, cat->cur_idx - ctx->nb_outputs + str, str);
        ret = send_silence(ctx, cat->cur_idx - ctx->nb_outputs + str, str,
                           seg_delta);
        if (ret < 0)
            return ret;
    }
@@ -354,17 +358,8 @@ static int request_frame(AVFilterLink *outlink)
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    ConcatContext *cat = ctx->priv;
    int ret;
    unsigned seg, type, str;

    cat->class = &concat_class;
    av_opt_set_defaults(cat);
    ret = av_set_options_string(cat, args, "=", ":");
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error parsing options: '%s'\n", args);
        return ret;
    }

    /* create input pads */
    for (seg = 0; seg < cat->nb_segments; seg++) {
        for (type = 0; type < TYPE_ALL; type++) {
@@ -414,6 +409,8 @@ static av_cold void uninit(AVFilterContext *ctx)
    av_free(cat->in);
}

static const char *const shorthand[] = { NULL };

AVFilter avfilter_avf_concat = {
    .name          = "concat",
    .description   = NULL_IF_CONFIG_SMALL("Concatenate audio and video streams."),
@@ -424,4 +421,5 @@ AVFilter avfilter_avf_concat = {
    .inputs        = NULL,
    .outputs       = NULL,
    .priv_class    = &concat_class,
    .shorthand     = shorthand,
};
libavfilter/avfilter.c
@@ -24,6 +24,7 @@
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/rational.h"
#include "libavutil/samplefmt.h"
@@ -556,6 +557,8 @@ void avfilter_free(AVFilterContext *filter)

    if (filter->filter->uninit)
        filter->filter->uninit(filter);
    if (filter->filter->shorthand)
        av_opt_free(filter->priv);

    for (i = 0; i < filter->nb_inputs; i++) {
        if ((link = filter->inputs[i])) {
@@ -600,6 +603,17 @@ int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque
{
    int ret=0;

    if (filter->filter->shorthand) {
        av_assert0(filter->priv);
        av_assert0(filter->filter->priv_class);
        *(const AVClass **)filter->priv = filter->filter->priv_class;
        av_opt_set_defaults(filter->priv);
        ret = av_opt_set_from_string(filter->priv, args,
                                     filter->filter->shorthand, "=", ":");
        if (ret < 0)
            return ret;
        args = NULL;
    }
    if (filter->filter->init_opaque)
        ret = filter->filter->init_opaque(filter, args, opaque);
    else if (filter->filter->init)
libavfilter/avfilter.h
@@ -486,6 +486,15 @@ typedef struct AVFilter {
    int (*init_opaque)(AVFilterContext *ctx, const char *args, void *opaque);

    const AVClass *priv_class;      ///< private class, containing filter specific options

    /**
     * Shorthand syntax for init arguments.
     * If this field is set (even to an empty list), just before init the
     * private class will be set and the arguments string will be parsed
     * using av_opt_set_from_string() with "=" and ":" delimiters, and
     * av_opt_free() will be called just after uninit.
     */
    const char *const *shorthand;
} AVFilter;

/** An instance of a filter */
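To illustrate what the shorthand mechanism does with an argument string, a self-contained sketch against the libavutil options API follows; it is not part of the patch, and the DemoContext structure and option names are invented. Positional values are matched against the shorthand name list by av_opt_set_from_string(), while key=value pairs keep working (link against libavutil).

#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <libavutil/log.h>
#include <libavutil/opt.h>

typedef struct DemoContext {
    const AVClass *class;
    int w, h;
} DemoContext;

#define OFFSET(x) offsetof(DemoContext, x)
static const AVOption demo_options[] = {
    { "w", "width",  OFFSET(w), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, 0 },
    { "h", "height", OFFSET(h), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, 0 },
    { NULL }
};

static const AVClass demo_class = {
    .class_name = "demo",
    .item_name  = av_default_item_name,
    .option     = demo_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

int main(void)
{
    /* same shorthand convention as the filters converted in this patch */
    static const char *const shorthand[] = { "w", "h", NULL };
    DemoContext demo = { .class = &demo_class };

    av_opt_set_defaults(&demo);
    /* "640:480" is matched positionally against the shorthand list;
     * "w=640:h=480" or "h=480:w=640" would give the same result */
    if (av_opt_set_from_string(&demo, "640:480", shorthand, "=", ":") < 0)
        return 1;
    printf("w=%d h=%d\n", demo.w, demo.h);
    av_opt_free(&demo);
    return 0;
}

Running it prints w=640 h=480; once AVFilter.shorthand is set, libavfilter performs exactly this parsing generically, which is what lets each filter drop its hand-written init boilerplate below.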
libavfilter/version.h
@@ -29,8 +29,8 @@
#include "libavutil/avutil.h"

#define LIBAVFILTER_VERSION_MAJOR   3
#define LIBAVFILTER_VERSION_MINOR  47
#define LIBAVFILTER_VERSION_MICRO 104
#define LIBAVFILTER_VERSION_MINOR  48
#define LIBAVFILTER_VERSION_MICRO 100

#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
                                               LIBAVFILTER_VERSION_MINOR, \
libavfilter/vf_crop.c
@@ -107,24 +107,12 @@ static const AVOption crop_options[] = {

AVFILTER_DEFINE_CLASS(crop);

static av_cold int init(AVFilterContext *ctx, const char *args)
{
    CropContext *crop = ctx->priv;
    static const char *shorthand[] = { "w", "h", "x", "y", "keep_aspect", NULL };

    crop->class = &crop_class;
    av_opt_set_defaults(crop);

    return av_opt_set_from_string(crop, args, shorthand, "=", ":");
}

static av_cold void uninit(AVFilterContext *ctx)
{
    CropContext *crop = ctx->priv;

    av_expr_free(crop->x_pexpr); crop->x_pexpr = NULL;
    av_expr_free(crop->y_pexpr); crop->y_pexpr = NULL;
    av_opt_free(crop);
}

static int query_formats(AVFilterContext *ctx)
@@ -348,6 +336,8 @@ static const AVFilterPad avfilter_vf_crop_outputs[] = {
    { NULL }
};

static const char *const shorthand[] = { "w", "h", "x", "y", "keep_aspect", NULL };

AVFilter avfilter_vf_crop = {
    .name        = "crop",
    .description = NULL_IF_CONFIG_SMALL("Crop the input video to width:height:x:y."),
@@ -355,10 +345,10 @@ AVFilter avfilter_vf_crop = {
    .priv_size   = sizeof(CropContext),

    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,

    .inputs      = avfilter_vf_crop_inputs,
    .outputs     = avfilter_vf_crop_outputs,
    .priv_class  = &crop_class,
    .shorthand   = shorthand,
};
libavfilter/vf_decimate.c
@@ -132,14 +132,6 @@ static int decimate_frame(AVFilterContext *ctx,
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    DecimateContext *decimate = ctx->priv;
    static const char *shorthand[] = { "max", "hi", "lo", "frac", NULL };
    int ret;

    decimate->class = &decimate_class;
    av_opt_set_defaults(decimate);

    if ((ret = av_opt_set_from_string(decimate, args, shorthand, "=", ":")) < 0)
        return ret;

    av_log(ctx, AV_LOG_VERBOSE, "max_drop_count:%d hi:%d lo:%d frac:%f\n",
           decimate->max_drop_count, decimate->hi, decimate->lo, decimate->frac);
@@ -157,7 +149,6 @@ static av_cold void uninit(AVFilterContext *ctx)
    DecimateContext *decimate = ctx->priv;
    av_frame_free(&decimate->ref);
    avcodec_close(decimate->avctx);
    av_opt_free(decimate);
    av_freep(&decimate->avctx);
}
@@ -251,6 +242,8 @@ static const AVFilterPad decimate_outputs[] = {
    { NULL }
};

static const char *const shorthand[] = { "max", "hi", "lo", "frac", NULL };

AVFilter avfilter_vf_decimate = {
    .name        = "decimate",
    .description = NULL_IF_CONFIG_SMALL("Remove near-duplicate frames."),
@@ -262,4 +255,5 @@ AVFilter avfilter_vf_decimate = {
    .inputs      = decimate_inputs,
    .outputs     = decimate_outputs,
    .priv_class  = &decimate_class,
    .shorthand   = shorthand,
};
libavfilter/vf_delogo.c
@@ -171,14 +171,6 @@ static int query_formats(AVFilterContext *ctx)
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    DelogoContext *delogo = ctx->priv;
    int ret = 0;
    static const char *shorthand[] = { "x", "y", "w", "h", "band", NULL };

    delogo->class = &delogo_class;
    av_opt_set_defaults(delogo);

    if ((ret = av_opt_set_from_string(delogo, args, shorthand, "=", ":")) < 0)
        return ret;

#define CHECK_UNSET_OPT(opt)                              \
    if (delogo->opt == -1) {                              \
@@ -267,6 +259,8 @@ static const AVFilterPad avfilter_vf_delogo_outputs[] = {
    { NULL }
};

static const char *const shorthand[] = { "x", "y", "w", "h", "band", NULL };

AVFilter avfilter_vf_delogo = {
    .name          = "delogo",
    .description   = NULL_IF_CONFIG_SMALL("Remove logo from input video."),
@@ -277,4 +271,5 @@ AVFilter avfilter_vf_delogo = {
    .inputs    = avfilter_vf_delogo_inputs,
    .outputs   = avfilter_vf_delogo_outputs,
    .priv_class = &delogo_class,
    .shorthand  = shorthand,
};
libavfilter/vf_drawbox.c
@@ -68,14 +68,6 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
{
    DrawBoxContext *drawbox = ctx->priv;
    uint8_t rgba_color[4];
    static const char *shorthand[] = { "x", "y", "w", "h", "color", "thickness", NULL };
    int ret;

    drawbox->class = &drawbox_class;
    av_opt_set_defaults(drawbox);

    if ((ret = av_opt_set_from_string(drawbox, args, shorthand, "=", ":")) < 0)
        return ret;

    if (!strcmp(drawbox->color_str, "invert"))
        drawbox->invert_color = 1;
@@ -92,12 +84,6 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    DrawBoxContext *drawbox = ctx->priv;
    av_opt_free(drawbox);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
@@ -185,15 +171,17 @@ static const AVFilterPad avfilter_vf_drawbox_outputs[] = {
    { NULL }
};

static const char *const shorthand[] = { "x", "y", "w", "h", "color", "thickness", NULL };

AVFilter avfilter_vf_drawbox = {
    .name          = "drawbox",
    .description   = NULL_IF_CONFIG_SMALL("Draw a colored box on the input video."),
    .priv_size     = sizeof(DrawBoxContext),
    .init          = init,
    .uninit        = uninit,

    .query_formats = query_formats,
    .inputs        = avfilter_vf_drawbox_inputs,
    .outputs       = avfilter_vf_drawbox_outputs,
    .priv_class    = &drawbox_class,
    .shorthand     = shorthand,
};
libavfilter/vf_fade.c
@@ -78,14 +78,6 @@ AVFILTER_DEFINE_CLASS(fade);
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    FadeContext *fade = ctx->priv;
    static const char *shorthand[] = { "type", "start_frame", "nb_frames", NULL };
    int ret;

    fade->class = &fade_class;
    av_opt_set_defaults(fade);

    if ((ret = av_opt_set_from_string(fade, args, shorthand, "=", ":")) < 0)
        return ret;

    fade->fade_per_frame = (1 << 16) / fade->nb_frames;
    if (!strcmp(fade->type, "in"))
@@ -106,13 +98,6 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    FadeContext *fade = ctx->priv;

    av_opt_free(fade);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
@@ -247,15 +232,17 @@ static const AVFilterPad avfilter_vf_fade_outputs[] = {
    { NULL }
};

static const char *const shorthand[] = { "type", "start_frame", "nb_frames", NULL };

AVFilter avfilter_vf_fade = {
    .name          = "fade",
    .description   = NULL_IF_CONFIG_SMALL("Fade in/out input video."),
    .init          = init,
    .uninit        = uninit,
    .priv_size     = sizeof(FadeContext),
    .query_formats = query_formats,

    .inputs        = avfilter_vf_fade_inputs,
    .outputs       = avfilter_vf_fade_outputs,
    .priv_class    = &fade_class,
    .shorthand     = shorthand,
};
libavfilter/vf_fps.c
@@ -74,20 +74,12 @@ AVFILTER_DEFINE_CLASS(fps);
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    FPSContext *s = ctx->priv;
    const char *shorthand[] = { "fps", "round", NULL };
    int ret;

    s->class = &fps_class;
    av_opt_set_defaults(s);

    if ((ret = av_opt_set_from_string(s, args, shorthand, "=", ":")) < 0)
        return ret;

    if ((ret = av_parse_video_rate(&s->framerate, s->fps)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error parsing framerate %s.\n", s->fps);
        return ret;
    }
    av_opt_free(s);

    if (!(s->fifo = av_fifo_alloc(2*sizeof(AVFrame*))))
        return AVERROR(ENOMEM);
@@ -288,6 +280,8 @@ static const AVFilterPad avfilter_vf_fps_outputs[] = {
    { NULL }
};

static const char *const shorthand[] = { "fps", "round", NULL };

AVFilter avfilter_vf_fps = {
    .name        = "fps",
    .description = NULL_IF_CONFIG_SMALL("Force constant framerate"),
@@ -300,4 +294,5 @@ AVFilter avfilter_vf_fps = {
    .inputs      = avfilter_vf_fps_inputs,
    .outputs     = avfilter_vf_fps_outputs,
    .priv_class  = &fps_class,
    .shorthand   = shorthand,
};
libavfilter/vf_pad.c
@@ -111,14 +111,6 @@ AVFILTER_DEFINE_CLASS(pad);
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    PadContext *pad = ctx->priv;
    static const char *shorthand[] = { "width", "height", "x", "y", "color", NULL };
    int ret;

    pad->class = &pad_class;
    av_opt_set_defaults(pad);

    if ((ret = av_opt_set_from_string(pad, args, shorthand, "=", ":")) < 0)
        return ret;

    if (av_parse_color(pad->rgba_color, pad->color_str, -1, ctx) < 0)
        return AVERROR(EINVAL);
@@ -126,12 +118,6 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    PadContext *pad = ctx->priv;
    av_opt_free(pad);
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
@@ -416,17 +402,19 @@ static const AVFilterPad avfilter_vf_pad_outputs[] = {
    { NULL }
};

static const char *const shorthand[] = { "width", "height", "x", "y", "color", NULL };

AVFilter avfilter_vf_pad = {
    .name          = "pad",
    .description   = NULL_IF_CONFIG_SMALL("Pad input image to width:height[:x:y[:color]] (default x and y: 0, default color: black)."),

    .priv_size     = sizeof(PadContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,

    .inputs        = avfilter_vf_pad_inputs,

    .outputs       = avfilter_vf_pad_outputs,
    .priv_class    = &pad_class,
    .shorthand     = shorthand,
};
libavfilter/vf_tile.c
@@ -65,14 +65,6 @@ AVFILTER_DEFINE_CLASS(tile);
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    TileContext *tile = ctx->priv;
    static const char *shorthand[] = { "layout", "nb_frames", "margin", "padding", NULL };
    int ret;

    tile->class = &tile_class;
    av_opt_set_defaults(tile);

    if ((ret = av_opt_set_from_string(tile, args, shorthand, "=", ":")) < 0)
        return ret;

    if (tile->w > REASONABLE_SIZE || tile->h > REASONABLE_SIZE) {
        av_log(ctx, AV_LOG_ERROR, "Tile size %ux%u is insane.\n",
@@ -243,6 +235,9 @@ static const AVFilterPad tile_outputs[] = {
    { NULL }
};

static const char *const shorthand[] =
    { "layout", "nb_frames", "margin", "padding", NULL };

AVFilter avfilter_vf_tile = {
    .name        = "tile",
    .description = NULL_IF_CONFIG_SMALL("Tile several successive frames together."),
@@ -252,4 +247,5 @@ AVFilter avfilter_vf_tile = {
    .inputs      = tile_inputs,
    .outputs     = tile_outputs,
    .priv_class  = &tile_class,
    .shorthand   = shorthand,
};
libavfilter/vf_transpose.c
@@ -73,17 +73,6 @@ static const AVOption transpose_options[] = {

AVFILTER_DEFINE_CLASS(transpose);

static av_cold int init(AVFilterContext *ctx, const char *args)
{
    TransContext *trans = ctx->priv;
    const char *shorthand[] = { "dir", "passthrough", NULL };

    trans->class = &transpose_class;
    av_opt_set_defaults(trans);

    return av_opt_set_from_string(trans, args, shorthand, "=", ":");
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *pix_fmts = NULL;
@@ -266,11 +255,12 @@ static const AVFilterPad avfilter_vf_transpose_outputs[] = {
    { NULL }
};

static const char *const shorthand[] = { "dir", "passthrough", NULL };

AVFilter avfilter_vf_transpose = {
    .name          = "transpose",
    .description   = NULL_IF_CONFIG_SMALL("Transpose input video."),

    .init          = init,
    .priv_size     = sizeof(TransContext),

    .query_formats = query_formats,
@@ -278,4 +268,5 @@ AVFilter avfilter_vf_transpose = {
    .inputs        = avfilter_vf_transpose_inputs,
    .outputs       = avfilter_vf_transpose_outputs,
    .priv_class    = &transpose_class,
    .shorthand     = shorthand,
};
libavfilter/vf_unsharp.c
@@ -169,18 +169,6 @@ static void set_filter_param(FilterParam *fp, int msize_x, int msize_y, double a
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    UnsharpContext *unsharp = ctx->priv;
    static const char *shorthand[] = {
        "luma_msize_x", "luma_msize_y", "luma_amount",
        "chroma_msize_x", "chroma_msize_y", "chroma_amount",
        NULL
    };
    int ret;

    unsharp->class = &unsharp_class;
    av_opt_set_defaults(unsharp);

    if ((ret = av_opt_set_from_string(unsharp, args, shorthand, "=", ":")) < 0)
        return ret;

    set_filter_param(&unsharp->luma, unsharp->luma_msize_x, unsharp->luma_msize_y, unsharp->luma_amount);
    set_filter_param(&unsharp->chroma, unsharp->chroma_msize_x, unsharp->chroma_msize_y, unsharp->chroma_amount);
@@ -256,7 +244,6 @@ static av_cold void uninit(AVFilterContext *ctx)

    free_filter_param(&unsharp->luma);
    free_filter_param(&unsharp->chroma);
    av_opt_free(unsharp);
}

static int filter_frame(AVFilterLink *link, AVFrame *in)
@@ -300,6 +287,12 @@ static const AVFilterPad avfilter_vf_unsharp_outputs[] = {
    { NULL }
};

static const char *const shorthand[] = {
    "luma_msize_x", "luma_msize_y", "luma_amount",
    "chroma_msize_x", "chroma_msize_y", "chroma_amount",
    NULL
};

AVFilter avfilter_vf_unsharp = {
    .name        = "unsharp",
    .description = NULL_IF_CONFIG_SMALL("Sharpen or blur the input video."),
@@ -315,4 +308,5 @@ AVFilter avfilter_vf_unsharp = {
    .outputs     = avfilter_vf_unsharp_outputs,

    .priv_class  = &unsharp_class,
    .shorthand   = shorthand,
};
libavfilter/vf_yadif.c
@@ -377,7 +377,6 @@ static av_cold void uninit(AVFilterContext *ctx)
    av_frame_free(&yadif->prev);
    av_frame_free(&yadif->cur );
    av_frame_free(&yadif->next);
    av_opt_free(yadif);
}

static int query_formats(AVFilterContext *ctx)
@@ -424,14 +423,6 @@ static int query_formats(AVFilterContext *ctx)
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    YADIFContext *yadif = ctx->priv;
    static const char *shorthand[] = { "mode", "parity", "deint", NULL };
    int ret;

    yadif->class = &yadif_class;
    av_opt_set_defaults(yadif);

    if ((ret = av_opt_set_from_string(yadif, args, shorthand, "=", ":")) < 0)
        return ret;

    av_log(ctx, AV_LOG_VERBOSE, "mode:%d parity:%d deint:%d\n",
           yadif->mode, yadif->parity, yadif->deint);
@@ -491,6 +482,8 @@ static const AVFilterPad avfilter_vf_yadif_outputs[] = {
    { NULL }
};

static const char *const shorthand[] = { "mode", "parity", "deint", NULL };

AVFilter avfilter_vf_yadif = {
    .name        = "yadif",
    .description = NULL_IF_CONFIG_SMALL("Deinterlace the input image."),
@@ -504,4 +497,5 @@ AVFilter avfilter_vf_yadif = {
    .outputs     = avfilter_vf_yadif_outputs,

    .priv_class  = &yadif_class,
    .shorthand   = shorthand,
};
libavformat/concatdec.c
@@ -37,6 +37,7 @@ typedef struct {
    unsigned nb_files;
    AVFormatContext *avf;
    int safe;
    int seekable;
} ConcatContext;

static int concat_probe(AVProbeData *probe)
@@ -128,6 +129,8 @@ static int open_file(AVFormatContext *avf, unsigned fileno)
    ConcatFile *file = &cat->files[fileno];
    int ret;

    if (cat->avf)
        avformat_close_input(&cat->avf);
    if ((ret = avformat_open_input(&cat->avf, file->url, NULL, NULL)) < 0 ||
        (ret = avformat_find_stream_info(cat->avf, NULL)) < 0) {
        av_log(avf, AV_LOG_ERROR, "Impossible to open '%s'\n", file->url);
@@ -223,8 +226,10 @@ static int concat_read_header(AVFormatContext *avf)
            break;
        time += cat->files[i].duration;
    }
    if (i == cat->nb_files)
    if (i == cat->nb_files) {
        avf->duration = time;
        cat->seekable = 1;
    }

    if ((ret = open_file(avf, 0)) < 0)
        FAIL(ret);
@@ -257,7 +262,6 @@ static int open_next_file(AVFormatContext *avf)

    if (++fileno >= cat->nb_files)
        return AVERROR_EOF;
    avformat_close_input(&cat->avf);
    return open_file(avf, fileno);
}

@@ -282,6 +286,95 @@ static int concat_read_packet(AVFormatContext *avf, AVPacket *pkt)
    return ret;
}

static void rescale_interval(AVRational tb_in, AVRational tb_out,
                             int64_t *min_ts, int64_t *ts, int64_t *max_ts)
{
    *ts     = av_rescale_q    (*    ts, tb_in, tb_out);
    *min_ts = av_rescale_q_rnd(*min_ts, tb_in, tb_out,
                               AV_ROUND_UP   | AV_ROUND_PASS_MINMAX);
    *max_ts = av_rescale_q_rnd(*max_ts, tb_in, tb_out,
                               AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
}

static int try_seek(AVFormatContext *avf, int stream,
                    int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    ConcatContext *cat = avf->priv_data;
    int64_t t0 = cat->cur_file->start_time - cat->avf->start_time;

    ts -= t0;
    min_ts = min_ts == INT64_MIN ? INT64_MIN : min_ts - t0;
    max_ts = max_ts == INT64_MAX ? INT64_MAX : max_ts - t0;
    if (stream >= 0) {
        if (stream >= cat->avf->nb_streams)
            return AVERROR(EIO);
        rescale_interval(AV_TIME_BASE_Q, cat->avf->streams[stream]->time_base,
                         &min_ts, &ts, &max_ts);
    }
    return avformat_seek_file(cat->avf, stream, min_ts, ts, max_ts, flags);
}

static int real_seek(AVFormatContext *avf, int stream,
                     int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    ConcatContext *cat = avf->priv_data;
    int ret, left, right;

    if (stream >= 0) {
        if (stream >= avf->nb_streams)
            return AVERROR(EINVAL);
        rescale_interval(avf->streams[stream]->time_base, AV_TIME_BASE_Q,
                         &min_ts, &ts, &max_ts);
    }

    left  = 0;
    right = cat->nb_files;
    while (right - left > 1) {
        int mid = (left + right) / 2;
        if (ts < cat->files[mid].start_time)
            right = mid;
        else
            left  = mid;
    }

    if ((ret = open_file(avf, left)) < 0)
        return ret;

    ret = try_seek(avf, stream, min_ts, ts, max_ts, flags);
    if (ret < 0 && !(flags & AVSEEK_FLAG_BACKWARD) &&
        left < cat->nb_files - 1 &&
        cat->files[left + 1].start_time < max_ts) {
        if ((ret = open_file(avf, left + 1)) < 0)
            return ret;
        ret = try_seek(avf, stream, min_ts, ts, max_ts, flags);
    }
    return ret;
}

static int concat_seek(AVFormatContext *avf, int stream,
                       int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    ConcatContext *cat = avf->priv_data;
    ConcatFile *cur_file_saved = cat->cur_file;
    AVFormatContext *cur_avf_saved = cat->avf;
    int ret;

    if (!cat->seekable)
        return AVERROR(ESPIPE); /* XXX: can we use it? */
    if (flags & (AVSEEK_FLAG_BYTE | AVSEEK_FLAG_FRAME))
        return AVERROR(ENOSYS);
    cat->avf = NULL;
    if ((ret = real_seek(avf, stream, min_ts, ts, max_ts, flags)) < 0) {
        if (cat->avf)
            avformat_close_input(&cat->avf);
        cat->avf      = cur_avf_saved;
        cat->cur_file = cur_file_saved;
    } else {
        avformat_close_input(&cur_avf_saved);
    }
    return ret;
}

#define OFFSET(x) offsetof(ConcatContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM

@@ -307,5 +400,6 @@ AVInputFormat ff_concat_demuxer = {
    .read_header    = concat_read_header,
    .read_packet    = concat_read_packet,
    .read_close     = concat_read_close,
    .read_seek2     = concat_seek,
    .priv_class     = &concat_class,
};
libavformat/mpeg.c
@@ -805,6 +805,8 @@ end:
    return ret;
}

#define FAIL(r) do { ret = r; goto fail; } while (0)

static int vobsub_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    MpegDemuxContext *vobsub = s->priv_data;
@@ -838,7 +840,7 @@ static int vobsub_read_packet(AVFormatContext *s, AVPacket *pkt)

        ret = mpegps_read_pes_header(vobsub->sub_ctx, NULL, &startcode, &pts, &dts);
        if (ret < 0)
            return ret;
            FAIL(ret);
        to_read = ret & 0xffff;

        /* this prevents reads above the current packet */
@@ -855,7 +857,7 @@ static int vobsub_read_packet(AVFormatContext *s, AVPacket *pkt)

        ret = av_grow_packet(pkt, to_read);
        if (ret < 0)
            return ret;
            FAIL(ret);

        n = avio_read(pb, pkt->data + (pkt->size - to_read), to_read);
        if (n < to_read)
@@ -870,7 +872,12 @@ static int vobsub_read_packet(AVFormatContext *s, AVPacket *pkt)
    pkt->pos = idx_pkt.pos;
    pkt->stream_index = idx_pkt.stream_index;

    av_free_packet(&idx_pkt);
    return 0;

fail:
    av_free_packet(&idx_pkt);
    return ret;
}

static int vobsub_read_seek(AVFormatContext *s, int stream_index,
libavutil/opt.c
@@ -77,6 +77,7 @@ static int read_number(const AVOption *o, void *dst, double *num, int *den, int6
    case AV_OPT_TYPE_PIXEL_FMT:
    case AV_OPT_TYPE_SAMPLE_FMT:
    case AV_OPT_TYPE_INT:       *intnum = *(int *)dst;return 0;
    case AV_OPT_TYPE_DURATION:
    case AV_OPT_TYPE_INT64:     *intnum = *(int64_t *)dst;return 0;
    case AV_OPT_TYPE_FLOAT:     *num    = *(float *)dst;return 0;
    case AV_OPT_TYPE_DOUBLE:    *num    = *(double *)dst;return 0;
@@ -101,6 +102,7 @@ static int write_number(void *obj, const AVOption *o, void *dst, double num, int
    case AV_OPT_TYPE_PIXEL_FMT:
    case AV_OPT_TYPE_SAMPLE_FMT:
    case AV_OPT_TYPE_INT:   *(int *)dst= llrint(num/den)*intnum; break;
    case AV_OPT_TYPE_DURATION:
    case AV_OPT_TYPE_INT64: *(int64_t *)dst= llrint(num/den)*intnum; break;
    case AV_OPT_TYPE_FLOAT: *(float *)dst= num*intnum/den; break;
    case AV_OPT_TYPE_DOUBLE:*(double *)dst= num*intnum/den; break;
@@ -256,7 +258,8 @@ int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
        return AVERROR_OPTION_NOT_FOUND;
    if (!val && (o->type != AV_OPT_TYPE_STRING &&
                 o->type != AV_OPT_TYPE_PIXEL_FMT && o->type != AV_OPT_TYPE_SAMPLE_FMT &&
                 o->type != AV_OPT_TYPE_IMAGE_SIZE && o->type != AV_OPT_TYPE_VIDEO_RATE))
                 o->type != AV_OPT_TYPE_IMAGE_SIZE && o->type != AV_OPT_TYPE_VIDEO_RATE &&
                 o->type != AV_OPT_TYPE_DURATION))
        return AVERROR(EINVAL);

    dst = ((uint8_t*)target_obj) + o->offset;
@@ -319,6 +322,15 @@ int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
        }
        *(enum AVSampleFormat *)dst = ret;
        return 0;
    case AV_OPT_TYPE_DURATION:
        if (!val) {
            *(int64_t *)dst = 0;
            return 0;
        } else {
            if ((ret = av_parse_time(dst, val, 1)) < 0)
                av_log(obj, AV_LOG_ERROR, "Unable to parse option value \"%s\" as duration\n", val);
            return ret;
        }
    }

    av_log(obj, AV_LOG_ERROR, "Invalid option type.\n");
@@ -556,6 +568,7 @@ int av_opt_get(void *obj, const char *name, int search_flags, uint8_t **out_val)
    const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
    uint8_t *bin, buf[128];
    int len, i, ret;
    int64_t i64;

    if (!o || !target_obj || (o->offset<=0 && o->type != AV_OPT_TYPE_CONST))
        return AVERROR_OPTION_NOT_FOUND;
@@ -597,6 +610,12 @@ int av_opt_get(void *obj, const char *name, int search_flags, uint8_t **out_val)
    case AV_OPT_TYPE_SAMPLE_FMT:
        ret = snprintf(buf, sizeof(buf), "%s", (char *)av_x_if_null(av_get_sample_fmt_name(*(enum AVSampleFormat *)dst), "none"));
        break;
    case AV_OPT_TYPE_DURATION:
        i64 = *(int64_t *)dst;
        ret = snprintf(buf, sizeof(buf), "%"PRIi64"d:%02d:%02d.%06d",
                       i64 / 3600000000, (int)((i64 / 60000000) % 60),
                       (int)((i64 / 1000000) % 60), (int)(i64 % 1000000));
        break;
    default:
        return AVERROR(EINVAL);
    }
@@ -861,6 +880,9 @@ static void opt_list(void *obj, void *av_log_obj, const char *unit,
            case AV_OPT_TYPE_SAMPLE_FMT:
                av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<sample_fmt>");
                break;
            case AV_OPT_TYPE_DURATION:
                av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<duration>");
                break;
            case AV_OPT_TYPE_CONST:
            default:
                av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "");
@@ -937,6 +959,7 @@ void av_opt_set_defaults2(void *s, int mask, int flags)
            case AV_OPT_TYPE_FLAGS:
            case AV_OPT_TYPE_INT:
            case AV_OPT_TYPE_INT64:
            case AV_OPT_TYPE_DURATION:
                av_opt_set_int(s, opt->name, opt->default_val.i64, 0);
                break;
            case AV_OPT_TYPE_DOUBLE:
@@ -1300,6 +1323,7 @@ int av_opt_query_ranges_default(AVOptionRanges **ranges_arg, void *obj, const ch
    case AV_OPT_TYPE_SAMPLE_FMT:
    case AV_OPT_TYPE_FLOAT:
    case AV_OPT_TYPE_DOUBLE:
    case AV_OPT_TYPE_DURATION:
        break;
    case AV_OPT_TYPE_STRING:
        range->component_min = 0;
@@ -1365,6 +1389,7 @@ typedef struct TestContext
    int w, h;
    enum AVPixelFormat pix_fmt;
    enum AVSampleFormat sample_fmt;
    int64_t duration;
} TestContext;

#define OFFSET(x) offsetof(TestContext, x)
@@ -1386,6 +1411,7 @@ static const AVOption test_options[]= {
{"pix_fmt", "set pixfmt", OFFSET(pix_fmt), AV_OPT_TYPE_PIXEL_FMT, {.i64 = AV_PIX_FMT_NONE}, -1, AV_PIX_FMT_NB-1},
{"sample_fmt", "set samplefmt", OFFSET(sample_fmt), AV_OPT_TYPE_SAMPLE_FMT, {.i64 = AV_SAMPLE_FMT_NONE}, -1, AV_SAMPLE_FMT_NB-1},
{"video_rate", "set videorate", OFFSET(video_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0 },
{"duration", "set duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0}, 0, INT64_MAX},
{NULL},
};

@@ -1441,6 +1467,9 @@ int main(void)
            "video_rate=30000/1001",
            "video_rate=30/1.001",
            "video_rate=bogus",
            "duration=bogus",
            "duration=123.45",
            "duration=1\\:23\\:45.67",
        };

        test_ctx.class = &test_class;
libavutil/opt.h
@@ -231,6 +231,7 @@ enum AVOptionType{
    AV_OPT_TYPE_PIXEL_FMT  = MKBETAG('P','F','M','T'),
    AV_OPT_TYPE_SAMPLE_FMT = MKBETAG('S','F','M','T'),
    AV_OPT_TYPE_VIDEO_RATE = MKBETAG('V','R','A','T'), ///< offset must point to AVRational
    AV_OPT_TYPE_DURATION   = MKBETAG('D','U','R',' '),
#if FF_API_OLD_AVOPTIONS
    FF_OPT_TYPE_FLAGS = 0,
    FF_OPT_TYPE_INT,
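To illustrate the value syntax accepted by the new AV_OPT_TYPE_DURATION, a small standalone sketch follows (not part of the patch; the sample strings are arbitrary). av_opt_set() hands the value string to av_parse_time() with its duration flag set and stores the result, in microseconds, in the option's int64_t field:

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/parseutils.h>

int main(void)
{
    /* both plain seconds and [HH:]MM:SS forms are accepted */
    static const char *const samples[] = { "123.45", "1:23:45.67", "42" };
    int64_t us;
    int i;

    for (i = 0; i < 3; i++) {
        if (av_parse_time(&us, samples[i], 1) < 0) {
            printf("%-12s -> invalid\n", samples[i]);
            continue;
        }
        printf("%-12s -> %" PRId64 " microseconds\n", samples[i], us);
    }
    return 0;
}

This is why the opt-test table above exercises "duration=123.45" and "duration=1\:23\:45.67" alongside the deliberately invalid "duration=bogus".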
libavutil/version.h
@@ -75,7 +75,7 @@
 */

#define LIBAVUTIL_VERSION_MAJOR  52
#define LIBAVUTIL_VERSION_MINOR  21
#define LIBAVUTIL_VERSION_MINOR  22
#define LIBAVUTIL_VERSION_MICRO 100

#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \