mirror of https://github.com/FFmpeg/FFmpeg.git
lavfi/perspective: Add basic timeline editing.
Add the number of input and output frames to the available expression variables. Add an eval option that controls whether the coordinate expressions are evaluated once during initialization or re-evaluated for every frame.
This commit is contained in:
parent d97a61a8f1
commit 37a4d3383f
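For illustration only (not part of this commit), a filtergraph along these lines would exercise the new behaviour; the file names are placeholders and the expression values are arbitrary:

    ffmpeg -i input.mp4 -vf "perspective=x0=in:y0=in:eval=frame" output.mp4

With eval=frame the coordinate expressions are re-evaluated for every frame, so the top-left coordinate here advances by one pixel per input frame; with the default eval=init they are evaluated only once.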
doc/filters.texi
@@ -10128,6 +10128,10 @@ The expressions can use the following variables:
 @item W
 @item H
 the width and height of video frame.
+@item in
+Input frame count.
+@item on
+Output frame count.
 @end table

 @item interpolation
@@ -10158,6 +10162,21 @@ by the given coordinates.

 Default value is @samp{source}.
 @end table
+
+@item eval
+Set when the expressions for coordinates @option{x0,y0,...x3,y3} are evaluated.
+
+It accepts the following values:
+@table @samp
+@item init
+only evaluate expressions once during the filter initialization or
+when a command is processed
+
+@item frame
+evaluate expressions for each incoming frame
+@end table
+
+Default value is @samp{init}.
 @end table

 @section phase
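As a sketch of how the documented option might be shown in use (illustrative only, not part of the patch), an @example along these lines would fit the section above:

@example
perspective=x0=on:y0=0:eval=frame
@end example

Here @samp{eval=frame} re-evaluates the coordinate expressions for every frame, so @samp{x0} grows with the output frame count @samp{on}; with the default @samp{eval=init} the expressions are evaluated only once during filter initialization.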
libavfilter/vf_perspective.c
@@ -48,6 +48,7 @@ typedef struct PerspectiveContext {
     int hsub, vsub;
     int nb_planes;
     int sense;
+    int eval_mode;

     int (*perspective)(AVFilterContext *ctx,
                        void *arg, int job, int nb_jobs);
@@ -61,6 +62,12 @@ enum PERSPECTIVESense {
     PERSPECTIVE_SENSE_DESTINATION = 1, ///< coordinates give locations in destination of corners of source.
 };

+enum EvalMode {
+    EVAL_MODE_INIT,
+    EVAL_MODE_FRAME,
+    EVAL_MODE_NB
+};
+
 static const AVOption perspective_options[] = {
     { "x0", "set top left x coordinate", OFFSET(expr_str[0][0]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
     { "y0", "set top left y coordinate", OFFSET(expr_str[0][1]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
@@ -78,6 +85,9 @@ static const AVOption perspective_options[] = {
       0, AV_OPT_TYPE_CONST, {.i64=PERSPECTIVE_SENSE_SOURCE}, 0, 0, FLAGS, "sense"},
     { "destination", "specify locations in destination to send corners of source",
       0, AV_OPT_TYPE_CONST, {.i64=PERSPECTIVE_SENSE_DESTINATION}, 0, 0, FLAGS, "sense"},
+    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
+    { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
+    { "frame", "eval expressions per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },

     { NULL }
 };
@@ -115,20 +125,22 @@ static inline double get_coeff(double d)
     return coeff;
 }

-static const char *const var_names[] = { "W", "H", NULL };
-enum { VAR_W, VAR_H, VAR_VARS_NB };
+static const char *const var_names[] = { "W", "H", "in", "on", NULL };
+enum { VAR_W, VAR_H, VAR_IN, VAR_ON, VAR_VARS_NB };

-static int config_input(AVFilterLink *inlink)
+static int calc_persp_luts(AVFilterContext *ctx, AVFilterLink *inlink)
 {
+    PerspectiveContext *s = ctx->priv;
+    AVFilterLink *outlink = ctx->outputs[0];
+    double (*ref)[2] = s->ref;
+
+    double values[VAR_VARS_NB] = { [VAR_W] = inlink->w, [VAR_H] = inlink->h,
+                                   [VAR_IN] = inlink->frame_count + 1,
+                                   [VAR_ON] = outlink->frame_count + 1 };
+    const int h = values[VAR_H];
+    const int w = values[VAR_W];
     double x0, x1, x2, x3, x4, x5, x6, x7, x8, q;
     double t0, t1, t2, t3;
-    AVFilterContext *ctx = inlink->dst;
-    PerspectiveContext *s = ctx->priv;
-    double (*ref)[2] = s->ref;
-    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
-    double values[VAR_VARS_NB] = { [VAR_W] = inlink->w, [VAR_H] = inlink->h };
-    int h = inlink->h;
-    int w = inlink->w;
     int x, y, i, j, ret;

     for (i = 0; i < 4; i++) {
@@ -144,19 +156,6 @@ static int config_input(AVFilterLink *inlink)
         }
     }

-    s->hsub = desc->log2_chroma_w;
-    s->vsub = desc->log2_chroma_h;
-    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
-    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
-        return ret;
-
-    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
-    s->height[0] = s->height[3] = inlink->h;
-
-    s->pv = av_realloc_f(s->pv, w * h, 2 * sizeof(*s->pv));
-    if (!s->pv)
-        return AVERROR(ENOMEM);
-
     switch (s->sense) {
     case PERSPECTIVE_SENSE_SOURCE:
         x6 = ((ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
@@ -223,6 +222,36 @@ static int config_input(AVFilterLink *inlink)
         }
     }

+    return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+    AVFilterContext *ctx = inlink->dst;
+    PerspectiveContext *s = ctx->priv;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+    int h = inlink->h;
+    int w = inlink->w;
+    int i, j, ret;
+    s->hsub = desc->log2_chroma_w;
+    s->vsub = desc->log2_chroma_h;
+    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
+        return ret;
+
+    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+    s->height[0] = s->height[3] = inlink->h;
+
+    s->pv = av_realloc_f(s->pv, w * h, 2 * sizeof(*s->pv));
+    if (!s->pv)
+        return AVERROR(ENOMEM);
+
+    if (s->eval_mode == EVAL_MODE_INIT) {
+        if ((ret = calc_persp_luts(ctx, inlink)) < 0) {
+            return ret;
+        }
+    }
+
     for (i = 0; i < SUB_PIXELS; i++){
         double d = i / (double)SUB_PIXELS;
         double temp[4];
@@ -423,6 +452,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
     PerspectiveContext *s = ctx->priv;
     AVFrame *out;
     int plane;
+    int ret;

     out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
     if (!out) {
@@ -431,6 +461,12 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
     }
     av_frame_copy_props(out, frame);

+    if (s->eval_mode == EVAL_MODE_FRAME) {
+        if ((ret = calc_persp_luts(ctx, inlink)) < 0) {
+            return ret;
+        }
+    }
+
     for (plane = 0; plane < s->nb_planes; plane++) {
         int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
         int vsub = plane == 1 || plane == 2 ? s->vsub : 0;