
avfilter: ported lenscorrection filter from frei0r

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
This commit is contained in:
Daniel Oberhoff 2014-08-12 00:52:45 +02:00 committed by Michael Niedermayer
parent 52b81ff463
commit 9f617a14a0
6 changed files with 248 additions and 0 deletions

Changelog

@@ -3,6 +3,7 @@ releases are sorted from youngest to oldest.
version <next>:
- Icecast protocol
- ported lenscorrection filter from frei0r
version 2.3:

MAINTAINERS

@@ -354,6 +354,7 @@ Filters:
vf_histogram.c Paul B Mahol
vf_hqx.c Clément Bœsch
vf_il.c Paul B Mahol
vf_lenscorrection.c Daniel Oberhoff
vf_mergeplanes.c Paul B Mahol
vf_psnr.c Paul B Mahol
vf_scale.c Michael Niedermayer

doc/filters.texi

@@ -5546,6 +5546,51 @@ kerndeint=map=1
@end example
@end itemize

@section lenscorrection

Correct radial lens distortion.

This filter can be used to correct for radial distortion as can result from the use
of wide-angle lenses, and thereby re-rectify the image. To find the right parameters
you can use tools available, for example, as part of OpenCV, or simply resort to
trial and error. With OpenCV, use the calibration sample (under samples/cpp in the
OpenCV sources) and extract the k1 and k2 coefficients from the resulting matrix.

Note that effectively the same filter is available in the open-source tools Krita and
Digikam from the KDE project.

In contrast to the @ref{vignette} filter, which can also be used to compensate lens
errors, this filter corrects the distortion of the image, whereas @ref{vignette}
corrects the brightness distribution. You may therefore want to use both filters
together in certain cases, taking care of the ordering, i.e. whether vignetting
should be applied before or after lens correction.

@subsection Options

The filter accepts the following options:

@table @option
@item cx
Relative x-coordinate of the focal point of the image, and thereby the center of the
distortion. This value has a range [0,1] and is expressed as a fraction of the image
width.
@item cy
Relative y-coordinate of the focal point of the image, and thereby the center of the
distortion. This value has a range [0,1] and is expressed as a fraction of the image
height.
@item k1
Coefficient of the quadratic correction term. This value has a range [-1,1].
0 means no correction.
@item k2
Coefficient of the double quadratic correction term. This value has a range [-1,1].
0 means no correction.
@end table

The formula that generates the correction is:

@var{r_src} = @var{r_tgt} * (1 + @var{k1} * (@var{r_tgt} / @var{r_0})^2 + @var{k2} * (@var{r_tgt} / @var{r_0})^4)

where @var{r_0} is half of the image diagonal and @var{r_src} and @var{r_tgt} are the
distances from the focal point in the source and target images, respectively.
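
For example, the following @command{ffmpeg} invocation keeps the distortion centered
in the middle of the frame and applies a mild correction; the coefficient values here
are purely illustrative and would normally come from a calibration step:

@example
ffmpeg -i input.mp4 -vf "lenscorrection=cx=0.5:cy=0.5:k1=-0.227:k2=-0.022" output.mp4
@end example
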
@anchor{lut3d}
@section lut3d
@@ -8758,6 +8803,7 @@ For example, to vertically flip a video with @command{ffmpeg}:
ffmpeg -i in.avi -vf "vflip" out.avi
@end example
@anchor{vignette}
@section vignette
Make or reverse a natural vignetting effect.

libavfilter/Makefile

@@ -138,6 +138,7 @@ OBJS-$(CONFIG_IL_FILTER) += vf_il.o
OBJS-$(CONFIG_INTERLACE_FILTER) += vf_interlace.o
OBJS-$(CONFIG_INTERLEAVE_FILTER) += f_interleave.o
OBJS-$(CONFIG_KERNDEINT_FILTER) += vf_kerndeint.o
OBJS-$(CONFIG_LENSCORRECTION_FILTER) += vf_lenscorrection.o
OBJS-$(CONFIG_LUT3D_FILTER) += vf_lut3d.o
OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o

libavfilter/allfilters.c

@@ -156,6 +156,7 @@ void avfilter_register_all(void)
REGISTER_FILTER(INTERLACE, interlace, vf);
REGISTER_FILTER(INTERLEAVE, interleave, vf);
REGISTER_FILTER(KERNDEINT, kerndeint, vf);
REGISTER_FILTER(LENSCORRECTION, lenscorrection, vf);
REGISTER_FILTER(LUT3D, lut3d, vf);
REGISTER_FILTER(LUT, lut, vf);
REGISTER_FILTER(LUTRGB, lutrgb, vf);

libavfilter/vf_lenscorrection.c

@@ -0,0 +1,198 @@
/*
* Copyright (C) 2007 Richard Spindler (author of frei0r plugin from which this was derived)
* Copyright (C) 2014 Daniel Oberhoff
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Lenscorrection filter, algorithm from the frei0r plugin with the same name
*/
#include <stdlib.h>
#include <math.h>
#include "libavutil/opt.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
typedef struct LenscorrectionCtx {
const AVClass *av_class;
unsigned int width;
unsigned int height;
int hsub, vsub;
int nb_planes;
double cx, cy, k1, k2;
} LenscorrectionCtx;
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption lenscorrection_options[] = {
{ "cx", "set relative center x", offsetof(LenscorrectionCtx, cx), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 1, .flags=FLAGS },
{ "cy", "set relative center y", offsetof(LenscorrectionCtx, cy), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 1, .flags=FLAGS },
{ "k1", "set quadratic distortion factor", offsetof(LenscorrectionCtx, k1), AV_OPT_TYPE_DOUBLE, {.dbl=0.0}, -1, 1, .flags=FLAGS },
{ "k2", "set double quadratic distortion factor", offsetof(LenscorrectionCtx, k2), AV_OPT_TYPE_DOUBLE, {.dbl=0.0}, -1, 1, .flags=FLAGS },
{ NULL }
};
AVFILTER_DEFINE_CLASS(lenscorrection);
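
/* per-plane parameters passed to each filter_slice() job by filter_frame() */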
typedef struct ThreadData {
AVFrame *in, *out;
float w, h;
int plane;
float xcenter, ycenter;
float k1, k2;
} ThreadData;
static int filter_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
{
ThreadData *td = (ThreadData*)arg;
AVFrame *in = td->in;
AVFrame *out = td->out;
const float w = td->w, h = td->h;
const float xcenter = td->xcenter;
const float ycenter = td->ycenter;
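    /* 1/r_0^2, where r_0 is half of the plane diagonal; r2 in the inner loop
     * is therefore the normalized squared radius (r_tgt/r_0)^2 used by the
     * formula in the documentation */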
const float r2inv = 4.0 / (w * w + h * h);
const float k1 = td->k1;
const float k2 = td->k2;
const int start = (h * job ) / nb_jobs;
const int end = (h * (job+1)) / nb_jobs;
const int plane = td->plane;
const int inlinesize = in->linesize[plane];
const int outlinesize = out->linesize[plane];
const uint8_t *indata = in->data[plane];
uint8_t *outrow = out->data[plane] + start * outlinesize;
int i;
for (i = start; i < end; i++, outrow += outlinesize) {
const float off_y = i - ycenter;
const float off_y2 = off_y * off_y;
uint8_t *out = outrow;
int j;
for (j = 0; j < w; j++) {
const float off_x = j - xcenter;
const float r2 = (off_x * off_x + off_y2) * r2inv;
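            /* scale factor from the correction polynomial:
             * r_src/r_tgt = 1 + k1*(r_tgt/r_0)^2 + k2*(r_tgt/r_0)^4.
             * Target pixels whose source position falls outside the plane
             * are set to 0. */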
const float radius_mult = 1.0f + r2 * k1 + r2 * r2 * k2;
const int x = xcenter + radius_mult * off_x + 0.5f;
const int y = ycenter + radius_mult * off_y + 0.5f;
const char isvalid = x > 0 && x < w - 1 && y > 0 && y < h - 1;
*out++ = isvalid ? indata[y * inlinesize + x] : 0;
}
}
return 0;
}
static int query_formats(AVFilterContext *ctx)
{
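    /* filter_slice() addresses samples bytewise, so only 8-bit planar YUV
     * formats are accepted */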
static enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV410P,
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_YUV422P,
AV_PIX_FMT_NONE
};
    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}
static int config_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
LenscorrectionCtx *rect = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(inlink->format);
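    /* log2 chroma subsampling factors; filter_frame() uses them to compute
     * per-plane dimensions */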
rect->hsub = pixdesc->log2_chroma_w;
rect->vsub = pixdesc->log2_chroma_h;
outlink->w = rect->width = inlink->w;
outlink->h = rect->height = inlink->h;
rect->nb_planes = av_pix_fmt_count_planes(inlink->format);
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
LenscorrectionCtx *rect = (LenscorrectionCtx*)ctx->priv;
AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
int plane;
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
for (plane = 0; plane < rect->nb_planes; ++plane) {
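        /* planes 1 and 2 are chroma and may be subsampled, so the geometry
         * is computed per plane */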
int hsub = plane == 1 || plane == 2 ? rect->hsub : 0;
int vsub = plane == 1 || plane == 2 ? rect->vsub : 0;
float hdiv = 1 << hsub;
float vdiv = 1 << vsub;
float w = rect->width / hdiv;
float h = rect->height / vdiv;
ThreadData td = {
.in = in,
.out = out,
.w = w,
.h = h,
.xcenter = rect->cx * w,
.ycenter = rect->cy * h,
.k1 = rect->k1,
.k2 = rect->k2,
.plane = plane};
ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(h, ctx->graph->nb_threads));
}
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
static const AVFilterPad lenscorrection_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
static const AVFilterPad lenscorrection_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_props,
},
{ NULL }
};
AVFilter ff_vf_lenscorrection = {
.name = "lenscorrection",
.description = NULL_IF_CONFIG_SMALL("Rectify the image by correcting for lens distortion."),
.priv_size = sizeof(LenscorrectionCtx),
.query_formats = query_formats,
.inputs = lenscorrection_inputs,
.outputs = lenscorrection_outputs,
.priv_class = &lenscorrection_class,
.flags = AVFILTER_FLAG_SLICE_THREADS,
};