mirror of https://github.com/FFmpeg/FFmpeg.git (synced 2024-11-21 10:55:51 +02:00)
ac627b3d38

* commit '716d413c13981da15323c7a3821860536eefdbbb':
  Replace PIX_FMT_* -> AV_PIX_FMT_*, PixelFormat -> AVPixelFormat

Conflicts:
  doc/examples/muxing.c ffmpeg.h ffmpeg_filter.c ffmpeg_opt.c ffplay.c ffprobe.c
  libavcodec/8bps.c libavcodec/aasc.c libavcodec/aura.c libavcodec/avcodec.h
  libavcodec/avs.c libavcodec/bfi.c libavcodec/bmp.c libavcodec/bmpenc.c
  libavcodec/c93.c libavcodec/cscd.c libavcodec/cyuv.c libavcodec/dpx.c
  libavcodec/dpxenc.c libavcodec/eatgv.c libavcodec/escape124.c libavcodec/ffv1.c
  libavcodec/flashsv.c libavcodec/fraps.c libavcodec/h264.c libavcodec/huffyuv.c
  libavcodec/iff.c libavcodec/imgconvert.c libavcodec/indeo3.c libavcodec/kmvc.c
  libavcodec/libopenjpegdec.c libavcodec/libopenjpegenc.c libavcodec/libx264.c
  libavcodec/ljpegenc.c libavcodec/mjpegdec.c libavcodec/mjpegenc.c
  libavcodec/motionpixels.c libavcodec/mpeg12.c libavcodec/mpeg12enc.c
  libavcodec/mpeg4videodec.c libavcodec/mpegvideo_enc.c libavcodec/pamenc.c
  libavcodec/pcxenc.c libavcodec/pgssubdec.c libavcodec/pngdec.c libavcodec/pngenc.c
  libavcodec/pnm.c libavcodec/pnmdec.c libavcodec/pnmenc.c libavcodec/ptx.c
  libavcodec/qdrw.c libavcodec/qpeg.c libavcodec/qtrleenc.c libavcodec/raw.c
  libavcodec/rawdec.c libavcodec/rl2.c libavcodec/sgidec.c libavcodec/sgienc.c
  libavcodec/snowdec.c libavcodec/snowenc.c libavcodec/sunrast.c libavcodec/targa.c
  libavcodec/targaenc.c libavcodec/tiff.c libavcodec/tiffenc.c libavcodec/tmv.c
  libavcodec/truemotion2.c libavcodec/utils.c libavcodec/vb.c libavcodec/vp3.c
  libavcodec/wnv1.c libavcodec/xl.c libavcodec/xwddec.c libavcodec/xwdenc.c
  libavcodec/yop.c libavdevice/v4l2.c libavdevice/x11grab.c
  libavfilter/avfilter.c libavfilter/avfilter.h libavfilter/buffersrc.c
  libavfilter/drawutils.c libavfilter/formats.c libavfilter/src_movie.c
  libavfilter/vf_ass.c libavfilter/vf_drawtext.c libavfilter/vf_fade.c
  libavfilter/vf_format.c libavfilter/vf_hflip.c libavfilter/vf_lut.c
  libavfilter/vf_overlay.c libavfilter/vf_pad.c libavfilter/vf_scale.c
  libavfilter/vf_transpose.c libavfilter/vf_yadif.c libavfilter/video.c
  libavfilter/vsrc_testsrc.c libavformat/movenc.c libavformat/mxf.h
  libavformat/utils.c libavformat/yuv4mpeg.c libavutil/imgutils.c
  libavutil/pixdesc.c libswscale/input.c libswscale/output.c
  libswscale/swscale_internal.h libswscale/swscale_unscaled.c libswscale/utils.c
  libswscale/x86/swscale_template.c libswscale/x86/yuv2rgb.c
  libswscale/x86/yuv2rgb_template.c libswscale/yuv2rgb.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
304 lines
10 KiB
C
/*
 * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2012 Jeremy Tran
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * Apply a smartblur filter to the input video
 * Ported from MPlayer libmpcodecs/vf_smartblur.c by Michael Niedermayer.
 */

#include "libavutil/pixdesc.h"
#include "libswscale/swscale.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"

#define RADIUS_MIN 0.1
#define RADIUS_MAX 5.0

#define STRENGTH_MIN -1.0
#define STRENGTH_MAX 1.0

#define THRESHOLD_MIN -30
#define THRESHOLD_MAX 30

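/* Per-plane settings: the blur parameters for either the luma or the chroma
 * planes, plus the swscale context that performs the Gaussian convolution. */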
typedef struct {
    float radius;
    float strength;
    int   threshold;
    float quality;
    struct SwsContext *filter_context;
} FilterParam;

typedef struct {
    FilterParam  luma;
    FilterParam  chroma;
    int          hsub;
    int          vsub;
    unsigned int sws_flags;
} SmartblurContext;

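/* Log an error and set 'ret' to AVERROR(EINVAL) if 'param' lies outside
 * [min, max]; 'format' is the printf conversion used to print the values. */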
#define CHECK_PARAM(param, name, min, max, format, ret)                      \
    if (param < min || param > max) {                                         \
        av_log(ctx, AV_LOG_ERROR,                                             \
               "Invalid " #name " value " #format ": "                        \
               "must be in the range [" #format ", " #format "]\n",           \
               param, min, max);                                              \
        ret = AVERROR(EINVAL);                                                \
    }

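/*
 * Parse the option string
 * "luma_radius:luma_strength:luma_threshold[:chroma_radius:chroma_strength:chroma_threshold]",
 * e.g. a command line such as "-vf smartblur=1.0:0.8:3" (hypothetical values).
 * If only the three luma values are given, the chroma settings reuse them.
 */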
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    SmartblurContext *sblur = ctx->priv;
    int n = 0, ret = 0;
    float lradius, lstrength, cradius, cstrength;
    int lthreshold, cthreshold;

    if (args)
        n = sscanf(args, "%f:%f:%d:%f:%f:%d",
                   &lradius, &lstrength, &lthreshold,
                   &cradius, &cstrength, &cthreshold);

    if (n != 3 && n != 6) {
        av_log(ctx, AV_LOG_ERROR,
               "Incorrect number of parameters or invalid syntax: "
               "must be luma_radius:luma_strength:luma_threshold"
               "[:chroma_radius:chroma_strength:chroma_threshold]\n");
        return AVERROR(EINVAL);
    }

    sblur->luma.radius    = lradius;
    sblur->luma.strength  = lstrength;
    sblur->luma.threshold = lthreshold;

    if (n == 3) {
        sblur->chroma.radius    = sblur->luma.radius;
        sblur->chroma.strength  = sblur->luma.strength;
        sblur->chroma.threshold = sblur->luma.threshold;
    } else {
        sblur->chroma.radius    = cradius;
        sblur->chroma.strength  = cstrength;
        sblur->chroma.threshold = cthreshold;
    }

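    /* The Gaussian vector quality passed to sws_getGaussianVec() and the
     * scaler flags are fixed; they are not exposed as user options here. */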
    sblur->luma.quality = sblur->chroma.quality = 3.0;
    sblur->sws_flags = SWS_BICUBIC;

    CHECK_PARAM(lradius,    luma radius,    RADIUS_MIN,    RADIUS_MAX,    %0.1f, ret)
    CHECK_PARAM(lstrength,  luma strength,  STRENGTH_MIN,  STRENGTH_MAX,  %0.1f, ret)
    CHECK_PARAM(lthreshold, luma threshold, THRESHOLD_MIN, THRESHOLD_MAX, %d,    ret)

    if (n != 3) {
        CHECK_PARAM(sblur->chroma.radius,    chroma radius,    RADIUS_MIN,    RADIUS_MAX,    %0.1f, ret)
        CHECK_PARAM(sblur->chroma.strength,  chroma strength,  STRENGTH_MIN,  STRENGTH_MAX,  %0.1f, ret)
        CHECK_PARAM(sblur->chroma.threshold, chroma threshold, THRESHOLD_MIN, THRESHOLD_MAX, %d,    ret)
    }

    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SmartblurContext *sblur = ctx->priv;

    sws_freeContext(sblur->luma.filter_context);
    sws_freeContext(sblur->chroma.filter_context);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));

    return 0;
}

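/*
 * Set up a libswscale context that convolves a single 8-bit plane with a
 * Gaussian kernel derived from 'radius' and 'strength'. No scaling or pixel
 * format conversion takes place (GRAY8 to GRAY8 at identical dimensions);
 * swscale is only used as a convolution engine.
 */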
static int alloc_sws_context(FilterParam *f, int width, int height, unsigned int flags)
{
    SwsVector *vec;
    SwsFilter sws_filter;

    vec = sws_getGaussianVec(f->radius, f->quality);

    if (!vec)
        return AVERROR(EINVAL);

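    /* Scale the normalized Gaussian by 'strength' and add the remainder to
     * the center tap, so the kernel still sums to 1: strength 1.0 gives a
     * full blur, 0.0 the identity, and negative values sharpen. */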
    sws_scaleVec(vec, f->strength);
    vec->coeff[vec->length / 2] += 1.0 - f->strength;
    sws_filter.lumH = sws_filter.lumV = vec;
    sws_filter.chrH = sws_filter.chrV = NULL;
    f->filter_context = sws_getCachedContext(NULL,
                                             width, height, AV_PIX_FMT_GRAY8,
                                             width, height, AV_PIX_FMT_GRAY8,
                                             flags, &sws_filter, NULL, NULL);

    sws_freeVec(vec);

    if (!f->filter_context)
        return AVERROR(EINVAL);

    return 0;
}

static int config_props(AVFilterLink *inlink)
{
    SmartblurContext *sblur = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format];

    sblur->hsub = desc->log2_chroma_w;
    sblur->vsub = desc->log2_chroma_h;

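    /* One context per plane type: full resolution for luma, subsampled
     * resolution for the chroma planes. */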
    alloc_sws_context(&sblur->luma, inlink->w, inlink->h, sblur->sws_flags);
    alloc_sws_context(&sblur->chroma,
                      inlink->w >> sblur->hsub, inlink->h >> sblur->vsub,
                      sblur->sws_flags);

    return 0;
}

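/*
 * Run the Gaussian convolution on one plane, then mix the result with the
 * source depending on 'threshold':
 *  - threshold == 0: keep the blurred plane unchanged;
 *  - threshold  > 0: pixels that differ from the source by more than the
 *    threshold are pulled back toward the original, so flat areas are
 *    blurred while edges are preserved;
 *  - threshold  < 0: the opposite, only pixels that differ strongly from
 *    the source keep the blurred value.
 */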
static void blur(uint8_t *dst, const int dst_linesize,
                 const uint8_t *src, const int src_linesize,
                 const int w, const int h, const int threshold,
                 struct SwsContext *filter_context)
{
    int x, y;
    int orig, filtered;
    int diff;
    /* Declare arrays of 4 to get aligned data */
    const uint8_t* const src_array[4] = {src};
    uint8_t *dst_array[4]             = {dst};
    int src_linesize_array[4] = {src_linesize};
    int dst_linesize_array[4] = {dst_linesize};

    sws_scale(filter_context, src_array, src_linesize_array,
              0, h, dst_array, dst_linesize_array);

    if (threshold > 0) {
        for (y = 0; y < h; ++y) {
            for (x = 0; x < w; ++x) {
                orig     = src[x + y * src_linesize];
                filtered = dst[x + y * dst_linesize];
                diff     = orig - filtered;

                if (diff > 0) {
                    if (diff > 2 * threshold)
                        dst[x + y * dst_linesize] = orig;
                    else if (diff > threshold)
                        /* add 'diff' and subtract 'threshold' from 'filtered' */
                        dst[x + y * dst_linesize] = orig - threshold;
                } else {
                    if (-diff > 2 * threshold)
                        dst[x + y * dst_linesize] = orig;
                    else if (-diff > threshold)
                        /* add 'diff' and 'threshold' to 'filtered' */
                        dst[x + y * dst_linesize] = orig + threshold;
                }
            }
        }
    } else if (threshold < 0) {
        for (y = 0; y < h; ++y) {
            for (x = 0; x < w; ++x) {
                orig     = src[x + y * src_linesize];
                filtered = dst[x + y * dst_linesize];
                diff     = orig - filtered;

                if (diff > 0) {
                    if (diff <= -threshold)
                        dst[x + y * dst_linesize] = orig;
                    else if (diff <= -2 * threshold)
                        /* subtract 'diff' and 'threshold' from 'orig' */
                        dst[x + y * dst_linesize] = filtered - threshold;
                } else {
                    if (diff >= threshold)
                        dst[x + y * dst_linesize] = orig;
                    else if (diff >= 2 * threshold)
                        /* add 'threshold' and subtract 'diff' from 'orig' */
                        dst[x + y * dst_linesize] = filtered + threshold;
                }
            }
        }
    }
}

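/*
 * Filter callback: blur the luma plane at full resolution and, when present,
 * the two chroma planes at their subsampled resolution, then pass the frame
 * on to the next filter.
 */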
static int end_frame(AVFilterLink *inlink)
{
    SmartblurContext *sblur = inlink->dst->priv;
    AVFilterBufferRef *inpic  = inlink->cur_buf;
    AVFilterBufferRef *outpic = inlink->dst->outputs[0]->out_buf;
    int cw = inlink->w >> sblur->hsub;
    int ch = inlink->h >> sblur->vsub;

    blur(outpic->data[0], outpic->linesize[0],
         inpic->data[0],  inpic->linesize[0],
         inlink->w, inlink->h, sblur->luma.threshold,
         sblur->luma.filter_context);

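    /* A grayscale input has no chroma planes, so only the luma pass runs. */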
    if (inpic->data[2]) {
        blur(outpic->data[1], outpic->linesize[1],
             inpic->data[1],  inpic->linesize[1],
             cw, ch, sblur->chroma.threshold,
             sblur->chroma.filter_context);
        blur(outpic->data[2], outpic->linesize[2],
             inpic->data[2],  inpic->linesize[2],
             cw, ch, sblur->chroma.threshold,
             sblur->chroma.filter_context);
    }

    return ff_end_frame(inlink->dst->outputs[0]);
}

AVFilter avfilter_vf_smartblur = {
    .name          = "smartblur",
    .description   = NULL_IF_CONFIG_SMALL("Blur the input video without impacting the outlines."),

    .priv_size     = sizeof(SmartblurContext),

    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,

    .inputs = (const AVFilterPad[]) {
        {
            .name         = "default",
            .type         = AVMEDIA_TYPE_VIDEO,
            .end_frame    = end_frame,
            .config_props = config_props,
            .min_perms    = AV_PERM_READ,
        },
        { .name = NULL }
    },
    .outputs = (const AVFilterPad[]) {
        {
            .name = "default",
            .type = AVMEDIA_TYPE_VIDEO,
        },
        { .name = NULL }
    }
};