
lavfi/cropdetect: Add new mode to detect crop-area based on motion vectors and edges

This filter allows crop detection even if the video is embedded in non-black areas.
Thilo Borgmann 2022-07-30 13:10:45 +02:00
parent cf1f574431
commit 9d66417cc5
6 changed files with 288 additions and 4 deletions

doc/filters.texi

@@ -10138,12 +10138,23 @@ Auto-detect the crop size.
It calculates the necessary cropping parameters and prints the
recommended parameters via the logging system. The detected dimensions
-correspond to the non-black area of the input video.
+correspond to the non-black or video area of the input video according to @var{mode}.
It accepts the following parameters:
@table @option
@item mode
Depending on @var{mode}, crop detection is based either on the mere black value of the surrounding pixels or on a combination of motion vectors and edge pixels.
@table @samp
@item black
Detect black pixels surrounding the playing video. For fine control, use the @var{limit} option.
@item mvedges
Detect the playing video by the motion vectors inside the video and by scanning for edge pixels that typically form the border of the playing video.
@end table
@item limit
Set higher black value threshold, which can be optionally specified
from nothing (0) to everything (255 for 8-bit based formats). An intensity
@@ -10169,8 +10180,48 @@ detect the current optimal crop area. Default value is 0.
This can be useful when channel logos distort the video area. 0
indicates 'never reset', and returns the largest area encountered during
playback.
@item mv_threshold
Set the motion threshold, in pixel units, for motion detection. It defaults to 8.
@item low
@item high
Set low and high threshold values used by the Canny thresholding
algorithm.
The high threshold selects the "strong" edge pixels, which are then
connected through 8-connectivity with the "weak" edge pixels selected
by the low threshold.
@var{low} and @var{high} threshold values must be chosen in the range
[0,1], and @var{low} should be less than or equal to @var{high}.
The default value for @var{low} is @code{15/255}, and the default value for
@var{high} is @code{25/255}.
@end table
@subsection Examples
@itemize
@item
Find video area surrounded by black borders:
@example
ffmpeg -i file.mp4 -vf cropdetect,metadata=mode=print -f null -
@end example
@item
Find an embedded video area, generate motion vectors beforehand:
@example
ffmpeg -i file.mp4 -vf mestimate,cropdetect=mode=mvedges,metadata=mode=print -f null -
@end example
@item
Find an embedded video area, use motion vectors from decoder:
@example
ffmpeg -flags2 +export_mvs -i file.mp4 -vf cropdetect=mode=mvedges,metadata=mode=print -f null -
@end example
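@item
Find an embedded video area with explicitly tuned thresholds (the option values shown here are only illustrative, not recommendations):
@example
ffmpeg -i file.mp4 -vf mestimate,cropdetect=mode=mvedges:mv_threshold=16:low=0.06:high=0.1,metadata=mode=print -f null -
@end example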
@end itemize
@anchor{cue}
@section cue

libavfilter/version.h

@@ -32,7 +32,7 @@
#include "version_major.h"
#define LIBAVFILTER_VERSION_MINOR 46
-#define LIBAVFILTER_VERSION_MICRO 100
+#define LIBAVFILTER_VERSION_MICRO 101
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \

libavfilter/vf_cropdetect.c

@@ -26,11 +26,14 @@
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/motion_vector.h"
#include "libavutil/qsort.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "edge_common.h"
typedef struct CropDetectContext {
const AVClass *class;
@@ -42,6 +45,16 @@ typedef struct CropDetectContext {
int frame_nb;
int max_pixsteps[4];
int max_outliers;
int mode;
int window_size;
int mv_threshold;
float low, high;
uint8_t low_u8, high_u8;
uint8_t *filterbuf;
uint8_t *tmpbuf;
uint16_t *gradients;
char *directions;
int *bboxes[4];
} CropDetectContext;
static const enum AVPixelFormat pix_fmts[] = {
@@ -61,6 +74,17 @@ static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_NONE
};
enum CropMode {
MODE_BLACK,
MODE_MV_EDGES,
MODE_NB
};
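// Integer comparator for AV_QSORT(), used to sort the recent bounding-box
// coordinates so that their median can be taken.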
static int comp(const int *a,const int *b)
{
return FFDIFFSIGN(*a, *b);
}
static int checkline(void *ctx, const unsigned char *src, int stride, int len, int bpp)
{
int total = 0;
@@ -116,11 +140,43 @@ static int checkline(void *ctx, const unsigned char *src, int stride, int len, int bpp)
return total;
}
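// Return 1 if the given line of the edge image contains no edge pixel at all,
// 0 as soon as one is found; stride advances by one pixel along the line.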
static int checkline_edge(void *ctx, const unsigned char *src, int stride, int len, int bpp)
{
const uint16_t *src16 = (const uint16_t *)src;
switch (bpp) {
case 1:
while (--len >= 0) {
if (src[0]) return 0;
src += stride;
}
break;
case 2:
stride >>= 1;
while (--len >= 0) {
if (src16[0]) return 0;
src16 += stride;
}
break;
case 3:
case 4:
while (--len >= 0) {
if (src[0] || src[1] || src[2]) return 0;
src += stride;
}
break;
}
return 1;
}
static av_cold int init(AVFilterContext *ctx)
{
CropDetectContext *s = ctx->priv;
s->frame_nb = -1 * s->skip;
s->low_u8 = s->low * 255. + .5;
s->high_u8 = s->high * 255. + .5;
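// Scale the [0,1] float thresholds to the 8-bit range used on the edge image.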
av_log(ctx, AV_LOG_VERBOSE, "limit:%f round:%d skip:%d reset_count:%d\n",
s->limit, s->round, s->skip, s->reset_count);
@@ -128,11 +184,27 @@ static av_cold int init(AVFilterContext *ctx)
return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
CropDetectContext *s = ctx->priv;
av_freep(&s->tmpbuf);
av_freep(&s->filterbuf);
av_freep(&s->gradients);
av_freep(&s->directions);
av_freep(&s->bboxes[0]);
av_freep(&s->bboxes[1]);
av_freep(&s->bboxes[2]);
av_freep(&s->bboxes[3]);
}
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
CropDetectContext *s = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
const int bufsize = inlink->w * inlink->h;
int bpp;
av_image_fill_max_pixsteps(s->max_pixsteps, NULL, desc);
@@ -144,6 +216,21 @@ static int config_input(AVFilterLink *inlink)
s->x2 = 0;
s->y2 = 0;
bpp = s->max_pixsteps[0];
s->window_size = FFMAX(s->reset_count, 15);
s->tmpbuf = av_malloc(bufsize);
s->filterbuf = av_malloc(bufsize * s->max_pixsteps[0]);
s->gradients = av_calloc(bufsize, sizeof(*s->gradients));
s->directions = av_malloc(bufsize);
s->bboxes[0] = av_malloc(s->window_size * sizeof(*s->bboxes[0]));
s->bboxes[1] = av_malloc(s->window_size * sizeof(*s->bboxes[1]));
s->bboxes[2] = av_malloc(s->window_size * sizeof(*s->bboxes[2]));
s->bboxes[3] = av_malloc(s->window_size * sizeof(*s->bboxes[3]));
if (!s->tmpbuf || !s->filterbuf || !s->gradients || !s->directions ||
!s->bboxes[0] || !s->bboxes[1] || !s->bboxes[2] || !s->bboxes[3])
return AVERROR(ENOMEM);
return 0;
}
@@ -155,11 +242,28 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
AVFilterContext *ctx = inlink->dst;
CropDetectContext *s = ctx->priv;
int bpp = s->max_pixsteps[0];
-int w, h, x, y, shrink_by;
+int w, h, x, y, shrink_by, i;
AVDictionary **metadata;
int outliers, last_y;
int limit = lrint(s->limit);
const int inw = inlink->w;
const int inh = inlink->h;
uint8_t *tmpbuf = s->tmpbuf;
uint8_t *filterbuf = s->filterbuf;
uint16_t *gradients = s->gradients;
int8_t *directions = s->directions;
const AVFrameSideData *sd = NULL;
int scan_w, scan_h, bboff;
void (*sobel)(int w, int h, uint16_t *dst, int dst_linesize,
int8_t *dir, int dir_linesize,
const uint8_t *src, int src_linesize, int src_stride) = (bpp == 2) ? &ff_sobel_16 : &ff_sobel_8;
void (*gaussian_blur)(int w, int h,
uint8_t *dst, int dst_linesize,
const uint8_t *src, int src_linesize, int src_stride) = (bpp == 2) ? &ff_gaussian_blur_16 : &ff_gaussian_blur_8;
// ignore first s->skip frames
if (++s->frame_nb > 0) {
metadata = &frame->metadata;
@@ -185,11 +289,109 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
last_y = y INC;\
}
if (s->mode == MODE_BLACK) {
FIND(s->y1, 0, y < s->y1, +1, frame->linesize[0], bpp, frame->width);
FIND(s->y2, frame->height - 1, y > FFMAX(s->y2, s->y1), -1, frame->linesize[0], bpp, frame->width);
FIND(s->x1, 0, y < s->x1, +1, bpp, frame->linesize[0], frame->height);
FIND(s->x2, frame->width - 1, y > FFMAX(s->x2, s->x1), -1, bpp, frame->linesize[0], frame->height);
} else { // MODE_MV_EDGES
sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
s->x1 = 0;
s->y1 = 0;
s->x2 = inw - 1;
s->y2 = inh - 1;
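// Default to the full frame; refined below if motion vectors are available.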
if (!sd) {
av_log(ctx, AV_LOG_WARNING, "Cannot detect: no motion vectors available\n");
} else {
// gaussian filter to reduce noise
gaussian_blur(inw, inh,
filterbuf, inw*bpp,
frame->data[0], frame->linesize[0], bpp);
// compute the 16-bits gradients and directions for the next step
sobel(inw, inh, gradients, inw, directions, inw, filterbuf, inw*bpp, bpp);
// non_maximum_suppression() will actually keep & clip what's necessary and
// ignore the rest, so we need a clean output buffer
memset(tmpbuf, 0, inw * inh);
ff_non_maximum_suppression(inw, inh, tmpbuf, inw, directions, inw, gradients, inw);
// keep high values, or low values surrounded by high values
ff_double_threshold(s->low_u8, s->high_u8, inw, inh,
tmpbuf, inw, tmpbuf, inw);
// scan all MVs and store bounding box
s->x1 = inw - 1;
s->y1 = inh - 1;
s->x2 = 0;
s->y2 = 0;
for (i = 0; i < sd->size / sizeof(AVMotionVector); i++) {
const AVMotionVector *mv = (const AVMotionVector*)sd->data + i;
const int mx = mv->dst_x - mv->src_x;
const int my = mv->dst_y - mv->src_y;
if (mv->dst_x >= 0 && mv->dst_x < inw &&
mv->dst_y >= 0 && mv->dst_y < inh &&
mv->src_x >= 0 && mv->src_x < inw &&
mv->src_y >= 0 && mv->src_y < inh &&
mx * mx + my * my >= s->mv_threshold * s->mv_threshold) {
s->x1 = mv->dst_x < s->x1 ? mv->dst_x : s->x1;
s->y1 = mv->dst_y < s->y1 ? mv->dst_y : s->y1;
s->x2 = mv->dst_x > s->x2 ? mv->dst_x : s->x2;
s->y2 = mv->dst_y > s->y2 ? mv->dst_y : s->y2;
}
}
// assert x1<x2, y1<y2
if (s->x1 > s->x2) FFSWAP(int, s->x1, s->x2);
if (s->y1 > s->y2) FFSWAP(int, s->y1, s->y2);
// scan outward looking for 0-edge-lines in edge image
scan_w = s->x2 - s->x1;
scan_h = s->y2 - s->y1;
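// Push each border of the motion-vector bounding box outwards, one line at a
// time, until two adjacent lines of the edge image contain no edge pixel at
// all; if the frame border is reached first, clamp to it.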
#define FIND_EDGE(DST, FROM, NOEND, INC, STEP0, STEP1, LEN) \
for (last_y = y = FROM; NOEND; y = y INC) { \
if (checkline_edge(ctx, tmpbuf + STEP0 * y, STEP1, LEN, bpp)) { \
if (last_y INC == y) { \
DST = y; \
break; \
} else \
last_y = y; \
} \
} \
if (!(NOEND)) { \
DST = y -(INC); \
}
FIND_EDGE(s->y1, s->y1, y >= 0, -1, inw, bpp, scan_w);
FIND_EDGE(s->y2, s->y2, y < inh, +1, inw, bpp, scan_w);
FIND_EDGE(s->x1, s->x1, y >= 0, -1, bpp, inw, scan_h);
FIND_EDGE(s->x2, s->x2, y < inw, +1, bpp, inw, scan_h);
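// Median-filter each bounding-box coordinate over the last window_size frames
// so that a single noisy frame cannot distort the detected area.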
// queue bboxes
bboff = (s->frame_nb - 1) % s->window_size;
s->bboxes[0][bboff] = s->x1;
s->bboxes[1][bboff] = s->x2;
s->bboxes[2][bboff] = s->y1;
s->bboxes[3][bboff] = s->y2;
// sort queue
bboff = FFMIN(s->frame_nb, s->window_size);
AV_QSORT(s->bboxes[0], bboff, int, comp);
AV_QSORT(s->bboxes[1], bboff, int, comp);
AV_QSORT(s->bboxes[2], bboff, int, comp);
AV_QSORT(s->bboxes[3], bboff, int, comp);
// return median of window_size elems
s->x1 = s->bboxes[0][bboff/2];
s->x2 = s->bboxes[1][bboff/2];
s->y1 = s->bboxes[2][bboff/2];
s->y2 = s->bboxes[3][bboff/2];
}
}
// round x and y (up), important for yuv colorspaces
// make sure they stay rounded!
@@ -243,6 +445,12 @@ static const AVOption cropdetect_options[] = {
{ "skip", "Number of initial frames to skip", OFFSET(skip), AV_OPT_TYPE_INT, { .i64 = 2 }, 0, INT_MAX, FLAGS },
{ "reset_count", "Recalculate the crop area after this many frames",OFFSET(reset_count),AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, INT_MAX, FLAGS },
{ "max_outliers", "Threshold count of outliers", OFFSET(max_outliers),AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
{ "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_BLACK}, 0, MODE_NB-1, FLAGS, "mode" },
{ "black", "detect black pixels surrounding the video", 0, AV_OPT_TYPE_CONST, {.i64=MODE_BLACK}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "mvedges", "detect motion and edged surrounding the video", 0, AV_OPT_TYPE_CONST, {.i64=MODE_MV_EDGES}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "high", "Set high threshold for edge detection", OFFSET(high), AV_OPT_TYPE_FLOAT, {.dbl=25/255.}, 0, 1, FLAGS },
{ "low", "Set low threshold for edge detection", OFFSET(low), AV_OPT_TYPE_FLOAT, {.dbl=15/255.}, 0, 1, FLAGS },
{ "mv_threshold", "motion vector threshold when estimating video window size", OFFSET(mv_threshold), AV_OPT_TYPE_INT, {.i64=8}, 0, 100, FLAGS},
{ NULL }
};
@@ -270,6 +478,7 @@ const AVFilter ff_vf_cropdetect = {
.priv_size = sizeof(CropDetectContext),
.priv_class = &cropdetect_class,
.init = init,
.uninit = uninit,
FILTER_INPUTS(avfilter_vf_cropdetect_inputs),
FILTER_OUTPUTS(avfilter_vf_cropdetect_outputs),
FILTER_PIXFMTS_ARRAY(pix_fmts),
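The windowed-median step above is what keeps a single bad frame from distorting the reported crop area. Below is a minimal standalone sketch of that idea in plain C, using libc qsort instead of AV_QSORT; the names (median_update, cmp_int, WINDOW_SIZE) and the sample values are illustrative only and are not part of the filter.

/*
 * Minimal sketch of the windowed-median smoothing: keep the last WINDOW_SIZE
 * values of one bounding-box coordinate in a ring buffer and report the
 * median of what has been seen so far. The filter itself does the same with
 * AV_QSORT over s->bboxes[].
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define WINDOW_SIZE 15

static int cmp_int(const void *a, const void *b)
{
    int va = *(const int *)a, vb = *(const int *)b;
    return (va > vb) - (va < vb);
}

/* Store the newest value and return the median of the (at most WINDOW_SIZE)
 * values collected so far. */
static int median_update(int *ring, int *count, int frame_nb, int value)
{
    int sorted[WINDOW_SIZE];
    int n;

    ring[frame_nb % WINDOW_SIZE] = value;
    if (*count < WINDOW_SIZE)
        (*count)++;
    n = *count;

    memcpy(sorted, ring, n * sizeof(*sorted));
    qsort(sorted, n, sizeof(*sorted), cmp_int);
    return sorted[n / 2];
}

int main(void)
{
    int ring[WINDOW_SIZE], count = 0;
    /* Hypothetical per-frame x1 estimates; the spike at frame 3 is rejected. */
    int x1[] = { 20, 22, 21, 320, 20, 21, 22 };

    for (int i = 0; i < 7; i++)
        printf("frame %d: raw x1 = %3d, smoothed x1 = %d\n",
               i, x1[i], median_update(ring, &count, i, x1[i]));
    return 0;
}

Compiling and running this prints one smoothed value per input frame; the outlier at frame 3 never reaches the output.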

tests/fate/filter-video.mak

@@ -641,11 +641,17 @@ FATE_METADATA_FILTER-$(call ALLYES, $(SCDET_DEPS)) += fate-filter-metadata-scdet
fate-filter-metadata-scdet: SRC = $(TARGET_SAMPLES)/svq3/Vertical400kbit.sorenson3.mov
fate-filter-metadata-scdet: CMD = run $(FILTER_METADATA_COMMAND) "sws_flags=+accurate_rnd+bitexact;movie='$(SRC)',scdet=s=1"
-CROPDETECT_DEPS = LAVFI_INDEV FILE_PROTOCOL MOVIE_FILTER CROPDETECT_FILTER \
+CROPDETECT_DEPS = LAVFI_INDEV FILE_PROTOCOL MOVIE_FILTER MESTIMATE_FILTER CROPDETECT_FILTER \
SCALE_FILTER MOV_DEMUXER H264_DECODER
FATE_METADATA_FILTER-$(call ALLYES, $(CROPDETECT_DEPS)) += fate-filter-metadata-cropdetect
fate-filter-metadata-cropdetect: SRC = $(TARGET_SAMPLES)/filter/cropdetect.mp4
fate-filter-metadata-cropdetect: CMD = run $(FILTER_METADATA_COMMAND) "sws_flags=+accurate_rnd+bitexact;movie='$(SRC)',cropdetect=max_outliers=3"
FATE_METADATA_FILTER-$(call ALLYES, $(CROPDETECT_DEPS)) += fate-filter-metadata-cropdetect1
fate-filter-metadata-cropdetect1: SRC = $(TARGET_SAMPLES)/filter/cropdetect1.mp4
fate-filter-metadata-cropdetect1: CMD = run $(FILTER_METADATA_COMMAND) "sws_flags=+accurate_rnd+bitexact;movie='$(SRC)',mestimate,cropdetect=mode=mvedges,metadata=mode=print"
FATE_METADATA_FILTER-$(call ALLYES, $(CROPDETECT_DEPS)) += fate-filter-metadata-cropdetect2
fate-filter-metadata-cropdetect2: SRC = $(TARGET_SAMPLES)/filter/cropdetect2.mp4
fate-filter-metadata-cropdetect2: CMD = run $(FILTER_METADATA_COMMAND) "sws_flags=+accurate_rnd+bitexact;movie='$(SRC)',mestimate,cropdetect=mode=mvedges,metadata=mode=print"
FREEZEDETECT_DEPS = LAVFI_INDEV MPTESTSRC_FILTER SCALE_FILTER FREEZEDETECT_FILTER
FATE_METADATA_FILTER-$(call ALLYES, $(FREEZEDETECT_DEPS)) += fate-filter-metadata-freezedetect

tests/ref/fate/filter-metadata-cropdetect1

@@ -0,0 +1,9 @@
pts=0
pts=1001
pts=2002|tag:lavfi.cropdetect.x1=20|tag:lavfi.cropdetect.x2=851|tag:lavfi.cropdetect.y1=311|tag:lavfi.cropdetect.y2=601|tag:lavfi.cropdetect.w=832|tag:lavfi.cropdetect.h=288|tag:lavfi.cropdetect.x=20|tag:lavfi.cropdetect.y=314
pts=3003|tag:lavfi.cropdetect.x1=20|tag:lavfi.cropdetect.x2=885|tag:lavfi.cropdetect.y1=311|tag:lavfi.cropdetect.y2=621|tag:lavfi.cropdetect.w=864|tag:lavfi.cropdetect.h=304|tag:lavfi.cropdetect.x=22|tag:lavfi.cropdetect.y=316
pts=4004|tag:lavfi.cropdetect.x1=0|tag:lavfi.cropdetect.x2=885|tag:lavfi.cropdetect.y1=115|tag:lavfi.cropdetect.y2=621|tag:lavfi.cropdetect.w=880|tag:lavfi.cropdetect.h=496|tag:lavfi.cropdetect.x=4|tag:lavfi.cropdetect.y=122
pts=5005|tag:lavfi.cropdetect.x1=20|tag:lavfi.cropdetect.x2=885|tag:lavfi.cropdetect.y1=311|tag:lavfi.cropdetect.y2=621|tag:lavfi.cropdetect.w=864|tag:lavfi.cropdetect.h=304|tag:lavfi.cropdetect.x=22|tag:lavfi.cropdetect.y=316
pts=6006|tag:lavfi.cropdetect.x1=0|tag:lavfi.cropdetect.x2=885|tag:lavfi.cropdetect.y1=115|tag:lavfi.cropdetect.y2=621|tag:lavfi.cropdetect.w=880|tag:lavfi.cropdetect.h=496|tag:lavfi.cropdetect.x=4|tag:lavfi.cropdetect.y=122
pts=7007|tag:lavfi.cropdetect.x1=0|tag:lavfi.cropdetect.x2=885|tag:lavfi.cropdetect.y1=115|tag:lavfi.cropdetect.y2=621|tag:lavfi.cropdetect.w=880|tag:lavfi.cropdetect.h=496|tag:lavfi.cropdetect.x=4|tag:lavfi.cropdetect.y=122
pts=8008|tag:lavfi.cropdetect.x1=0|tag:lavfi.cropdetect.x2=885|tag:lavfi.cropdetect.y1=115|tag:lavfi.cropdetect.y2=621|tag:lavfi.cropdetect.w=880|tag:lavfi.cropdetect.h=496|tag:lavfi.cropdetect.x=4|tag:lavfi.cropdetect.y=122

tests/ref/fate/filter-metadata-cropdetect2

@@ -0,0 +1,9 @@
pts=0
pts=512
pts=1024|tag:lavfi.cropdetect.x1=21|tag:lavfi.cropdetect.x2=1221|tag:lavfi.cropdetect.y1=15|tag:lavfi.cropdetect.y2=1116|tag:lavfi.cropdetect.w=1200|tag:lavfi.cropdetect.h=1088|tag:lavfi.cropdetect.x=22|tag:lavfi.cropdetect.y=22
pts=1536|tag:lavfi.cropdetect.x1=21|tag:lavfi.cropdetect.x2=1257|tag:lavfi.cropdetect.y1=15|tag:lavfi.cropdetect.y2=1116|tag:lavfi.cropdetect.w=1232|tag:lavfi.cropdetect.h=1088|tag:lavfi.cropdetect.x=24|tag:lavfi.cropdetect.y=22
pts=2048|tag:lavfi.cropdetect.x1=21|tag:lavfi.cropdetect.x2=1221|tag:lavfi.cropdetect.y1=15|tag:lavfi.cropdetect.y2=1116|tag:lavfi.cropdetect.w=1200|tag:lavfi.cropdetect.h=1088|tag:lavfi.cropdetect.x=22|tag:lavfi.cropdetect.y=22
pts=2560|tag:lavfi.cropdetect.x1=21|tag:lavfi.cropdetect.x2=1221|tag:lavfi.cropdetect.y1=15|tag:lavfi.cropdetect.y2=1116|tag:lavfi.cropdetect.w=1200|tag:lavfi.cropdetect.h=1088|tag:lavfi.cropdetect.x=22|tag:lavfi.cropdetect.y=22
pts=3072|tag:lavfi.cropdetect.x1=21|tag:lavfi.cropdetect.x2=817|tag:lavfi.cropdetect.y1=15|tag:lavfi.cropdetect.y2=937|tag:lavfi.cropdetect.w=784|tag:lavfi.cropdetect.h=912|tag:lavfi.cropdetect.x=28|tag:lavfi.cropdetect.y=22
pts=3584|tag:lavfi.cropdetect.x1=21|tag:lavfi.cropdetect.x2=817|tag:lavfi.cropdetect.y1=38|tag:lavfi.cropdetect.y2=937|tag:lavfi.cropdetect.w=784|tag:lavfi.cropdetect.h=896|tag:lavfi.cropdetect.x=28|tag:lavfi.cropdetect.y=40
pts=4096|tag:lavfi.cropdetect.x1=21|tag:lavfi.cropdetect.x2=817|tag:lavfi.cropdetect.y1=15|tag:lavfi.cropdetect.y2=937|tag:lavfi.cropdetect.w=784|tag:lavfi.cropdetect.h=912|tag:lavfi.cropdetect.x=28|tag:lavfi.cropdetect.y=22