
avfilter/edgedetect: make processing functions context free.

Author: Clément Bœsch
Date:   2014-05-03 14:57:51 +02:00
parent 3c3e02b8d1
commit 3f3c3318be
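
The change applies one pattern throughout: instead of each processing function pulling its inputs out of the filter's private context (EdgeDetectContext via ctx->priv), the caller now hands over every buffer and threshold explicitly, so sobel(), non_maximum_suppression() and double_threshold() no longer need an AVFilterContext at all. A minimal sketch of that pattern, using hypothetical names (FooContext, foo_threshold_old/new) rather than the actual filter code:

#include <stdint.h>

/* Hypothetical stand-ins for AVFilterContext and the filter's priv struct;
 * the real filter uses the libavfilter types. */
typedef struct FooContext    { int threshold; } FooContext;
typedef struct FilterContext { void *priv;    } FilterContext;

/* Before: the helper reaches into the filter context for its parameters. */
static void foo_threshold_old(FilterContext *ctx, int w, int h,
                              uint8_t *dst, const uint8_t *src)
{
    FooContext *s = ctx->priv;                    /* hidden dependency */
    for (int i = 0; i < w * h; i++)
        dst[i] = src[i] > s->threshold ? 255 : 0;
}

/* After: everything the helper needs is an explicit argument, so it can be
 * called (and tested) without constructing a filter context. */
static void foo_threshold_new(int threshold, int w, int h,
                              uint8_t *dst, const uint8_t *src)
{
    for (int i = 0; i < w * h; i++)
        dst[i] = src[i] > threshold ? 255 : 0;
}

The same idea drives the new dir/dir_linesize parameters in the diff below: the direction map becomes just another buffer that filter_frame() passes in.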

libavfilter/vf_edgedetect.c

@@ -154,15 +154,16 @@ static int get_rounded_direction(int gx, int gy)
     return DIRECTION_VERTICAL;
 }
 
-static void sobel(AVFilterContext *ctx, int w, int h,
-                  uint16_t *dst, int dst_linesize,
-                  const uint8_t *src, int src_linesize)
+static void sobel(int w, int h,
+                  uint16_t *dst, int dst_linesize,
+                    int8_t *dir, int dir_linesize,
+             const uint8_t *src, int src_linesize)
 {
     int i, j;
-    EdgeDetectContext *edgedetect = ctx->priv;
 
     for (j = 1; j < h - 1; j++) {
         dst += dst_linesize;
+        dir += dir_linesize;
         src += src_linesize;
         for (i = 1; i < w - 1; i++) {
             const int gx =
@@ -175,17 +176,17 @@ static void sobel(AVFilterContext *ctx, int w, int h,
                 -1*src[-src_linesize + i+1] + 1*src[ src_linesize + i+1];
 
             dst[i] = FFABS(gx) + FFABS(gy);
-            edgedetect->directions[j*w + i] = get_rounded_direction(gx, gy);
+            dir[i] = get_rounded_direction(gx, gy);
         }
     }
 }
 
-static void non_maximum_suppression(AVFilterContext *ctx, int w, int h,
+static void non_maximum_suppression(int w, int h,
                                     uint8_t *dst, int dst_linesize,
+                                    const int8_t *dir, int dir_linesize,
                                     const uint16_t *src, int src_linesize)
 {
     int i, j;
-    EdgeDetectContext *edgedetect = ctx->priv;
 
 #define COPY_MAXIMA(ay, ax, by, bx) do {                \
     if (src[i] > src[(ay)*src_linesize + i+(ax)] &&     \
@@ -195,9 +196,10 @@ static void non_maximum_suppression(AVFilterContext *ctx, int w, int h,
 
     for (j = 1; j < h - 1; j++) {
         dst += dst_linesize;
+        dir += dir_linesize;
         src += src_linesize;
         for (i = 1; i < w - 1; i++) {
-            switch (edgedetect->directions[j*w + i]) {
+            switch (dir[i]) {
             case DIRECTION_45UP:        COPY_MAXIMA( 1, -1, -1,  1); break;
             case DIRECTION_45DOWN:      COPY_MAXIMA(-1, -1,  1,  1); break;
             case DIRECTION_HORIZONTAL:  COPY_MAXIMA( 0, -1,  0,  1); break;
@@ -207,14 +209,11 @@ static void non_maximum_suppression(AVFilterContext *ctx, int w, int h,
     }
 }
 
-static void double_threshold(AVFilterContext *ctx, int w, int h,
+static void double_threshold(int low, int high, int w, int h,
                              uint8_t *dst, int dst_linesize,
                              const uint8_t *src, int src_linesize)
 {
     int i, j;
-    EdgeDetectContext *edgedetect = ctx->priv;
-    const int low  = edgedetect->low_u8;
-    const int high = edgedetect->high_u8;
 
     for (j = 0; j < h; j++) {
         for (i = 0; i < w; i++) {
@@ -249,6 +248,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     AVFilterLink *outlink = inlink->dst->outputs[0];
     uint8_t  *tmpbuf    = edgedetect->tmpbuf;
     uint16_t *gradients = edgedetect->gradients;
+    int8_t   *directions= edgedetect->directions;
     int direct = 0;
     AVFrame *out;
 
@@ -270,19 +270,22 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
             in->data[0], in->linesize[0]);
 
     /* compute the 16-bits gradients and directions for the next step */
-    sobel(ctx, inlink->w, inlink->h,
+    sobel(inlink->w, inlink->h,
          gradients, inlink->w,
+          directions,inlink->w,
          tmpbuf, inlink->w);
 
     /* non_maximum_suppression() will actually keep & clip what's necessary and
      * ignore the rest, so we need a clean output buffer */
     memset(tmpbuf, 0, inlink->w * inlink->h);
-    non_maximum_suppression(ctx, inlink->w, inlink->h,
+    non_maximum_suppression(inlink->w, inlink->h,
                            tmpbuf, inlink->w,
+                            directions,inlink->w,
                            gradients, inlink->w);
 
     /* keep high values, or low values surrounded by high values */
-    double_threshold(ctx, inlink->w, inlink->h,
+    double_threshold(edgedetect->low_u8, edgedetect->high_u8,
+                     inlink->w, inlink->h,
                      out->data[0], out->linesize[0],
                      tmpbuf, inlink->w);
 
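
One detail worth spelling out: dir[i] in the new code addresses the same element as the old edgedetect->directions[j*w + i], because the row pointer is advanced by dir_linesize at the top of every iteration and filter_frame() passes inlink->w as dir_linesize. A small self-contained illustration of that equivalence (not code from the commit):

#include <assert.h>
#include <stdint.h>

/* Both routines touch exactly the same elements when dir_linesize == w,
 * mirroring the indexing change in sobel()/non_maximum_suppression(). */
static void mark_by_full_index(int8_t *directions, int w, int h)
{
    for (int j = 1; j < h - 1; j++)
        for (int i = 1; i < w - 1; i++)
            directions[j * w + i] = 1;              /* old style */
}

static void mark_by_row_pointer(int8_t *dir, int dir_linesize, int w, int h)
{
    for (int j = 1; j < h - 1; j++) {
        dir += dir_linesize;                        /* dir now points at row j */
        for (int i = 1; i < w - 1; i++)
            dir[i] = 1;                             /* same cell as j*w + i above */
    }
}

int main(void)
{
    enum { W = 8, H = 6 };
    int8_t a[W * H] = {0}, b[W * H] = {0};

    mark_by_full_index(a, W, H);
    mark_by_row_pointer(b, W, W, H);
    for (int k = 0; k < W * H; k++)
        assert(a[k] == b[k]);                       /* identical results */
    return 0;
}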