avfilter/vf_overlay: split blend_image into functions for each overlay format
Signed-off-by: Paul B Mahol <onemda@gmail.com>
--- a/libavfilter/vf_overlay.c
+++ b/libavfilter/vf_overlay.c
@@ -132,6 +132,8 @@ typedef struct OverlayContext {
     int eof_action;             ///< action to take on EOF from source
 
     AVExpr *x_pexpr, *y_pexpr;
+
+    void (*blend_image)(AVFilterContext *ctx, AVFrame *dst, const AVFrame *src, int x, int y);
 } OverlayContext;
 
 static av_cold void uninit(AVFilterContext *ctx)
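The new OverlayContext member turns the blend step into per-format dispatch: the routine is chosen once, when the input format is known, and every subsequent frame goes through the stored pointer instead of re-testing the format. A minimal standalone sketch of that pattern (the types and names below are illustrative, not taken from the patch):

    #include <stdio.h>

    /* Hypothetical stand-ins for the filter context and frame types. */
    typedef struct Frame { int width, height; } Frame;
    typedef struct Ctx {
        /* selected once at configuration time, called for every frame */
        void (*blend_image)(struct Ctx *ctx, Frame *dst, const Frame *src, int x, int y);
    } Ctx;

    static void blend_rgb(Ctx *ctx, Frame *dst, const Frame *src, int x, int y)
    {
        (void)ctx; (void)dst;
        printf("packed RGB blend of %dx%d overlay at (%d,%d)\n", src->width, src->height, x, y);
    }

    static void blend_yuv(Ctx *ctx, Frame *dst, const Frame *src, int x, int y)
    {
        (void)ctx; (void)dst;
        printf("planar YUV blend of %dx%d overlay at (%d,%d)\n", src->width, src->height, x, y);
    }

    int main(void)
    {
        Ctx c;
        int overlay_is_rgb = 1;                /* decided during format negotiation */
        c.blend_image = overlay_is_rgb ? blend_rgb : blend_yuv;

        Frame dst = { 1920, 1080 }, src = { 320, 240 };
        c.blend_image(&c, &dst, &src, 16, 16); /* per-frame call, no format branch */
        return 0;
    }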
@@ -304,22 +306,6 @@ static const enum AVPixelFormat alpha_pix_fmts[] = {
     AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE
 };
 
-static int config_input_main(AVFilterLink *inlink)
-{
-    OverlayContext *s = inlink->dst->priv;
-    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
-
-    av_image_fill_max_pixsteps(s->main_pix_step, NULL, pix_desc);
-
-    s->hsub = pix_desc->log2_chroma_w;
-    s->vsub = pix_desc->log2_chroma_h;
-
-    s->main_is_packed_rgb =
-        ff_fill_rgba_map(s->main_rgba_map, inlink->format) >= 0;
-    s->main_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts);
-    return 0;
-}
-
 static int config_input_overlay(AVFilterLink *inlink)
 {
     AVFilterContext *ctx = inlink->dst;
@@ -397,22 +383,17 @@ static int config_output(AVFilterLink *outlink)
 /**
  * Blend image in src to destination buffer dst at position (x, y).
  */
-static void blend_image(AVFilterContext *ctx,
+static void blend_image_packed_rgb(AVFilterContext *ctx,
                         AVFrame *dst, const AVFrame *src,
                         int x, int y)
 {
     OverlayContext *s = ctx->priv;
-    int i, imax, j, jmax, k, kmax;
+    int i, imax, j, jmax;
     const int src_w = src->width;
     const int src_h = src->height;
     const int dst_w = dst->width;
     const int dst_h = dst->height;
 
-    if (x >= dst_w || x+src_w < 0 ||
-        y >= dst_h || y+src_h < 0)
-        return; /* no intersection */
-
-    if (s->main_is_packed_rgb) {
         uint8_t alpha;          ///< the amount of overlay to blend on to main
         const int dr = s->main_rgba_map[R];
         const int dg = s->main_rgba_map[G];
@@ -425,7 +406,7 @@ static void blend_image(AVFilterContext *ctx,
         const int sa = s->overlay_rgba_map[A];
         const int sstep = s->overlay_pix_step[0];
         const int main_has_alpha = s->main_has_alpha;
-        uint8_t *s, *sp, *d, *dp;
+        uint8_t *S, *sp, *d, *dp;
 
         i = FFMAX(-y, 0);
         sp = src->data[0] + i * src->linesize[0];
@@ -433,11 +414,11 @@ static void blend_image(AVFilterContext *ctx,
 
         for (imax = FFMIN(-y + dst_h, src_h); i < imax; i++) {
             j = FFMAX(-x, 0);
-            s = sp + j * sstep;
+            S = sp + j * sstep;
             d = dp + (x+j) * dstep;
 
             for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) {
-                alpha = s[sa];
+                alpha = S[sa];
 
                 // if the main channel has an alpha channel, alpha has to be calculated
                 // to create an un-premultiplied (straight) alpha value
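The comment above distinguishes straight from premultiplied alpha: with straight (un-premultiplied) alpha the colour channels hold the full-strength colour and the alpha channel alone encodes coverage, whereas premultiplied storage keeps the colour already scaled by alpha. A small illustrative round trip for 8-bit values (the helper names are made up for this sketch):

    #include <stdio.h>

    /* Straight -> premultiplied: scale the colour by its own alpha. */
    static unsigned premultiply(unsigned color, unsigned alpha)
    {
        return (color * alpha + 127) / 255;          /* rounded */
    }

    /* Premultiplied -> straight: undo the scaling (alpha == 0 carries no colour). */
    static unsigned unpremultiply(unsigned color, unsigned alpha)
    {
        return alpha ? (color * 255 + alpha / 2) / alpha : 0;
    }

    int main(void)
    {
        unsigned r = 200, a = 128;                   /* straight red, roughly 50% opaque */
        unsigned pr = premultiply(r, a);             /* stored as ~100 */
        printf("premultiplied: %u, back to straight: %u\n", pr, unpremultiply(pr, a));
        return 0;
    }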
@@ -450,37 +431,49 @@ static void blend_image(AVFilterContext *ctx,
                 case 0:
                     break;
                 case 255:
-                    d[dr] = s[sr];
-                    d[dg] = s[sg];
-                    d[db] = s[sb];
+                    d[dr] = S[sr];
+                    d[dg] = S[sg];
+                    d[db] = S[sb];
                     break;
                 default:
                     // main_value = main_value * (1 - alpha) + overlay_value * alpha
                     // since alpha is in the range 0-255, the result must divided by 255
-                    d[dr] = FAST_DIV255(d[dr] * (255 - alpha) + s[sr] * alpha);
-                    d[dg] = FAST_DIV255(d[dg] * (255 - alpha) + s[sg] * alpha);
-                    d[db] = FAST_DIV255(d[db] * (255 - alpha) + s[sb] * alpha);
+                    d[dr] = FAST_DIV255(d[dr] * (255 - alpha) + S[sr] * alpha);
+                    d[dg] = FAST_DIV255(d[dg] * (255 - alpha) + S[sg] * alpha);
+                    d[db] = FAST_DIV255(d[db] * (255 - alpha) + S[sb] * alpha);
                 }
                 if (main_has_alpha) {
                     switch (alpha) {
                     case 0:
                         break;
                     case 255:
-                        d[da] = s[sa];
+                        d[da] = S[sa];
                         break;
                     default:
                         // apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha
-                        d[da] += FAST_DIV255((255 - d[da]) * s[sa]);
+                        d[da] += FAST_DIV255((255 - d[da]) * S[sa]);
                     }
                 }
                 d += dstep;
-                s += sstep;
+                S += sstep;
             }
             dp += dst->linesize[0];
             sp += src->linesize[0];
         }
-    } else {
+    }
+
+static void blend_image_yuv(AVFilterContext *ctx,
+                        AVFrame *dst, const AVFrame *src,
+                        int x, int y)
+{
+    OverlayContext *s = ctx->priv;
+    int i, imax, j, jmax, k, kmax;
+    const int src_w = src->width;
+    const int src_h = src->height;
+    const int dst_w = dst->width;
+    const int dst_h = dst->height;
     const int main_has_alpha = s->main_has_alpha;
+
     if (main_has_alpha) {
         uint8_t alpha;          ///< the amount of overlay to blend on to main
         uint8_t *s, *sa, *d, *da;
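The arithmetic in this hunk is ordinary "over" compositing with 8-bit alpha, dst = (dst * (255 - alpha) + src * alpha) / 255, with the division by 255 replaced by the multiply-and-shift macro FAST_DIV255. The sketch below assumes the usual ((x + 128) * 257) >> 16 formulation of that macro and checks it against rounded integer division over the full range an 8-bit blend can produce:

    #include <assert.h>
    #include <stdio.h>

    /* Approximate x / 255 with a multiply and a shift; assumed to mirror the
     * FAST_DIV255 macro used by vf_overlay for 0 <= x <= 255*255. */
    #define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)

    int main(void)
    {
        /* dst = (dst*(255 - alpha) + src*alpha) / 255, e.g. a ~50% blend */
        unsigned dst = 40, src = 200, alpha = 128;
        unsigned blended = FAST_DIV255(dst * (255 - alpha) + src * alpha);
        printf("blended value: %u\n", blended);    /* ~120 */

        /* sanity-check the shortcut against exact rounded division */
        for (unsigned x = 0; x <= 255 * 255; x++)
            assert(FAST_DIV255(x) == (x + 127) / 255);
        return 0;
    }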
@@ -582,7 +575,32 @@ static void blend_image(AVFilterContext *ctx,
             ap += (1 << vsub) * src->linesize[3];
         }
     }
 }
 
+static int config_input_main(AVFilterLink *inlink)
+{
+    OverlayContext *s = inlink->dst->priv;
+    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+
+    av_image_fill_max_pixsteps(s->main_pix_step, NULL, pix_desc);
+
+    s->hsub = pix_desc->log2_chroma_w;
+    s->vsub = pix_desc->log2_chroma_h;
+
+    s->main_is_packed_rgb =
+        ff_fill_rgba_map(s->main_rgba_map, inlink->format) >= 0;
+    s->main_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts);
+    switch (s->format) {
+    case OVERLAY_FORMAT_YUV420:
+    case OVERLAY_FORMAT_YUV422:
+    case OVERLAY_FORMAT_YUV444:
+        s->blend_image = blend_image_yuv;
+        break;
+    case OVERLAY_FORMAT_RGB:
+        s->blend_image = blend_image_packed_rgb;
+        break;
+    }
+    return 0;
+}
 
 static AVFrame *do_blend(AVFilterContext *ctx, AVFrame *mainpic,
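config_input_main now does two things when the main input is configured: it caches the chroma subsampling shifts (hsub/vsub come from the pixel format descriptor's log2_chroma_w/log2_chroma_h) and it installs the blend callback matching the overlay format option (yuv420, yuv422, yuv444 or rgb). A small standalone use of the descriptor API, only to show where those shift values come from (link against libavutil):

    #include <stdio.h>
    #include <libavutil/pixdesc.h>

    int main(void)
    {
        /* The same lookup config_input_main performs on the main input link. */
        const AVPixFmtDescriptor *d = av_pix_fmt_desc_get(AV_PIX_FMT_YUV420P);

        /* For yuv420p both shifts are 1: chroma planes are half width and
         * half height, which hsub/vsub are later used to account for. */
        printf("%s: hsub=%d vsub=%d\n", d->name, d->log2_chroma_w, d->log2_chroma_h);
        return 0;
    }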
@@ -611,7 +629,9 @@ static AVFrame *do_blend(AVFilterContext *ctx, AVFrame *mainpic,
                s->var_values[VAR_Y], s->y);
     }
 
-    blend_image(ctx, mainpic, second, s->x, s->y);
+    if (s->x < mainpic->width && s->x + second->width >= 0 ||
+        s->y < mainpic->height && s->y + second->height >= 0)
+        s->blend_image(ctx, mainpic, second, s->x, s->y);
     return mainpic;
 }
 
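The guard added in do_blend takes over the role of the early return that used to sit at the top of blend_image: skip the call when the overlay rectangle cannot touch the destination. A standalone sketch of that visibility test (the helper name is hypothetical; its body simply negates the removed no-intersection check):

    #include <stdio.h>

    /* Does an overlay of size (w, h) placed at (x, y) touch a destination
     * of size (dst_w, dst_h) at all? */
    static int overlaps(int x, int y, int w, int h, int dst_w, int dst_h)
    {
        return !(x >= dst_w || x + w < 0 || y >= dst_h || y + h < 0);
    }

    int main(void)
    {
        printf("%d\n", overlaps(-10, -10, 32, 32, 640, 480));  /* 1: partly visible */
        printf("%d\n", overlaps(700,  10, 32, 32, 640, 480));  /* 0: right of frame */
        return 0;
    }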