avfilter/vf_lut2: implement support for different input depths
This commit is contained in:
parent 1096614c42
commit 02809e7b06
doc/filters.texi
@@ -11688,6 +11688,10 @@ set second pixel component expression
set third pixel component expression

@item c3
set fourth pixel component expression, corresponds to the alpha component

@item d
set output bit depth, only available for @code{lut2} filter. By default is 0,
which means bit depth is automatically picked from first input format.
@end table

Each of them specifies the expression to use for computing the lookup table for
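For context only (not part of this commit; the input names and expressions below are placeholders), the new option could be exercised by mapping two 8-bit inputs into a 10-bit output, for instance summing the pair and doubling it so the result still fits the selected depth:

@example
ffmpeg -i INPUT1 -i INPUT2 -filter_complex "[0:v][1:v]lut2=c0='(x+y)*2':c1='(x+y)*2':c2='(x+y)*2':d=10" OUTPUT
@end example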
libavfilter/vf_lut2.c
@@ -54,12 +54,17 @@ typedef struct LUT2Context {
    const AVClass *class;
    FFFrameSync fs;

    int odepth;
    char *comp_expr_str[4];

    AVExpr *comp_expr[4];
    double var_values[VAR_VARS_NB];
    uint16_t *lut[4];  ///< lookup table for each component
    int width[4], height[4];
    int widthx[4], heightx[4];
    int widthy[4], heighty[4];
    int nb_planesx;
    int nb_planesy;
    int nb_planes;
    int depth, depthx, depthy;
    int tlut2;
@@ -77,6 +82,7 @@ static const AVOption options[] = {
    { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "d", "set output depth", OFFSET(odepth), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 16, .flags = FLAGS },
    { NULL }
};
@@ -96,27 +102,93 @@ static av_cold void uninit(AVFilterContext *ctx)
    }
}

#define BIT8_FMTS \
    AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, \
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, \
    AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P, \
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, \
    AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, \
    AV_PIX_FMT_GRAY8, AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,

#define BIT9_FMTS \
    AV_PIX_FMT_GBRP9, AV_PIX_FMT_GRAY9, \
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9, \
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,

#define BIT10_FMTS \
    AV_PIX_FMT_GRAY10, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRAP10, \
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, \
    AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,

#define BIT12_FMTS \
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12, \
    AV_PIX_FMT_GRAY12, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRP12,

#define BIT14_FMTS \
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14, \
    AV_PIX_FMT_GRAY12, AV_PIX_FMT_GBRP14,

#define BIT16_FMTS \
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16, \
    AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16, \
    AV_PIX_FMT_GBRP16, AV_PIX_FMT_GBRAP16, AV_PIX_FMT_GRAY16,

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12,
    LUT2Context *s = ctx->priv;
    static const enum AVPixelFormat all_pix_fmts[] = {
        BIT8_FMTS
        BIT9_FMTS
        BIT10_FMTS
        BIT12_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit8_pix_fmts[] = {
        BIT8_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit9_pix_fmts[] = {
        BIT9_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit10_pix_fmts[] = {
        BIT10_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit12_pix_fmts[] = {
        BIT12_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit14_pix_fmts[] = {
        BIT14_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit16_pix_fmts[] = {
        BIT16_FMTS
        AV_PIX_FMT_NONE
    };
    const enum AVPixelFormat *pix_fmts;
    int ret;

    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
    if (s->tlut2 || !s->odepth)
        return ff_set_common_formats(ctx, ff_make_format_list(all_pix_fmts));

    ret = ff_formats_ref(ff_make_format_list(all_pix_fmts), &ctx->inputs[0]->out_formats);
    if (ret < 0)
        return ret;

    switch (s->odepth) {
    case 8:  pix_fmts = bit8_pix_fmts;  break;
    case 9:  pix_fmts = bit9_pix_fmts;  break;
    case 10: pix_fmts = bit10_pix_fmts; break;
    case 12: pix_fmts = bit12_pix_fmts; break;
    case 14: pix_fmts = bit14_pix_fmts; break;
    case 16: pix_fmts = bit16_pix_fmts; break;
    default: av_log(ctx, AV_LOG_ERROR, "Unsupported output bit depth %d.\n", s->odepth);
             return AVERROR(EINVAL);
    }

    return ff_formats_ref(ff_make_format_list(pix_fmts), &ctx->outputs[0]->in_formats);
}

static int config_inputx(AVFilterLink *inlink)
@@ -127,11 +199,11 @@ static int config_inputx(AVFilterLink *inlink)
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->height[0] = s->height[3] = inlink->h;
    s->width[1] = s->width[2] = AV_CEIL_RSHIFT(inlink->w, hsub);
    s->width[0] = s->width[3] = inlink->w;
    s->nb_planesx = av_pix_fmt_count_planes(inlink->format);
    s->heightx[1] = s->heightx[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->heightx[0] = s->heightx[3] = inlink->h;
    s->widthx[1] = s->widthx[2] = AV_CEIL_RSHIFT(inlink->w, hsub);
    s->widthx[0] = s->widthx[3] = inlink->w;

    s->var_values[VAR_W] = inlink->w;
    s->var_values[VAR_H] = inlink->h;
@@ -151,62 +223,58 @@ static int config_inputy(AVFilterLink *inlink)
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;

    s->nb_planesy = av_pix_fmt_count_planes(inlink->format);
    s->depthy = desc->comp[0].depth;
    s->var_values[VAR_BITDEPTHY] = s->depthy;
    s->heighty[1] = s->heighty[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->heighty[0] = s->heighty[3] = inlink->h;
    s->widthy[1] = s->widthy[2] = AV_CEIL_RSHIFT(inlink->w, hsub);
    s->widthy[0] = s->widthy[3] = inlink->w;

    return 0;
}

static void lut2_8bit(struct LUT2Context *s, AVFrame *out, AVFrame *srcx, AVFrame *srcy)
{
    int p, y, x;

    for (p = 0; p < s->nb_planes; p++) {
        const uint16_t *lut = s->lut[p];
        const uint8_t *srcxx, *srcyy;
        uint8_t *dst;

        dst = out->data[p];
        srcxx = srcx->data[p];
        srcyy = srcy->data[p];

        for (y = 0; y < s->height[p]; y++) {
            for (x = 0; x < s->width[p]; x++) {
                dst[x] = lut[(srcyy[x] << s->depthx) | srcxx[x]];
            }

            dst += out->linesize[p];
            srcxx += srcx->linesize[p];
            srcyy += srcy->linesize[p];
        }
    }
#define DEFINE_LUT2(zname, xname, yname, ztype, xtype, ytype, zdiv, xdiv, ydiv) \
static void lut2_##zname##_##xname##_##yname(struct LUT2Context *s, \
                                             AVFrame *out, \
                                             AVFrame *srcx, AVFrame *srcy) \
{ \
    const int odepth = s->odepth; \
    int p, y, x; \
 \
    for (p = 0; p < s->nb_planes; p++) { \
        const uint16_t *lut = s->lut[p]; \
        const xtype *srcxx; \
        const ytype *srcyy; \
        ztype *dst; \
 \
        dst = (ztype *)out->data[p]; \
        srcxx = (const xtype *)srcx->data[p]; \
        srcyy = (const ytype *)srcy->data[p]; \
 \
        for (y = 0; y < s->heightx[p]; y++) { \
            for (x = 0; x < s->widthx[p]; x++) { \
                dst[x] = av_clip_uintp2_c(lut[(srcyy[x] << s->depthx) | srcxx[x]], odepth); \
            } \
 \
            dst += out->linesize[p] / zdiv; \
            srcxx += srcx->linesize[p] / xdiv; \
            srcyy += srcy->linesize[p] / ydiv; \
        } \
    } \
}

static void lut2_16bit(struct LUT2Context *s, AVFrame *out, AVFrame *srcx, AVFrame *srcy)
{
    int p, y, x;

    for (p = 0; p < s->nb_planes; p++) {
        const uint16_t *lut = s->lut[p];
        const uint16_t *srcxx, *srcyy;
        uint16_t *dst;

        dst = (uint16_t *)out->data[p];
        srcxx = (uint16_t *)srcx->data[p];
        srcyy = (uint16_t *)srcy->data[p];

        for (y = 0; y < s->height[p]; y++) {
            for (x = 0; x < s->width[p]; x++) {
                dst[x] = lut[(srcyy[x] << s->depthx) | srcxx[x]];
            }

            dst += out->linesize[p] / 2;
            srcxx += srcx->linesize[p] / 2;
            srcyy += srcy->linesize[p] / 2;
        }
    }
}
DEFINE_LUT2(8,   8,  8, uint8_t,  uint8_t,  uint8_t,  1, 1, 1)
DEFINE_LUT2(8,   8, 16, uint8_t,  uint8_t,  uint16_t, 1, 1, 2)
DEFINE_LUT2(8,  16,  8, uint8_t,  uint16_t, uint8_t,  1, 2, 1)
DEFINE_LUT2(8,  16, 16, uint8_t,  uint16_t, uint16_t, 1, 2, 2)
DEFINE_LUT2(16,  8,  8, uint16_t, uint8_t,  uint8_t,  2, 1, 1)
DEFINE_LUT2(16,  8, 16, uint16_t, uint8_t,  uint16_t, 2, 1, 2)
DEFINE_LUT2(16, 16,  8, uint16_t, uint16_t, uint8_t,  2, 2, 1)
DEFINE_LUT2(16, 16, 16, uint16_t, uint16_t, uint16_t, 2, 2, 2)

static int process_frame(FFFrameSync *fs)
{
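As an aside, the generated kernels above pack both source samples into a single table index, with the second input's sample in the high bits. A minimal standalone sketch (my own illustration, not code from this commit; the depths and sample values are arbitrary):

/* lut2_index_demo.c -- illustrative only; mirrors (srcyy[x] << s->depthx) | srcxx[x] */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const int depthx = 8, depthy = 10;                      /* e.g. 8-bit first input, 10-bit second */
    const unsigned xs = 200;                                /* sample from the first input,  0..255  */
    const unsigned ys = 900;                                /* sample from the second input, 0..1023 */

    const unsigned lut_entries = 1u << (depthx + depthy);   /* one entry per (x, y) pair */
    const unsigned index       = (ys << depthx) | xs;       /* y in the high bits        */

    printf("entries=%u index=%u\n", lut_entries, index);    /* entries=262144 index=230600 */
    return 0;
}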
@@ -245,8 +313,27 @@ static int config_output(AVFilterLink *outlink)
    int p, ret;

    s->depth = s->depthx + s->depthy;
    s->nb_planes = s->nb_planesx;

    s->lut2 = s->depth > 16 ? lut2_16bit : lut2_8bit;
    s->lut2 = s->depth > 16 ? lut2_16_16_16 : lut2_8_8_8;
    if (s->odepth) {
        if (s->depthx == 8 && s->depthy == 8 && s->odepth > 8)
            s->lut2 = lut2_16_8_8;
        if (s->depthx > 8 && s->depthy == 8 && s->odepth > 8)
            s->lut2 = lut2_16_16_8;
        if (s->depthx == 8 && s->depthy > 8 && s->odepth > 8)
            s->lut2 = lut2_16_8_16;
        if (s->depthx == 8 && s->depthy == 8 && s->odepth == 8)
            s->lut2 = lut2_8_8_8;
        if (s->depthx > 8 && s->depthy == 8 && s->odepth == 8)
            s->lut2 = lut2_8_16_8;
        if (s->depthx == 8 && s->depthy > 8 && s->odepth == 8)
            s->lut2 = lut2_8_8_16;
        if (s->depthx > 8 && s->depthy > 8 && s->odepth == 8)
            s->lut2 = lut2_8_16_16;
    } else {
        s->odepth = s->depthx;
    }

    for (p = 0; p < s->nb_planes; p++) {
        s->lut[p] = av_malloc_array(1 << s->depth, sizeof(uint16_t));
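A quick size check on the allocation above (my own arithmetic, not stated in the commit): with depth = depthx + depthy, each plane's table holds 1 << depth uint16_t entries, so two 8-bit inputs need 65536 entries (128 KiB) per plane, while two 10-bit inputs need 1048576 entries (2 MiB) per plane.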
@@ -271,7 +358,7 @@ static int config_output(AVFilterLink *outlink)
    }

    /* compute the lut */
    for (y = 0; y < (1 << s->depthx); y++) {
    for (y = 0; y < (1 << s->depthy); y++) {
        s->var_values[VAR_Y] = y;
        for (x = 0; x < (1 << s->depthx); x++) {
            s->var_values[VAR_X] = x;
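To make the table-building loop above concrete, here is a toy standalone version (illustrative only, not from the commit) that uses tiny 2-bit depths and hard-codes the expression x + y in place of the AVExpr evaluation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const int depthx = 2, depthy = 2;        /* pretend both inputs are 2-bit */
    uint16_t lut[1 << (2 + 2)];              /* one entry per (x, y) pair     */

    for (int y = 0; y < (1 << depthy); y++)
        for (int x = 0; x < (1 << depthx); x++)
            lut[(y << depthx) | x] = (uint16_t)(x + y);  /* stand-in for the expression */

    /* Look the pair (x = 3, y = 1) up exactly as the filter kernels do. */
    printf("lut[%d] = %d\n", (1 << depthx) | 3, lut[(1 << depthx) | 3]);
    return 0;
}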
@@ -298,12 +385,28 @@ static int lut2_config_output(AVFilterLink *outlink)
    AVFilterLink *srcx = ctx->inputs[0];
    AVFilterLink *srcy = ctx->inputs[1];
    FFFrameSyncIn *in;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;
    int ret;

    if (srcx->format != srcy->format) {
    outlink->w = srcx->w;
    outlink->h = srcx->h;
    outlink->time_base = srcx->time_base;
    outlink->sample_aspect_ratio = srcx->sample_aspect_ratio;
    outlink->frame_rate = srcx->frame_rate;

    s->nb_planes = av_pix_fmt_count_planes(outlink->format);
    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(outlink->h, vsub);
    s->height[0] = s->height[3] = outlink->h;
    s->width[1] = s->width[2] = AV_CEIL_RSHIFT(outlink->w, hsub);
    s->width[0] = s->width[3] = outlink->w;

    if (!s->odepth && srcx->format != srcy->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    }

    if (srcx->w != srcy->w || srcx->h != srcy->h) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d) do not match the corresponding "
@@ -314,11 +417,61 @@ static int lut2_config_output(AVFilterLink *outlink)
        return AVERROR(EINVAL);
    }

    outlink->w = srcx->w;
    outlink->h = srcx->h;
    outlink->time_base = srcx->time_base;
    outlink->sample_aspect_ratio = srcx->sample_aspect_ratio;
    outlink->frame_rate = srcx->frame_rate;
    if (s->nb_planesx != s->nb_planesy) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s number of planes "
               "(%d) do not match the corresponding "
               "second input link %s number of planes (%d)\n",
               ctx->input_pads[0].name, s->nb_planesx,
               ctx->input_pads[1].name, s->nb_planesy);
        return AVERROR(EINVAL);
    }

    if (s->nb_planesx != s->nb_planes) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s number of planes "
               "(%d) do not match the corresponding "
               "output link %s number of planes (%d)\n",
               ctx->input_pads[0].name, s->nb_planesx,
               ctx->output_pads[0].name, s->nb_planes);
        return AVERROR(EINVAL);
    }

    if (s->widthx[1] != s->widthy[1] || s->heightx[1] != s->heighty[1]) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s 2nd plane "
               "(size %dx%d) do not match the corresponding "
               "second input link %s 2nd plane (size %dx%d)\n",
               ctx->input_pads[0].name, s->widthx[1], s->heightx[1],
               ctx->input_pads[1].name,
               s->widthy[1], s->heighty[1]);
        return AVERROR(EINVAL);
    }

    if (s->widthx[2] != s->widthy[2] || s->heightx[2] != s->heighty[2]) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s 3rd plane "
               "(size %dx%d) do not match the corresponding "
               "second input link %s 3rd plane (size %dx%d)\n",
               ctx->input_pads[0].name, s->widthx[2], s->heightx[2],
               ctx->input_pads[1].name,
               s->widthy[2], s->heighty[2]);
        return AVERROR(EINVAL);
    }

    if (s->widthx[1] != s->width[1] || s->heightx[1] != s->height[1]) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s 2nd plane "
               "(size %dx%d) do not match the corresponding "
               "output link %s 2nd plane (size %dx%d)\n",
               ctx->input_pads[0].name, s->widthx[1], s->heightx[1],
               ctx->output_pads[0].name, s->width[1], s->height[1]);
        return AVERROR(EINVAL);
    }

    if (s->widthx[2] != s->width[2] || s->heightx[2] != s->height[2]) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s 3rd plane "
               "(size %dx%d) do not match the corresponding "
               "output link %s 3rd plane (size %dx%d)\n",
               ctx->input_pads[0].name, s->widthx[2], s->heightx[2],
               ctx->output_pads[0].name, s->width[2], s->height[2]);
        return AVERROR(EINVAL);
    }

    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
        return ret;
@@ -429,7 +582,13 @@ static int tlut2_filter_frame(AVFilterLink *inlink, AVFrame *frame)
    return 0;
}

#define tlut2_options options
static const AVOption tlut2_options[] = {
    { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(tlut2);