lavfi/vf_paletteuse: convert to framesync2.
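For orientation, the framesync2 dual-input pattern adopted here boils down to the sketch below. FooContext and the foo_* names are hypothetical placeholders; the FFFrameSync fields and ff_framesync2_* calls are the ones appearing in the diff.

/* Minimal sketch of the framesync2 dual-input pattern. "FooContext" and the
 * foo_* names are made up for illustration; only the framesync2 usage mirrors
 * what the patch below does. */
#include "libavutil/internal.h"
#include "avfilter.h"
#include "filters.h"
#include "framesync2.h"
#include "internal.h"

typedef struct FooContext {
    const AVClass *class;
    FFFrameSync fs;                   /* replaces FFDualInputContext dinput */
} FooContext;

/* Runs whenever framesync2 has a frame pair ready (set via s->fs.on_event). */
static int foo_on_event(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFrame *main, *second;
    int ret = ff_framesync2_dualinput_get_writable(fs, &main, &second);
    if (ret < 0)
        return ret;
    /* ... use the secondary frame to transform the writable main frame ... */
    return ff_filter_frame(ctx->outputs[0], main);
}

static int foo_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FooContext *s = ctx->priv;
    int ret = ff_framesync2_init_dualinput(&s->fs, ctx);
    if (ret < 0)
        return ret;
    s->fs.opt_repeatlast = 1;         /* keep reusing the last secondary frame */
    s->fs.on_event       = foo_on_event;
    return ff_framesync2_configure(&s->fs);
}

/* The activate callback replaces the per-pad filter_frame/request_frame pair. */
static int foo_activate(AVFilterContext *ctx)
{
    FooContext *s = ctx->priv;
    return ff_framesync2_activate(&s->fs);
}

static void foo_uninit(AVFilterContext *ctx)
{
    FooContext *s = ctx->priv;
    ff_framesync2_uninit(&s->fs);
}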
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -249,7 +249,7 @@ OBJS-$(CONFIG_OVERLAY_FILTER)                += vf_overlay.o framesync2.o
 OBJS-$(CONFIG_OWDENOISE_FILTER)              += vf_owdenoise.o
 OBJS-$(CONFIG_PAD_FILTER)                    += vf_pad.o
 OBJS-$(CONFIG_PALETTEGEN_FILTER)             += vf_palettegen.o
-OBJS-$(CONFIG_PALETTEUSE_FILTER)             += vf_paletteuse.o dualinput.o framesync.o
+OBJS-$(CONFIG_PALETTEUSE_FILTER)             += vf_paletteuse.o framesync2.o
 OBJS-$(CONFIG_PERMS_FILTER)                  += f_perms.o
 OBJS-$(CONFIG_PERSPECTIVE_FILTER)            += vf_perspective.o
 OBJS-$(CONFIG_PHASE_FILTER)                  += vf_phase.o
--- a/libavfilter/vf_paletteuse.c
+++ b/libavfilter/vf_paletteuse.c
@@ -27,8 +27,10 @@
 #include "libavutil/internal.h"
 #include "libavutil/opt.h"
 #include "libavutil/qsort.h"
-#include "dualinput.h"
 #include "avfilter.h"
+#include "filters.h"
+#include "framesync2.h"
 #include "internal.h"
 
 enum dithering_mode {
     DITHERING_NONE,
@@ -80,7 +82,7 @@ typedef int (*set_frame_func)(struct PaletteUseContext *s, AVFrame *out, AVFrame
 
 typedef struct PaletteUseContext {
     const AVClass *class;
-    FFDualInputContext dinput;
+    FFFrameSync fs;
     struct cache_node cache[CACHE_SIZE];    /* lookup cache */
     struct color_node map[AVPALETTE_COUNT]; /* 3D-Tree (KD-Tree with K=3) for reverse colormap */
     uint32_t palette[AVPALETTE_COUNT];
@@ -129,6 +131,8 @@ static const AVOption paletteuse_options[] = {
 
 AVFILTER_DEFINE_CLASS(paletteuse);
 
+static int load_apply_palette(FFFrameSync *fs);
+
 static int query_formats(AVFilterContext *ctx)
 {
     static const enum AVPixelFormat in_fmts[]    = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};
@@ -900,11 +904,18 @@ static int config_output(AVFilterLink *outlink)
     AVFilterContext *ctx = outlink->src;
     PaletteUseContext *s = ctx->priv;
 
+    ret = ff_framesync2_init_dualinput(&s->fs, ctx);
+    if (ret < 0)
+        return ret;
+    s->fs.opt_repeatlast = 1; // only 1 frame in the palette
+    s->fs.in[1].before = s->fs.in[1].after = EXT_INFINITY;
+    s->fs.on_event = load_apply_palette;
+
     outlink->w = ctx->inputs[0]->w;
     outlink->h = ctx->inputs[0]->h;
 
     outlink->time_base = ctx->inputs[0]->time_base;
-    if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
+    if ((ret = ff_framesync2_configure(&s->fs)) < 0)
         return ret;
     return 0;
 }
@@ -951,21 +962,32 @@ static void load_palette(PaletteUseContext *s, const AVFrame *palette_frame)
         s->palette_loaded = 1;
 }
 
-static AVFrame *load_apply_palette(AVFilterContext *ctx, AVFrame *main,
-                                   const AVFrame *second)
+static int load_apply_palette(FFFrameSync *fs)
 {
+    AVFilterContext *ctx = fs->parent;
     AVFilterLink *inlink = ctx->inputs[0];
     PaletteUseContext *s = ctx->priv;
+    AVFrame *main, *second, *out;
+    int ret;
+
+    // writable for error diffusal dithering
+    ret = ff_framesync2_dualinput_get_writable(fs, &main, &second);
+    if (ret < 0)
+        return ret;
+    if (!main || !second) {
+        ret = AVERROR_BUG;
+        goto error;
+    }
     if (!s->palette_loaded) {
         load_palette(s, second);
     }
-    return apply_palette(inlink, main);
-}
+    out = apply_palette(inlink, main);
+    return ff_filter_frame(ctx->outputs[0], out);
 
-static int filter_frame(AVFilterLink *inlink, AVFrame *in)
-{
-    PaletteUseContext *s = inlink->dst->priv;
-    return ff_dualinput_filter_frame(&s->dinput, inlink, in);
+error:
+    av_frame_free(&main);
+    av_frame_free(&second);
+    return ret;
 }
 
 #define DEFINE_SET_FRAME(color_search, name, value)                             \
@@ -1013,9 +1035,6 @@ static int dither_value(int p)
 static av_cold int init(AVFilterContext *ctx)
 {
     PaletteUseContext *s = ctx->priv;
-    s->dinput.repeatlast = 1; // only 1 frame in the palette
-    s->dinput.skip_initial_unpaired = 1;
-    s->dinput.process    = load_apply_palette;
 
     s->set_frame = set_frame_lut[s->color_search_method][s->dither];
 
@@ -1030,10 +1049,10 @@ static av_cold int init(AVFilterContext *ctx)
     return 0;
 }
 
-static int request_frame(AVFilterLink *outlink)
+static int activate(AVFilterContext *ctx)
 {
-    PaletteUseContext *s = outlink->src->priv;
-    return ff_dualinput_request_frame(&s->dinput, outlink);
+    PaletteUseContext *s = ctx->priv;
+    return ff_framesync2_activate(&s->fs);
 }
 
 static av_cold void uninit(AVFilterContext *ctx)
@@ -1041,7 +1060,7 @@ static av_cold void uninit(AVFilterContext *ctx)
     int i;
     PaletteUseContext *s = ctx->priv;
 
-    ff_dualinput_uninit(&s->dinput);
+    ff_framesync2_uninit(&s->fs);
     for (i = 0; i < CACHE_SIZE; i++)
         av_freep(&s->cache[i].entries);
     av_frame_free(&s->last_in);
@@ -1052,13 +1071,10 @@ static const AVFilterPad paletteuse_inputs[] = {
     {
         .name           = "default",
         .type           = AVMEDIA_TYPE_VIDEO,
-        .filter_frame   = filter_frame,
-        .needs_writable = 1, // for error diffusal dithering
     },{
         .name           = "palette",
         .type           = AVMEDIA_TYPE_VIDEO,
         .config_props   = config_input_palette,
-        .filter_frame   = filter_frame,
     },
     { NULL }
 };
@@ -1068,7 +1084,6 @@ static const AVFilterPad paletteuse_outputs[] = {
         .name          = "default",
         .type          = AVMEDIA_TYPE_VIDEO,
         .config_props  = config_output,
-        .request_frame = request_frame,
     },
     { NULL }
 };
@@ -1080,6 +1095,7 @@ AVFilter ff_vf_paletteuse = {
     .query_formats = query_formats,
     .init          = init,
     .uninit        = uninit,
+    .activate      = activate,
     .inputs        = paletteuse_inputs,
     .outputs       = paletteuse_outputs,
     .priv_class    = &paletteuse_class,