You've already forked FFmpeg
mirror of
https://github.com/FFmpeg/FFmpeg.git
synced 2025-10-06 05:47:18 +02:00
The semantics of these keywords are well-defined by the CSS 'object-fit' property. This is arguably more user-friendly and less obtuse than the existing `normalize_sar` and `pad_crop_ratio` options. Additionally, this comes with two new (useful) behaviors, `none` and `scale_down`, neither of which map elegantly to the existing options. One additional benefit of this option is that, unlike `normalize_sar`, it does *not* also imply `reset_sar`; meaning that users can now choose to have an anamorphic base layer and still have the overlay images scaled to fit on top of it according to the chosen strategy. See-Also: https://drafts.csswg.org/css-images/#the-object-fit
1792 lines
77 KiB
C
1792 lines
77 KiB
C
/*
|
|
* This file is part of FFmpeg.
|
|
*
|
|
* FFmpeg is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
* License as published by the Free Software Foundation; either
|
|
* version 2.1 of the License, or (at your option) any later version.
|
|
*
|
|
* FFmpeg is distributed in the hope that it will be useful,
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
* Lesser General Public License for more details.
|
|
*
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
* License along with FFmpeg; if not, write to the Free Software
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
*/
|
|
|
|
#include <math.h>
|
|
|
|
#include "libavutil/avassert.h"
|
|
#include "libavutil/eval.h"
|
|
#include "libavutil/fifo.h"
|
|
#include "libavutil/file.h"
|
|
#include "libavutil/frame.h"
|
|
#include "libavutil/mem.h"
|
|
#include "libavutil/opt.h"
|
|
#include "libavutil/parseutils.h"
|
|
#include "formats.h"
|
|
#include "filters.h"
|
|
#include "video.h"
|
|
#include "vulkan_filter.h"
|
|
#include "scale_eval.h"
|
|
|
|
#include <libplacebo/renderer.h>
|
|
#include <libplacebo/utils/libav.h>
|
|
#include <libplacebo/utils/frame_queue.h>
|
|
#include <libplacebo/vulkan.h>
|
|
|
|
/* Backwards compatibility with older libplacebo */
|
|
#if PL_API_VER < 276
/* libplacebo < 276 has no pl_get_mapped_avframe(); emulate it by reading
 * back the pointer stashed in `user_data` when the frame was mapped
 * (NOTE(review): assumes all frames were mapped via pl_map_avframe_ex or
 * equivalent, which stores the AVFrame there — confirm at the call sites). */
static inline AVFrame *pl_get_mapped_avframe(const struct pl_frame *frame)
{
    return frame->user_data;
}
#endif
|
|
|
|
#if PL_API_VER >= 309
#include <libplacebo/options.h>
#else
/* Minimal stand-in for the pl_options API introduced in libplacebo 309:
 * a plain aggregate of all the parameter structs that update_settings()
 * fills in manually. The string-based pl_options_set_str() interface is
 * unavailable in this shim (see the extra_opts handling). */
typedef struct pl_options_t {
    // Backwards compatibility shim of this struct
    struct pl_render_params params;
    struct pl_deinterlace_params deinterlace_params;
    struct pl_deband_params deband_params;
    struct pl_sigmoid_params sigmoid_params;
    struct pl_color_adjustment color_adjustment;
    struct pl_peak_detect_params peak_detect_params;
    struct pl_color_map_params color_map_params;
    struct pl_dither_params dither_params;
    struct pl_cone_params cone_params;
} *pl_options;

/* The shim holds no log reference and no dynamic members, so plain
 * zeroed allocation / free is sufficient. */
#define pl_options_alloc(log) av_mallocz(sizeof(struct pl_options_t))
#define pl_options_free(ptr) av_freep(ptr)
#endif
|
|
|
|
/* Tone-mapping modes; translated to libplacebo tone-map functions by
 * get_tonemapping_func(). */
enum {
    TONE_MAP_AUTO,
    TONE_MAP_CLIP,
    TONE_MAP_ST2094_40, ///< requires libplacebo >= 246
    TONE_MAP_ST2094_10, ///< requires libplacebo >= 246
    TONE_MAP_BT2390,
    TONE_MAP_BT2446A,
    TONE_MAP_SPLINE,
    TONE_MAP_REINHARD,
    TONE_MAP_MOBIUS,
    TONE_MAP_HABLE,
    TONE_MAP_GAMMA,
    TONE_MAP_LINEAR,
    TONE_MAP_COUNT, ///< number of modes, not a valid mode itself
};
|
|
|
|
/* Gamut-mapping modes; applied to the color map params by set_gamut_mode().
 * On libplacebo < 269 only a subset maps onto rendering intents / legacy
 * gamut modes; the rest fall back to defaults. */
enum {
    GAMUT_MAP_CLIP,
    GAMUT_MAP_PERCEPTUAL,
    GAMUT_MAP_RELATIVE,
    GAMUT_MAP_SATURATION,
    GAMUT_MAP_ABSOLUTE,
    GAMUT_MAP_DESATURATE,
    GAMUT_MAP_DARKEN,
    GAMUT_MAP_HIGHLIGHT,
    GAMUT_MAP_LINEAR,
    GAMUT_MAP_COUNT, ///< number of modes, not a valid mode itself
};
|
|
|
|
/* Names of the variables available to the crop/pos expressions.
 * NOTE: order must stay in sync with enum var_name below; the list is
 * NULL-terminated for av_expr_parse(). */
static const char *const var_names[] = {
    "in_idx", "idx",///< index of input
    "in_w", "iw",   ///< width of the input video frame
    "in_h", "ih",   ///< height of the input video frame
    "out_w", "ow",  ///< width of the output video frame
    "out_h", "oh",  ///< height of the output video frame
    "crop_w", "cw", ///< evaluated input crop width
    "crop_h", "ch", ///< evaluated input crop height
    "pos_w", "pw",  ///< evaluated output placement width
    "pos_h", "ph",  ///< evaluated output placement height
    "a",            ///< iw/ih
    "sar",          ///< input pixel aspect ratio
    "dar",          ///< output pixel aspect ratio
    "hsub",         ///< input horizontal subsampling factor
    "vsub",         ///< input vertical subsampling factor
    "ohsub",        ///< output horizontal subsampling factor
    "ovsub",        ///< output vertical subsampling factor
    "in_t", "t",    ///< input frame pts
    "out_t", "ot",  ///< output frame pts
    "n",            ///< number of frame
    NULL,
};
|
|
|
|
/* Indices into LibplaceboContext.var_values[]; order must stay in sync
 * with var_names[] above. */
enum var_name {
    VAR_IN_IDX, VAR_IDX,
    VAR_IN_W,   VAR_IW,
    VAR_IN_H,   VAR_IH,
    VAR_OUT_W,  VAR_OW,
    VAR_OUT_H,  VAR_OH,
    VAR_CROP_W, VAR_CW,
    VAR_CROP_H, VAR_CH,
    VAR_POS_W,  VAR_PW,
    VAR_POS_H,  VAR_PH,
    VAR_A,
    VAR_SAR,
    VAR_DAR,
    VAR_HSUB,
    VAR_VSUB,
    VAR_OHSUB,
    VAR_OVSUB,
    VAR_IN_T,   VAR_T,
    VAR_OUT_T,  VAR_OT,
    VAR_N,
    VAR_VARS_NB ///< total number of variables
};
|
|
|
|
/* per-input dynamic filter state */
typedef struct LibplaceboInput {
    int idx;                     ///< index of this input pad
    pl_renderer renderer;        ///< per-input renderer instance
    pl_queue queue;              ///< frame queue feeding this input
    enum pl_queue_status qstatus;///< result of the last queue update
    struct pl_frame_mix mix;     ///< temporary storage
    AVFifo *out_pts;             ///< timestamps of wanted output frames
    int64_t status_pts;          ///< pts at which `status` applies (NOTE(review): ff_inlink status convention — confirm against consumers)
    int status;                  ///< input link status, 0 if still active
} LibplaceboInput;
|
|
|
|
/* How the (SAR-corrected) input is fitted into the output canvas; the
 * semantics follow the CSS 'object-fit' property
 * (https://drafts.csswg.org/css-images/#the-object-fit).
 * Applied to the target crop in update_crops(). */
enum fit_mode {
    FIT_FILL,       ///< stretch to fill the placement rect (aspect may change)
    FIT_CONTAIN,    ///< scale to fit inside, preserving aspect (letterbox)
    FIT_COVER,      ///< scale to cover, preserving aspect (may crop)
    FIT_NONE,       ///< no scaling; keep the input's own size
    FIT_SCALE_DOWN, ///< like 'contain', but never upscale
    FIT_MODE_NB,    ///< number of modes, not a valid mode itself
};
|
|
|
|
/* Private context of the libplacebo filter: Vulkan/libplacebo handles,
 * per-input state, and every user-visible option (mostly mirrored 1:1 into
 * the pl_*_params structs by update_settings()). */
typedef struct LibplaceboContext {
    /* lavfi vulkan*/
    FFVulkanContext vkctx;

    /* libplacebo */
    pl_log log;
    pl_vulkan vulkan;
    pl_gpu gpu;                  ///< convenience alias of vulkan->gpu
    pl_tex tex[4];               ///< scratch textures, one per plane
    struct pl_custom_lut *lut;   ///< parsed .cube LUT, or NULL

    /* dedicated renderer for linear output composition */
    pl_renderer linear_rr;
    pl_tex linear_tex;

    /* input state */
    LibplaceboInput *inputs;     ///< array of nb_inputs entries
    int nb_inputs;
    int nb_active;               ///< inputs not yet at EOF

    /* settings */
    char *out_format_string;
    enum AVPixelFormat out_format; ///< parsed out_format_string, or NONE
    uint8_t fillcolor[4];          ///< background RGBA
    double var_values[VAR_VARS_NB];
    char *w_expr;
    char *h_expr;
    char *fps_string;
    AVRational fps; ///< parsed FPS, or 0/0 for "none"
    char *crop_x_expr, *crop_y_expr;
    char *crop_w_expr, *crop_h_expr;
    char *pos_x_expr, *pos_y_expr;
    char *pos_w_expr, *pos_h_expr;
    // Parsed expressions for input/output crop
    AVExpr *crop_x_pexpr, *crop_y_pexpr, *crop_w_pexpr, *crop_h_pexpr;
    AVExpr *pos_x_pexpr, *pos_y_pexpr, *pos_w_pexpr, *pos_h_pexpr;
    float pad_crop_ratio;        ///< 0.0 = pad, 1.0 = crop (fill mode only)
    float corner_rounding;
    char *lut_filename;
    enum pl_lut_type lut_type;
    int force_original_aspect_ratio;
    int force_divisible_by;
    int reset_sar;
    int normalize_sar;           ///< only meaningful with FIT_FILL (see libplacebo_init)
    int fit_mode;                ///< enum fit_mode
    int apply_filmgrain;
    int apply_dovi;
    int colorspace;
    int color_range;
    int color_primaries;
    int color_trc;
    int rotation;
    int alpha_mode;
    AVDictionary *extra_opts;    ///< passed through pl_options_set_str (libplacebo >= 309)

#if PL_API_VER >= 351
    pl_cache cache;              ///< on-disk shader cache
    char *shader_cache;          ///< path backing `cache`
#endif

    int have_hwdevice;           ///< set when the Vulkan device was imported

    /* pl_render_params */
    pl_options opts;
    char *upscaler;
    char *downscaler;
    char *frame_mixer;
    float antiringing;
    int sigmoid;
    int skip_aa;
    int disable_linear;
    int disable_builtin;
    int force_dither;
    int disable_fbos;

    /* pl_deinterlace_params */
    int deinterlace;
    int skip_spatial_check;
    int send_fields;

    /* pl_deband_params */
    int deband;
    int deband_iterations;
    float deband_threshold;
    float deband_radius;
    float deband_grain;

    /* pl_color_adjustment */
    float brightness;
    float contrast;
    float saturation;
    float hue;
    float gamma;

    /* pl_peak_detect_params */
    int peakdetect;
    float smoothing;
    float scene_low;
    float scene_high;
    float percentile;

    /* pl_color_map_params */
    int gamut_mode;
    int tonemapping;
    float tonemapping_param;
    int inverse_tonemapping;
    int tonemapping_lut_size;
    float contrast_recovery;
    float contrast_smoothness;

    /* pl_dither_params */
    int dithering;
    int dither_lut_size;
    int dither_temporal;

    /* pl_cone_params */
    int cones;
    float cone_str;

    /* custom shaders */
    char *shader_path;
    void *shader_bin;
    int shader_bin_len;
    const struct pl_hook *hooks[2]; ///< at most one each from shader_bin / shader_path
    int num_hooks;
} LibplaceboContext;
|
|
|
|
static inline enum pl_log_level get_log_level(void)
|
|
{
|
|
int av_lev = av_log_get_level();
|
|
return av_lev >= AV_LOG_TRACE ? PL_LOG_TRACE :
|
|
av_lev >= AV_LOG_DEBUG ? PL_LOG_DEBUG :
|
|
av_lev >= AV_LOG_VERBOSE ? PL_LOG_INFO :
|
|
av_lev >= AV_LOG_WARNING ? PL_LOG_WARN :
|
|
av_lev >= AV_LOG_ERROR ? PL_LOG_ERR :
|
|
av_lev >= AV_LOG_FATAL ? PL_LOG_FATAL :
|
|
PL_LOG_NONE;
|
|
}
|
|
|
|
static void pl_av_log(void *log_ctx, enum pl_log_level level, const char *msg)
|
|
{
|
|
int av_lev;
|
|
|
|
switch (level) {
|
|
case PL_LOG_FATAL: av_lev = AV_LOG_FATAL; break;
|
|
case PL_LOG_ERR: av_lev = AV_LOG_ERROR; break;
|
|
case PL_LOG_WARN: av_lev = AV_LOG_WARNING; break;
|
|
case PL_LOG_INFO: av_lev = AV_LOG_VERBOSE; break;
|
|
case PL_LOG_DEBUG: av_lev = AV_LOG_DEBUG; break;
|
|
case PL_LOG_TRACE: av_lev = AV_LOG_TRACE; break;
|
|
default: return;
|
|
}
|
|
|
|
av_log(log_ctx, av_lev, "%s\n", msg);
|
|
}
|
|
|
|
/**
 * Map a TONE_MAP_* mode to the corresponding libplacebo tone-mapping
 * function. The ST.2094 curves only exist on libplacebo >= 246; any value
 * without a case here trips the assert (NOTE(review): assumed unreachable
 * because the option range is validated at parse time — confirm in the
 * options table).
 */
static const struct pl_tone_map_function *get_tonemapping_func(int tm) {
    switch (tm) {
    case TONE_MAP_AUTO:       return &pl_tone_map_auto;
    case TONE_MAP_CLIP:       return &pl_tone_map_clip;
#if PL_API_VER >= 246
    case TONE_MAP_ST2094_40:  return &pl_tone_map_st2094_40;
    case TONE_MAP_ST2094_10:  return &pl_tone_map_st2094_10;
#endif
    case TONE_MAP_BT2390:     return &pl_tone_map_bt2390;
    case TONE_MAP_BT2446A:    return &pl_tone_map_bt2446a;
    case TONE_MAP_SPLINE:     return &pl_tone_map_spline;
    case TONE_MAP_REINHARD:   return &pl_tone_map_reinhard;
    case TONE_MAP_MOBIUS:     return &pl_tone_map_mobius;
    case TONE_MAP_HABLE:      return &pl_tone_map_hable;
    case TONE_MAP_GAMMA:      return &pl_tone_map_gamma;
    case TONE_MAP_LINEAR:     return &pl_tone_map_linear;
    default: av_assert0(0);
    }
}
|
|
|
|
static void set_gamut_mode(struct pl_color_map_params *p, int gamut_mode)
|
|
{
|
|
switch (gamut_mode) {
|
|
#if PL_API_VER >= 269
|
|
case GAMUT_MAP_CLIP: p->gamut_mapping = &pl_gamut_map_clip; return;
|
|
case GAMUT_MAP_PERCEPTUAL: p->gamut_mapping = &pl_gamut_map_perceptual; return;
|
|
case GAMUT_MAP_RELATIVE: p->gamut_mapping = &pl_gamut_map_relative; return;
|
|
case GAMUT_MAP_SATURATION: p->gamut_mapping = &pl_gamut_map_saturation; return;
|
|
case GAMUT_MAP_ABSOLUTE: p->gamut_mapping = &pl_gamut_map_absolute; return;
|
|
case GAMUT_MAP_DESATURATE: p->gamut_mapping = &pl_gamut_map_desaturate; return;
|
|
case GAMUT_MAP_DARKEN: p->gamut_mapping = &pl_gamut_map_darken; return;
|
|
case GAMUT_MAP_HIGHLIGHT: p->gamut_mapping = &pl_gamut_map_highlight; return;
|
|
case GAMUT_MAP_LINEAR: p->gamut_mapping = &pl_gamut_map_linear; return;
|
|
#else
|
|
case GAMUT_MAP_RELATIVE: p->intent = PL_INTENT_RELATIVE_COLORIMETRIC; return;
|
|
case GAMUT_MAP_SATURATION: p->intent = PL_INTENT_SATURATION; return;
|
|
case GAMUT_MAP_ABSOLUTE: p->intent = PL_INTENT_ABSOLUTE_COLORIMETRIC; return;
|
|
case GAMUT_MAP_DESATURATE: p->gamut_mode = PL_GAMUT_DESATURATE; return;
|
|
case GAMUT_MAP_DARKEN: p->gamut_mode = PL_GAMUT_DARKEN; return;
|
|
case GAMUT_MAP_HIGHLIGHT: p->gamut_mode = PL_GAMUT_WARN; return;
|
|
/* Use defaults for all other cases */
|
|
default: return;
|
|
#endif
|
|
}
|
|
|
|
av_assert0(0);
|
|
};
|
|
|
|
/**
 * Resolve a scaler (or frame mixer) preset by name.
 *
 * The special name "help" prints the available presets and returns
 * AVERROR_EXIT; an unknown name logs an error and returns EINVAL.
 */
static int find_scaler(AVFilterContext *avctx,
                       const struct pl_filter_config **opt,
                       const char *name, int frame_mixing)
{
    const struct pl_filter_preset *p;
    const struct pl_filter_preset *list =
        frame_mixing ? pl_frame_mixers : pl_scale_filters;

    if (!strcmp(name, "help")) {
        av_log(avctx, AV_LOG_INFO, "Available scaler presets:\n");
        for (p = list; p->name; p++)
            av_log(avctx, AV_LOG_INFO, " %s\n", p->name);
        return AVERROR_EXIT;
    }

    for (p = list; p->name; p++) {
        if (strcmp(name, p->name))
            continue;
        *opt = p->filter;
        return 0;
    }

    av_log(avctx, AV_LOG_ERROR, "No such scaler preset '%s'.\n", name);
    return AVERROR(EINVAL);
}
|
|
|
|
/**
 * Load and parse the user-supplied .cube LUT file into s->lut.
 *
 * The file is memory-mapped only for the duration of the parse.
 *
 * @return 0 on success, a negative AVERROR on read or parse failure.
 */
static int parse_custom_lut(AVFilterContext *avctx)
{
    LibplaceboContext *s = avctx->priv;
    int ret;
    uint8_t *lutbuf;
    size_t lutbuf_size;

    if ((ret = av_file_map(s->lut_filename, &lutbuf, &lutbuf_size, 0, s)) < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "The LUT file '%s' could not be read: %s\n",
               s->lut_filename, av_err2str(ret));
        return ret;
    }

    s->lut = pl_lut_parse_cube(s->log, lutbuf, lutbuf_size);
    /* The parsed LUT does not reference the mapped file; unmap before the
     * error check so both paths release it */
    av_file_unmap(lutbuf, lutbuf_size);
    if (!s->lut)
        return AVERROR(EINVAL);
    return 0;
}
|
|
|
|
/**
 * (Re-)translate all user options into the libplacebo parameter structs in
 * s->opts. Called once at init and again from process_command whenever an
 * option changes at runtime.
 *
 * Fix: the fallback branch for libplacebo < 309 logged through `avctx`,
 * which is not declared in this function (the parameter is `ctx`), breaking
 * compilation against old libplacebo. Use `ctx`.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int update_settings(AVFilterContext *ctx)
{
    int err = 0;
    LibplaceboContext *s = ctx->priv;
    AVDictionaryEntry *e = NULL;
    pl_options opts = s->opts;
    int gamut_mode = s->gamut_mode;

    opts->deinterlace_params = *pl_deinterlace_params(
        .algo = s->deinterlace,
        .skip_spatial_check = s->skip_spatial_check,
    );

    opts->deband_params = *pl_deband_params(
        .iterations = s->deband_iterations,
        .threshold = s->deband_threshold,
        .radius = s->deband_radius,
        .grain = s->deband_grain,
    );

    opts->sigmoid_params = pl_sigmoid_default_params;

    opts->color_adjustment = (struct pl_color_adjustment) {
        .brightness = s->brightness,
        .contrast = s->contrast,
        .saturation = s->saturation,
        .hue = s->hue,
        .gamma = s->gamma,
    };

    opts->peak_detect_params = *pl_peak_detect_params(
        .smoothing_period = s->smoothing,
        .scene_threshold_low = s->scene_low,
        .scene_threshold_high = s->scene_high,
#if PL_API_VER >= 263
        .percentile = s->percentile,
#endif
    );

    opts->color_map_params = *pl_color_map_params(
        .tone_mapping_function = get_tonemapping_func(s->tonemapping),
        .tone_mapping_param = s->tonemapping_param,
        .inverse_tone_mapping = s->inverse_tonemapping,
        .lut_size = s->tonemapping_lut_size,
#if PL_API_VER >= 285
        .contrast_recovery = s->contrast_recovery,
        .contrast_smoothness = s->contrast_smoothness,
#endif
    );

    set_gamut_mode(&opts->color_map_params, gamut_mode);

    opts->dither_params = *pl_dither_params(
        .method = s->dithering,
        .lut_size = s->dither_lut_size,
        .temporal = s->dither_temporal,
    );

    opts->cone_params = *pl_cone_params(
        .cones = s->cones,
        .strength = s->cone_str,
    );

    opts->params = *pl_render_params(
        .antiringing_strength = s->antiringing,
        /* fillcolor is 8-bit RGBA; libplacebo wants normalized floats */
        .background_transparency = 1.0f - (float) s->fillcolor[3] / UINT8_MAX,
        .background_color = {
            (float) s->fillcolor[0] / UINT8_MAX,
            (float) s->fillcolor[1] / UINT8_MAX,
            (float) s->fillcolor[2] / UINT8_MAX,
        },
#if PL_API_VER >= 277
        .corner_rounding = s->corner_rounding,
#endif

        /* Optional stages are only enabled when the matching option is set */
        .deinterlace_params = &opts->deinterlace_params,
        .deband_params = s->deband ? &opts->deband_params : NULL,
        .sigmoid_params = s->sigmoid ? &opts->sigmoid_params : NULL,
        .color_adjustment = &opts->color_adjustment,
        .peak_detect_params = s->peakdetect ? &opts->peak_detect_params : NULL,
        .color_map_params = &opts->color_map_params,
        .dither_params = s->dithering >= 0 ? &opts->dither_params : NULL,
        .cone_params = s->cones ? &opts->cone_params : NULL,

        .hooks = s->hooks,
        .num_hooks = s->num_hooks,

        .skip_anti_aliasing = s->skip_aa,
        .disable_linear_scaling = s->disable_linear,
        .disable_builtin_scalers = s->disable_builtin,
        .force_dither = s->force_dither,
        .disable_fbos = s->disable_fbos,
    );

    RET(find_scaler(ctx, &opts->params.upscaler, s->upscaler, 0));
    RET(find_scaler(ctx, &opts->params.downscaler, s->downscaler, 0));
    RET(find_scaler(ctx, &opts->params.frame_mixer, s->frame_mixer, 1));

#if PL_API_VER >= 309
    while ((e = av_dict_get(s->extra_opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
        if (!pl_options_set_str(s->opts, e->key, e->value)) {
            err = AVERROR(EINVAL);
            goto fail;
        }
    }
#else
    (void) e;
    if (av_dict_count(s->extra_opts) > 0)
        av_log(ctx, AV_LOG_WARNING, "extra_opts requires libplacebo >= 6.309!\n");
#endif

    return 0;

fail:
    return err;
}
|
|
|
|
/**
 * Parse one custom mpv-style user shader and append it to the hook list,
 * then refresh the render params so it takes effect.
 *
 * Fix: guard against overflowing the fixed-size hooks[] array before
 * writing into it. Today at most two shaders can arrive (shader_bin and
 * shader_path), matching the array size, but the check makes the invariant
 * explicit instead of relying on the call sites.
 *
 * @param shader  pointer to the shader text
 * @param len     length of the shader text in bytes
 * @return 0 on success, a negative AVERROR on failure
 */
static int parse_shader(AVFilterContext *avctx, const void *shader, size_t len)
{
    LibplaceboContext *s = avctx->priv;
    const struct pl_hook *hook;

    if (s->num_hooks >= FF_ARRAY_ELEMS(s->hooks)) {
        av_log(avctx, AV_LOG_ERROR, "Too many custom shaders!\n");
        return AVERROR(EINVAL);
    }

    hook = pl_mpv_user_shader_parse(s->gpu, shader, len);
    if (!hook) {
        av_log(avctx, AV_LOG_ERROR, "Failed parsing custom shader!\n");
        return AVERROR(EINVAL);
    }

    s->hooks[s->num_hooks++] = hook;
    return update_settings(avctx);
}
|
|
|
|
static void libplacebo_uninit(AVFilterContext *avctx);
|
|
static int libplacebo_config_input(AVFilterLink *inlink);
|
|
static int init_vulkan(AVFilterContext *avctx, const AVVulkanDeviceContext *hwctx);
|
|
|
|
/**
 * AVFilter init callback: set up logging, option storage, input pads,
 * crop/pos expressions and the Vulkan device.
 *
 * On failure after s->log exists, the framework's uninit callback is
 * expected to release the partially-initialized state (some early paths
 * also call libplacebo_uninit() directly, which must therefore be
 * idempotent).
 */
static int libplacebo_init(AVFilterContext *avctx)
{
    int err = 0;
    LibplaceboContext *s = avctx->priv;
    const AVVulkanDeviceContext *vkhwctx = NULL;

    /* normalize_sar only participates in the FIT_FILL branch of
     * update_crops(); warn rather than error for other fit modes */
    if (s->normalize_sar && s->fit_mode != FIT_FILL) {
        av_log(avctx, AV_LOG_WARNING, "normalize_sar has no effect when using "
               "a fit mode other than 'fill'\n");
    }

    /* Create libplacebo log context */
    s->log = pl_log_create(PL_API_VER, pl_log_params(
        .log_level = get_log_level(),
        .log_cb = pl_av_log,
        .log_priv = s,
    ));

    if (!s->log)
        return AVERROR(ENOMEM);

    s->opts = pl_options_alloc(s->log);
    if (!s->opts) {
        libplacebo_uninit(avctx);
        return AVERROR(ENOMEM);
    }

#if PL_API_VER >= 351
    /* Optional on-disk shader cache, keyed by the user-supplied path */
    if (s->shader_cache && s->shader_cache[0]) {
        s->cache = pl_cache_create(pl_cache_params(
            .log = s->log,
            .get = pl_cache_get_file,
            .set = pl_cache_set_file,
            .priv = s->shader_cache,
        ));
        if (!s->cache) {
            libplacebo_uninit(avctx);
            return AVERROR(ENOMEM);
        }
    }
#endif

    /* Resolve the requested output pixel format, if any */
    if (s->out_format_string) {
        s->out_format = av_get_pix_fmt(s->out_format_string);
        if (s->out_format == AV_PIX_FMT_NONE) {
            av_log(avctx, AV_LOG_ERROR, "Invalid output format: %s\n",
                   s->out_format_string);
            libplacebo_uninit(avctx);
            return AVERROR(EINVAL);
        }
    } else {
        s->out_format = AV_PIX_FMT_NONE;
    }

    /* Create one input pad per requested input */
    for (int i = 0; i < s->nb_inputs; i++) {
        AVFilterPad pad = {
            .name = av_asprintf("input%d", i),
            .type = AVMEDIA_TYPE_VIDEO,
            .config_props = &libplacebo_config_input,
        };
        if (!pad.name)
            return AVERROR(ENOMEM);
        RET(ff_append_inpad_free_name(avctx, &pad));
    }

    RET(update_settings(avctx));
    /* Pre-parse all crop/pos expressions; evaluated per frame later */
    RET(av_expr_parse(&s->crop_x_pexpr, s->crop_x_expr, var_names,
                      NULL, NULL, NULL, NULL, 0, s));
    RET(av_expr_parse(&s->crop_y_pexpr, s->crop_y_expr, var_names,
                      NULL, NULL, NULL, NULL, 0, s));
    RET(av_expr_parse(&s->crop_w_pexpr, s->crop_w_expr, var_names,
                      NULL, NULL, NULL, NULL, 0, s));
    RET(av_expr_parse(&s->crop_h_pexpr, s->crop_h_expr, var_names,
                      NULL, NULL, NULL, NULL, 0, s));
    RET(av_expr_parse(&s->pos_x_pexpr, s->pos_x_expr, var_names,
                      NULL, NULL, NULL, NULL, 0, s));
    RET(av_expr_parse(&s->pos_y_pexpr, s->pos_y_expr, var_names,
                      NULL, NULL, NULL, NULL, 0, s));
    RET(av_expr_parse(&s->pos_w_pexpr, s->pos_w_expr, var_names,
                      NULL, NULL, NULL, NULL, 0, s));
    RET(av_expr_parse(&s->pos_h_pexpr, s->pos_h_expr, var_names,
                      NULL, NULL, NULL, NULL, 0, s));

    if (strcmp(s->fps_string, "none") != 0)
        RET(av_parse_video_rate(&s->fps, s->fps_string));

    /* Use the user-supplied Vulkan hw device, if one was given */
    if (avctx->hw_device_ctx) {
        const AVHWDeviceContext *avhwctx = (void *) avctx->hw_device_ctx->data;
        if (avhwctx->type == AV_HWDEVICE_TYPE_VULKAN)
            vkhwctx = avhwctx->hwctx;
    }

    RET(init_vulkan(avctx, vkhwctx));

    return 0;

fail:
    return err;
}
|
|
|
|
#if PL_API_VER >= 278
|
|
static void lock_queue(void *priv, uint32_t qf, uint32_t qidx)
|
|
{
|
|
AVHWDeviceContext *avhwctx = priv;
|
|
const AVVulkanDeviceContext *hwctx = avhwctx->hwctx;
|
|
hwctx->lock_queue(avhwctx, qf, qidx);
|
|
}
|
|
|
|
static void unlock_queue(void *priv, uint32_t qf, uint32_t qidx)
|
|
{
|
|
AVHWDeviceContext *avhwctx = priv;
|
|
const AVVulkanDeviceContext *hwctx = avhwctx->hwctx;
|
|
hwctx->unlock_queue(avhwctx, qf, qidx);
|
|
}
|
|
#endif
|
|
|
|
/**
 * Initialize the per-input state: output-pts FIFO, frame queue and a
 * dedicated renderer.
 *
 * Fix: check the pl_queue_create()/pl_renderer_create() results instead of
 * silently continuing with NULL handles. Partially-initialized inputs are
 * released by input_uninit() via the normal uninit path.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure.
 */
static int input_init(AVFilterContext *avctx, LibplaceboInput *input, int idx)
{
    LibplaceboContext *s = avctx->priv;

    input->out_pts = av_fifo_alloc2(1, sizeof(int64_t), AV_FIFO_FLAG_AUTO_GROW);
    if (!input->out_pts)
        return AVERROR(ENOMEM);
    input->queue = pl_queue_create(s->gpu);
    input->renderer = pl_renderer_create(s->log, s->gpu);
    if (!input->queue || !input->renderer)
        return AVERROR(ENOMEM);
    input->idx = idx;

    return 0;
}
|
|
|
|
/* Release all per-input state. Also called on partially-initialized
 * inputs during error cleanup — NOTE(review): relies on the libplacebo and
 * AVFifo destructors tolerating NULL handles. */
static void input_uninit(LibplaceboInput *input)
{
    pl_renderer_destroy(&input->renderer);
    pl_queue_destroy(&input->queue);
    av_fifo_freep2(&input->out_pts);
}
|
|
|
|
/**
 * Create (or import from the hw device context) the Vulkan device used by
 * libplacebo, then load user shaders / LUT and set up the per-input state.
 *
 * Fix: the av_calloc() failure path returned directly instead of jumping to
 * `fail`, leaking the memory-mapped custom shader file (`buf`) when a
 * shader_path was in use. Route it through the common cleanup.
 *
 * @param hwctx  imported Vulkan device, or NULL to create our own
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int init_vulkan(AVFilterContext *avctx, const AVVulkanDeviceContext *hwctx)
{
    int err = 0;
    LibplaceboContext *s = avctx->priv;
    uint8_t *buf = NULL;
    size_t buf_len;

    if (hwctx) {
#if PL_API_VER >= 278
        struct pl_vulkan_import_params import_params = {
            .instance = hwctx->inst,
            .get_proc_addr = hwctx->get_proc_addr,
            .phys_device = hwctx->phys_dev,
            .device = hwctx->act_dev,
            .extensions = hwctx->enabled_dev_extensions,
            .num_extensions = hwctx->nb_enabled_dev_extensions,
            .features = &hwctx->device_features,
            .lock_queue = lock_queue,
            .unlock_queue = unlock_queue,
            .queue_ctx = avctx->hw_device_ctx->data,
            /* Queue families are filled in from hwctx->qf below */
            .queue_graphics = {
                .index = VK_QUEUE_FAMILY_IGNORED,
                .count = 0,
            },
            .queue_compute = {
                .index = VK_QUEUE_FAMILY_IGNORED,
                .count = 0,
            },
            .queue_transfer = {
                .index = VK_QUEUE_FAMILY_IGNORED,
                .count = 0,
            },
            /* This is the highest version created by hwcontext_vulkan.c */
            .max_api_version = VK_API_VERSION_1_3,
        };
        for (int i = 0; i < hwctx->nb_qf; i++) {
            const AVVulkanDeviceQueueFamily *qf = &hwctx->qf[i];

            if (qf->flags & VK_QUEUE_GRAPHICS_BIT) {
                import_params.queue_graphics.index = qf->idx;
                import_params.queue_graphics.count = qf->num;
            }
            if (qf->flags & VK_QUEUE_COMPUTE_BIT) {
                import_params.queue_compute.index = qf->idx;
                import_params.queue_compute.count = qf->num;
            }
            if (qf->flags & VK_QUEUE_TRANSFER_BIT) {
                import_params.queue_transfer.index = qf->idx;
                import_params.queue_transfer.count = qf->num;
            }
        }

        /* Import libavfilter vulkan context into libplacebo */
        s->vulkan = pl_vulkan_import(s->log, &import_params);
#else
        av_log(avctx, AV_LOG_ERROR, "libplacebo version %s too old to import "
               "Vulkan device, remove it or upgrade libplacebo to >= 5.278\n",
               PL_VERSION);
        err = AVERROR_EXTERNAL;
        goto fail;
#endif

        s->have_hwdevice = 1;
    } else {
        s->vulkan = pl_vulkan_create(s->log, pl_vulkan_params(
            .queue_count = 0, /* enable all queues for parallelization */
        ));
    }

    if (!s->vulkan) {
        av_log(avctx, AV_LOG_ERROR, "Failed %s Vulkan device!\n",
               hwctx ? "importing" : "creating");
        err = AVERROR_EXTERNAL;
        goto fail;
    }

    s->gpu = s->vulkan->gpu;
#if PL_API_VER >= 351
    pl_gpu_set_cache(s->gpu, s->cache);
#endif

    /* Parse the user shaders, if requested */
    if (s->shader_bin_len)
        RET(parse_shader(avctx, s->shader_bin, s->shader_bin_len));

    if (s->shader_path && s->shader_path[0]) {
        RET(av_file_map(s->shader_path, &buf, &buf_len, 0, s));
        RET(parse_shader(avctx, buf, buf_len));
    }

    if (s->lut_filename)
        RET(parse_custom_lut(avctx));

    /* Initialize inputs */
    s->inputs = av_calloc(s->nb_inputs, sizeof(*s->inputs));
    if (!s->inputs) {
        /* go through `fail` so a still-mapped shader file is unmapped */
        err = AVERROR(ENOMEM);
        goto fail;
    }
    for (int i = 0; i < s->nb_inputs; i++)
        RET(input_init(avctx, &s->inputs[i], i));
    s->nb_active = s->nb_inputs;
    s->linear_rr = pl_renderer_create(s->log, s->gpu);

    /* fall through */
fail:
    if (buf)
        av_file_unmap(buf, buf_len);
    return err;
}
|
|
|
|
/**
 * AVFilter uninit callback. Also invoked directly on some init failure
 * paths, so every destructor below must tolerate NULL / partially
 * initialized state.
 */
static void libplacebo_uninit(AVFilterContext *avctx)
{
    LibplaceboContext *s = avctx->priv;

    for (int i = 0; i < FF_ARRAY_ELEMS(s->tex); i++)
        pl_tex_destroy(s->gpu, &s->tex[i]);
    for (int i = 0; i < s->num_hooks; i++)
        pl_mpv_user_shader_destroy(&s->hooks[i]);
    if (s->inputs) {
        for (int i = 0; i < s->nb_inputs; i++)
            input_uninit(&s->inputs[i]);
        av_freep(&s->inputs);
    }

    pl_lut_free(&s->lut);
#if PL_API_VER >= 351
    pl_cache_destroy(&s->cache);
#endif
    pl_renderer_destroy(&s->linear_rr);
    pl_tex_destroy(s->gpu, &s->linear_tex);
    pl_options_free(&s->opts);
    /* Destroy the device last, after everything that lives on it */
    pl_vulkan_destroy(&s->vulkan);
    pl_log_destroy(&s->log);
    ff_vk_uninit(&s->vkctx);
    s->gpu = NULL;

    av_expr_free(s->crop_x_pexpr);
    av_expr_free(s->crop_y_pexpr);
    av_expr_free(s->crop_w_pexpr);
    av_expr_free(s->crop_h_pexpr);
    av_expr_free(s->pos_x_pexpr);
    av_expr_free(s->pos_y_pexpr);
    av_expr_free(s->pos_w_pexpr);
    av_expr_free(s->pos_h_pexpr);
}
|
|
|
|
/* AVFilter process_command callback: apply the option change, then rebuild
 * the libplacebo render params so the new value takes effect immediately. */
static int libplacebo_process_command(AVFilterContext *ctx, const char *cmd,
                                      const char *arg, char *res, int res_len,
                                      int flags)
{
    int err = ff_filter_process_command(ctx, cmd, arg, res, res_len, flags);
    if (err < 0)
        return err;

    return update_settings(ctx);
}
|
|
|
|
static const AVFrame *ref_frame(const struct pl_frame_mix *mix)
|
|
{
|
|
for (int i = 0; i < mix->num_frames; i++) {
|
|
if (i+1 == mix->num_frames || mix->timestamps[i+1] > 0)
|
|
return pl_get_mapped_avframe(mix->frames[i]);
|
|
}
|
|
return NULL;
|
|
}
|
|
|
|
/**
 * Evaluate the crop/pos expressions for every frame in this input's mix and
 * write the results into the pl_frame crops, then fit the reference frame's
 * placement rectangle into the target according to the configured fit mode.
 *
 * @param target      output frame whose `crop` receives the placement rect
 * @param target_pts  output timestamp in seconds, exposed as out_t/ot
 */
static void update_crops(AVFilterContext *ctx, LibplaceboInput *in,
                         struct pl_frame *target, double target_pts)
{
    FilterLink *outl = ff_filter_link(ctx->outputs[0]);
    LibplaceboContext *s = ctx->priv;
    const AVFilterLink *outlink = ctx->outputs[0];
    const AVFilterLink *inlink = ctx->inputs[in->idx];
    const AVFrame *ref = ref_frame(&in->mix);

    for (int i = 0; i < in->mix.num_frames; i++) {
        // Mutate the `pl_frame.crop` fields in-place. This is fine because we
        // own the entire pl_queue, and hence, the pointed-at frames.
        struct pl_frame *image = (struct pl_frame *) in->mix.frames[i];
        const AVFrame *src = pl_get_mapped_avframe(image);
        double image_pts = TS2T(src->pts, inlink->time_base);

        /* Update dynamic variables */
        s->var_values[VAR_IN_IDX] = s->var_values[VAR_IDX] = in->idx;
        s->var_values[VAR_IN_W]   = s->var_values[VAR_IW] = inlink->w;
        s->var_values[VAR_IN_H]   = s->var_values[VAR_IH] = inlink->h;
        s->var_values[VAR_A]      = (double) inlink->w / inlink->h;
        s->var_values[VAR_SAR]    = inlink->sample_aspect_ratio.num ?
            av_q2d(inlink->sample_aspect_ratio) : 1.0;
        s->var_values[VAR_IN_T]   = s->var_values[VAR_T]  = image_pts;
        s->var_values[VAR_OUT_T]  = s->var_values[VAR_OT] = target_pts;
        s->var_values[VAR_N]      = outl->frame_count_out;

        /* Clear these explicitly to avoid leaking previous frames' state */
        s->var_values[VAR_CROP_W] = s->var_values[VAR_CW] = NAN;
        s->var_values[VAR_CROP_H] = s->var_values[VAR_CH] = NAN;
        s->var_values[VAR_POS_W]  = s->var_values[VAR_PW] = NAN;
        s->var_values[VAR_POS_H]  = s->var_values[VAR_PH] = NAN;

        /* Compute dimensions first and placement second */
        s->var_values[VAR_CROP_W] = s->var_values[VAR_CW] =
            av_expr_eval(s->crop_w_pexpr, s->var_values, NULL);
        s->var_values[VAR_CROP_H] = s->var_values[VAR_CH] =
            av_expr_eval(s->crop_h_pexpr, s->var_values, NULL);
        /* Re-evaluate crop_w now that crop_h is known, in case the width
         * expression references ch/crop_h */
        s->var_values[VAR_CROP_W] = s->var_values[VAR_CW] =
            av_expr_eval(s->crop_w_pexpr, s->var_values, NULL);
        s->var_values[VAR_POS_W] = s->var_values[VAR_PW] =
            av_expr_eval(s->pos_w_pexpr, s->var_values, NULL);
        s->var_values[VAR_POS_H] = s->var_values[VAR_PH] =
            av_expr_eval(s->pos_h_pexpr, s->var_values, NULL);
        /* Same re-evaluation dance for pos_w vs. pos_h */
        s->var_values[VAR_POS_W] = s->var_values[VAR_PW] =
            av_expr_eval(s->pos_w_pexpr, s->var_values, NULL);

        image->crop.x0 = av_expr_eval(s->crop_x_pexpr, s->var_values, NULL);
        image->crop.y0 = av_expr_eval(s->crop_y_pexpr, s->var_values, NULL);
        image->crop.x1 = image->crop.x0 + s->var_values[VAR_CROP_W];
        image->crop.y1 = image->crop.y0 + s->var_values[VAR_CROP_H];
        image->rotation = s->rotation;
        if (s->rotation % PL_ROTATION_180 == PL_ROTATION_90) {
            /* Libplacebo expects the input crop relative to the actual frame
             * dimensions, so un-transpose them here */
            FFSWAP(float, image->crop.x0, image->crop.y0);
            FFSWAP(float, image->crop.x1, image->crop.y1);
        }

        if (src == ref) {
            /* Only update the target crop once, for the 'reference' frame */
            target->crop.x0 = av_expr_eval(s->pos_x_pexpr, s->var_values, NULL);
            target->crop.y0 = av_expr_eval(s->pos_y_pexpr, s->var_values, NULL);
            target->crop.x1 = target->crop.x0 + s->var_values[VAR_POS_W];
            target->crop.y1 = target->crop.y0 + s->var_values[VAR_POS_H];

            /* Effective visual crop: compensate for the input/output SAR
             * difference so the fit operates on display-space rectangles */
            const float w_adj = av_q2d(inlink->sample_aspect_ratio) /
                                av_q2d(outlink->sample_aspect_ratio);

            pl_rect2df fixed = image->crop;
            pl_rect2df_stretch(&fixed, w_adj, 1.0);

            /* Fit `fixed` into the placement rect per the CSS object-fit
             * style semantics of each mode */
            switch (s->fit_mode) {
            case FIT_FILL:
                if (s->normalize_sar)
                    pl_rect2df_aspect_copy(&target->crop, &fixed, s->pad_crop_ratio);
                break;
            case FIT_CONTAIN:
                pl_rect2df_aspect_copy(&target->crop, &fixed, 0.0);
                break;
            case FIT_COVER:
                pl_rect2df_aspect_copy(&target->crop, &fixed, 1.0);
                break;
            case FIT_NONE: {
                /* Scale the placement rect to the input's own display size */
                const float sx = fabsf(pl_rect_w(fixed)) / pl_rect_w(target->crop);
                const float sy = fabsf(pl_rect_h(fixed)) / pl_rect_h(target->crop);
                pl_rect2df_stretch(&target->crop, sx, sy);
                break;
            }
            case FIT_SCALE_DOWN:
                /* last case; no break needed */
                pl_rect2df_aspect_fit(&target->crop, &fixed, 0.0);
            }
        }
    }
}
|
|
|
|
/* Construct and emit an output frame for a given timestamp.
 *
 * Walks all input layers to determine visibility and a reference frame,
 * allocates the output AVFrame, copies/derives its properties, then renders
 * every visible input into the target (optionally via an intermediate
 * linear-light composition texture) and sends the result downstream.
 *
 * Returns 0 or a ff_filter_frame() result on success, a negative AVERROR
 * on failure. On failure the output frame is freed here. */
static int output_frame(AVFilterContext *ctx, int64_t pts)
{
    int err = 0, ok, changed = 0;
    LibplaceboContext *s = ctx->priv;
    pl_options opts = s->opts;
    AVFilterLink *outlink = ctx->outputs[0];
    const AVPixFmtDescriptor *outdesc = av_pix_fmt_desc_get(outlink->format);
    const double target_pts = TS2T(pts, outlink->time_base);
    struct pl_frame target;
    const AVFrame *ref = NULL;
    AVFrame *out;

    /* Count the number of visible inputs, by excluding frames which are fully
     * obscured or which have no frames in the mix */
    int idx_start = 0, nb_visible = 0;
    for (int i = 0; i < s->nb_inputs; i++) {
        LibplaceboInput *in = &s->inputs[i];
        struct pl_frame dummy;
        if (in->qstatus != PL_QUEUE_OK || !in->mix.num_frames)
            continue;
        const struct pl_frame *cur = pl_frame_mix_nearest(&in->mix);
        av_assert1(cur);
        /* Only the crop rect of `dummy` is consumed; the real render target
         * is not available yet at this point */
        update_crops(ctx, in, &dummy, target_pts);
        const int x0 = roundf(FFMIN(dummy.crop.x0, dummy.crop.x1)),
                  y0 = roundf(FFMIN(dummy.crop.y0, dummy.crop.y1)),
                  x1 = roundf(FFMAX(dummy.crop.x0, dummy.crop.x1)),
                  y1 = roundf(FFMAX(dummy.crop.y0, dummy.crop.y1));

        /* If an opaque frame covers the entire output, disregard all lower layers */
        const bool cropped = x0 > 0 || y0 > 0 || x1 < outlink->w || y1 < outlink->h;
        if (!cropped && cur->repr.alpha == PL_ALPHA_NONE) {
            idx_start = i;
            nb_visible = 0;
            ref = NULL;
        }
        /* Use first visible input as overall reference */
        if (!ref)
            ref = ref_frame(&in->mix);
        nb_visible++;
    }

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);

    /* No visible input: skip property derivation, only set timing below */
    if (!ref)
        goto props_done;

    RET(av_frame_copy_props(out, ref));
    out->width = outlink->w;
    out->height = outlink->h;
    out->colorspace = outlink->colorspace;
    out->color_range = outlink->color_range;
    out->alpha_mode = outlink->alpha_mode;
    if (s->deinterlace)
        out->flags &= ~(AV_FRAME_FLAG_INTERLACED | AV_FRAME_FLAG_TOP_FIELD_FIRST);

    if (s->apply_dovi && av_frame_get_side_data(ref, AV_FRAME_DATA_DOVI_METADATA)) {
        /* Output of dovi reshaping is always BT.2020+PQ, so infer the correct
         * output colorspace defaults */
        out->color_primaries = AVCOL_PRI_BT2020;
        out->color_trc = AVCOL_TRC_SMPTE2084;
        changed |= AV_SIDE_DATA_PROP_COLOR_DEPENDENT;
    }

    /* User-forced overrides take precedence over the inferred defaults */
    if (s->color_trc >= 0)
        out->color_trc = s->color_trc;
    if (s->color_primaries >= 0)
        out->color_primaries = s->color_primaries;

    /* Strip side data if no longer relevant */
    if (out->width != ref->width || out->height != ref->height)
        changed |= AV_SIDE_DATA_PROP_SIZE_DEPENDENT;
    if (ref->color_trc != out->color_trc || ref->color_primaries != out->color_primaries)
        changed |= AV_SIDE_DATA_PROP_COLOR_DEPENDENT;
    av_frame_side_data_remove_by_props(&out->side_data, &out->nb_side_data, changed);

    /* Film grain was consumed during rendering; drop the metadata */
    if (s->apply_filmgrain)
        av_frame_remove_side_data(out, AV_FRAME_DATA_FILM_GRAIN_PARAMS);

    if (s->reset_sar) {
        out->sample_aspect_ratio = ref->sample_aspect_ratio;
    } else {
        /* Rescale the SAR to compensate for the change in frame geometry */
        const AVRational ar_ref = { ref->width, ref->height };
        const AVRational ar_out = { out->width, out->height };
        const AVRational stretch = av_div_q(ar_ref, ar_out);
        out->sample_aspect_ratio = av_mul_q(ref->sample_aspect_ratio, stretch);
    }

props_done:
    out->pts = pts;
    if (s->fps.num)
        out->duration = 1; /* time base is 1/fps in constant-FPS mode */

    /* Map, render and unmap output frame */
    if (outdesc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
        ok = pl_map_avframe_ex(s->gpu, &target, pl_avframe_params(
            .frame = out,
            .map_dovi = false,
        ));
    } else {
        ok = pl_frame_recreate_from_avframe(s->gpu, &target, s->tex, out);
    }
    if (!ok) {
        err = AVERROR_EXTERNAL;
        goto fail;
    }

    /* With multiple visible layers and a non-linear target transfer, blend
     * into an intermediate linear-light RGBA texture first and convert to
     * the real target in a second pass */
    struct pl_frame orig_target = target;
    bool use_linear_compositor = false;
    if (s->linear_tex && target.color.transfer != PL_COLOR_TRC_LINEAR &&
        !s->disable_linear && nb_visible > 1) {
        target = (struct pl_frame) {
            .num_planes = 1,
            .planes[0] = {
                .components = 4,
                .component_mapping = {0, 1, 2, 3},
                .texture = s->linear_tex,
            },
            .repr = pl_color_repr_rgb,
            .color = orig_target.color,
            .rotation = orig_target.rotation,
        };
        target.repr.alpha = PL_ALPHA_PREMULTIPLIED;
        target.color.transfer = PL_COLOR_TRC_LINEAR;
        use_linear_compositor = true;
    }

    /* Draw first frame opaque, others with blending */
    struct pl_render_params tmp_params = opts->params;
    for (int i = 0; i < s->nb_inputs; i++) {
        LibplaceboInput *in = &s->inputs[i];
        FilterLink *il = ff_filter_link(ctx->inputs[i]);
        FilterLink *ol = ff_filter_link(outlink);
        int high_fps = av_cmp_q(il->frame_rate, ol->frame_rate) >= 0;
        if (in->qstatus != PL_QUEUE_OK || !in->mix.num_frames || i < idx_start) {
            /* Layer is hidden or obscured; drop its cached state */
            pl_renderer_flush_cache(in->renderer);
            continue;
        }
        tmp_params.skip_caching_single_frame = high_fps;
        update_crops(ctx, in, &target, target_pts);
        pl_render_image_mix(in->renderer, &in->mix, &target, &tmp_params);

        /* Force straight output and set correct blend operator. This is
         * required to get correct blending onto YUV target buffers. */
        target.repr.alpha = PL_ALPHA_INDEPENDENT;
        tmp_params.blend_params = &pl_alpha_overlay;
#if PL_API_VER >= 346
        tmp_params.background = tmp_params.border = PL_CLEAR_SKIP;
#else
        tmp_params.skip_target_clearing = true;
#endif
    }

    if (use_linear_compositor) {
        /* Blit the linear intermediate image to the output frame */
        target.crop = orig_target.crop = (struct pl_rect2df) {0};
        target.repr.alpha = PL_ALPHA_PREMULTIPLIED;
        pl_render_image(s->linear_rr, &target, &orig_target, &opts->params);
        target = orig_target;
    } else if (!ref) {
        /* Render an empty image to clear the frame to the desired fill color */
        pl_render_image(s->linear_rr, NULL, &target, &opts->params);
    }

    if (outdesc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
        pl_unmap_avframe(s->gpu, &target);
    } else if (!pl_download_avframe(s->gpu, &target, out)) {
        err = AVERROR_EXTERNAL;
        goto fail;
    }
    return ff_filter_frame(outlink, out);

fail:
    av_frame_free(&out);
    return err;
}
|
|
|
|
/* pl_queue callback: map a queued AVFrame onto GPU textures.
 *
 * The AVFrame was stored in src->frame_data by handle_input(), which also
 * placed the filter context in frame->opaque. After mapping, the optional
 * user LUT is attached and film grain metadata is suppressed when grain
 * application is disabled. The AVFrame wrapper is always freed here; the
 * mapped pl_frame carries the data from this point on.
 *
 * Returns the result of pl_map_avframe_ex().
 * NOTE(review): out->lut and friends are written even when mapping failed;
 * presumably the queue never uses a frame whose map callback returned
 * false — confirm against the pl_queue contract. */
static bool map_frame(pl_gpu gpu, pl_tex *tex,
                      const struct pl_source_frame *src,
                      struct pl_frame *out)
{
    AVFrame *avframe = src->frame_data;
    /* set in handle_input(): in->opaque = s */
    LibplaceboContext *s = avframe->opaque;
    bool ok = pl_map_avframe_ex(gpu, out, pl_avframe_params(
        .frame = avframe,
        .tex = tex,
        .map_dovi = s->apply_dovi,
    ));
    out->lut = s->lut;
    out->lut_type = s->lut_type;

    if (!s->apply_filmgrain)
        out->film_grain.type = PL_FILM_GRAIN_NONE;

    /* Ownership of the pixel data now rests with the mapping */
    av_frame_free(&avframe);
    return ok;
}
|
|
|
|
/* pl_queue callback: release the GPU mapping created by map_frame(). */
static void unmap_frame(pl_gpu gpu, struct pl_frame *frame,
                        const struct pl_source_frame *src)
{
    /* `src` is unused: the backing AVFrame was already freed in map_frame() */
    (void) src;
    pl_unmap_avframe(gpu, frame);
}
|
|
|
|
static void discard_frame(const struct pl_source_frame *src)
|
|
{
|
|
AVFrame *avframe = src->frame_data;
|
|
av_frame_free(&avframe);
|
|
}
|
|
|
|
/* Drain one input link: push all pending frames into the input's pl_queue
 * and record the corresponding output timestamps.
 *
 * In variable-FPS mode (s->fps.num == 0) every consumed frame queues one
 * output PTS (two when emitting separate fields for interlaced content).
 * When the link reaches EOF, the queue is signalled, the status is latched
 * on the input, and the active-input counter is decremented.
 *
 * Returns 0 on success or a negative AVERROR from frame consumption. */
static int handle_input(AVFilterContext *ctx, LibplaceboInput *input)
{
    int ret, status;
    LibplaceboContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterLink *inlink = ctx->inputs[input->idx];
    AVFrame *in;
    int64_t pts;

    while ((ret = ff_inlink_consume_frame(inlink, &in)) > 0) {
        /* Hand the frame to the queue; map/unmap/discard callbacks take over
         * ownership of `in` from here */
        struct pl_source_frame src = {
            .pts = TS2T(in->pts, inlink->time_base),
            .duration = TS2T(in->duration, inlink->time_base),
            .first_field = s->deinterlace ? pl_field_from_avframe(in) : PL_FIELD_NONE,
            .frame_data = in,
            .map = map_frame,
            .unmap = unmap_frame,
            .discard = discard_frame,
        };

        /* Needed by map_frame() to recover the filter context */
        in->opaque = s;
        pl_queue_push(input->queue, &src);

        if (!s->fps.num) {
            /* Internally queue an output frame for the same PTS */
            pts = av_rescale_q(in->pts, inlink->time_base, outlink->time_base);
            av_fifo_write(input->out_pts, &pts, 1);

            if (s->send_fields && src.first_field != PL_FIELD_NONE) {
                /* Queue the second field for interlaced content */
                pts += av_rescale_q(in->duration, inlink->time_base, outlink->time_base) / 2;
                av_fifo_write(input->out_pts, &pts, 1);
            }
        }
    }

    if (ret < 0)
        return ret;

    if (!input->status && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        /* Round up so the status PTS never precedes the last queued frame */
        pts = av_rescale_q_rnd(pts, inlink->time_base, outlink->time_base,
                               AV_ROUND_UP);
        pl_queue_push(input->queue, NULL); /* Signal EOF to pl_queue */
        input->status = status;
        input->status_pts = pts;
        s->nb_active--;
    }

    return 0;
}
|
|
|
|
/* Discard all queued output timestamps up to and including `until`. */
static void drain_input_pts(LibplaceboInput *in, int64_t until)
{
    for (;;) {
        int64_t queued;
        if (av_fifo_peek(in->out_pts, &queued, 1, 0) < 0 || queued > until)
            break;
        av_fifo_drain2(in->out_pts, 1);
    }
}
|
|
|
|
/* Filter activation callback: the main scheduling state machine.
 *
 * Consumes pending frames from all inputs, then — when the output wants a
 * frame — picks the next output PTS (frame counter in constant-FPS mode,
 * otherwise the minimum queued input PTS), updates every input's frame
 * queue to that PTS, and either requests more input, renders an output
 * frame, or forwards EOF once no input remains active. */
static int libplacebo_activate(AVFilterContext *ctx)
{
    int ret, ok = 0, retry = 0;
    LibplaceboContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    FilterLink *outl = ff_filter_link(outlink);
    int64_t pts, out_pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
    /* Keep libplacebo's log level in sync with the global av_log level */
    pl_log_level_update(s->log, get_log_level());

    for (int i = 0; i < s->nb_inputs; i++) {
        if ((ret = handle_input(ctx, &s->inputs[i])) < 0)
            return ret;
    }

    if (ff_outlink_frame_wanted(outlink)) {
        if (s->fps.num) {
            /* Constant-FPS: output timestamps are simply the frame counter
             * (time base was set to 1/fps in config_output) */
            out_pts = outl->frame_count_out;
        } else {
            /* Determine the PTS of the next frame from any active input */
            out_pts = INT64_MAX;
            for (int i = 0; i < s->nb_inputs; i++) {
                LibplaceboInput *in = &s->inputs[i];
                if (av_fifo_peek(in->out_pts, &pts, 1, 0) >= 0) {
                    out_pts = FFMIN(out_pts, pts);
                } else if (!in->status) {
                    /* No queued PTS and not at EOF: need more data */
                    ff_inlink_request_frame(ctx->inputs[i]);
                    retry = true;
                }
            }

            if (retry) /* some inputs are incomplete */
                return 0;
        }

        /* Update all input queues to the chosen out_pts */
        for (int i = 0; i < s->nb_inputs; i++) {
            LibplaceboInput *in = &s->inputs[i];
            FilterLink *l = ff_filter_link(outlink);
            if (in->status && out_pts >= in->status_pts) {
                /* Requested timestamp lies beyond this input's EOF */
                in->qstatus = PL_QUEUE_EOF;
                continue;
            }

            in->qstatus = pl_queue_update(in->queue, &in->mix, pl_queue_params(
                .pts = TS2T(out_pts, outlink->time_base),
                .radius = pl_frame_mix_radius(&s->opts->params),
                .vsync_duration = l->frame_rate.num ? av_q2d(av_inv_q(l->frame_rate)) : 0,
            ));

            switch (in->qstatus) {
            case PL_QUEUE_MORE:
                ff_inlink_request_frame(ctx->inputs[i]);
                retry = true;
                break;
            case PL_QUEUE_OK:
                ok |= in->mix.num_frames > 0;
                break;
            case PL_QUEUE_ERR:
                return AVERROR_EXTERNAL;
            }
        }

        /* In constant FPS mode, we can also output an empty frame if there is
         * a gap in the input timeline and we still have active streams */
        ok |= s->fps.num && s->nb_active > 0;

        if (retry) {
            return 0;
        } else if (ok) {
            /* Got any valid frame mixes, drain PTS queue and render output */
            for (int i = 0; i < s->nb_inputs; i++)
                drain_input_pts(&s->inputs[i], out_pts);
            return output_frame(ctx, out_pts);
        } else if (s->nb_active == 0) {
            /* Forward most recent status */
            int status = s->inputs[0].status;
            int64_t status_pts = s->inputs[0].status_pts;
            for (int i = 1; i < s->nb_inputs; i++) {
                const LibplaceboInput *in = &s->inputs[i];
                if (in->status_pts > status_pts) {
                    status = s->inputs[i].status;
                    status_pts = s->inputs[i].status_pts;
                }
            }
            ff_outlink_set_status(outlink, status, status_pts);
            return 0;
        }

        /* Unreachable: a wanted frame must result in retry, render or EOF */
        return AVERROR_BUG;
    }

    return FFERROR_NOT_READY;
}
|
|
|
|
/* Format negotiation: build the lists of supported input/output pixel
 * formats, color spaces, ranges and alpha modes.
 *
 * Input formats are those mappable by libplacebo (pl_test_pixfmt); output
 * formats are additionally restricted to little-endian, renderable formats
 * and, if set, the user-requested out_format. AV_PIX_FMT_VULKAN is listed
 * first to make it the preferred choice when a hwdevice is available.
 *
 * Returns 0 on success or a negative AVERROR; unreferenced format lists
 * are freed on failure. */
static int libplacebo_query_format(const AVFilterContext *ctx,
                                   AVFilterFormatsConfig **cfg_in,
                                   AVFilterFormatsConfig **cfg_out)
{
    int err;
    const LibplaceboContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = NULL;
    AVFilterFormats *infmts = NULL, *outfmts = NULL;

    /* List AV_PIX_FMT_VULKAN first to prefer it when possible */
    if (s->have_hwdevice) {
        RET(ff_add_format(&infmts, AV_PIX_FMT_VULKAN));
        if (s->out_format == AV_PIX_FMT_NONE || av_vkfmt_from_pixfmt(s->out_format))
            RET(ff_add_format(&outfmts, AV_PIX_FMT_VULKAN));
    }

    /* Probe every known pixel format against the GPU capabilities */
    while ((desc = av_pix_fmt_desc_next(desc))) {
        enum AVPixelFormat pixfmt = av_pix_fmt_desc_get_id(desc);
        if (pixfmt == AV_PIX_FMT_VULKAN)
            continue; /* Handled above */

#if PL_API_VER < 232
        // Older libplacebo can't handle >64-bit pixel formats, so safe-guard
        // this to prevent triggering an assertion
        if (av_get_bits_per_pixel(desc) > 64)
            continue;
#endif

        if (!pl_test_pixfmt(s->gpu, pixfmt))
            continue;

        RET(ff_add_format(&infmts, pixfmt));

        /* Filter for supported output pixel formats */
        if (desc->flags & AV_PIX_FMT_FLAG_BE)
            continue; /* BE formats are not supported by pl_download_avframe */

        /* Mask based on user specified format */
        if (pixfmt != s->out_format && s->out_format != AV_PIX_FMT_NONE)
            continue;

#if PL_API_VER >= 293
        if (!pl_test_pixfmt_caps(s->gpu, pixfmt, PL_FMT_CAP_RENDERABLE))
            continue;
#endif

        RET(ff_add_format(&outfmts, pixfmt));
    }

    if (!infmts || !outfmts) {
        err = AVERROR(EINVAL);
        goto fail;
    }

    for (int i = 0; i < s->nb_inputs; i++) {
        if (i > 0) {
            /* Duplicate the format list for each subsequent input */
            infmts = NULL;
            for (int n = 0; n < cfg_in[0]->formats->nb_formats; n++)
                RET(ff_add_format(&infmts, cfg_in[0]->formats->formats[n]));
        }
        RET(ff_formats_ref(infmts, &cfg_in[i]->formats));
        RET(ff_formats_ref(ff_all_color_spaces(), &cfg_in[i]->color_spaces));
        RET(ff_formats_ref(ff_all_color_ranges(), &cfg_in[i]->color_ranges));
        RET(ff_formats_ref(ff_all_alpha_modes(), &cfg_in[i]->alpha_modes));
    }

    RET(ff_formats_ref(outfmts, &cfg_out[0]->formats));

    /* Output colorimetry: pin to the user-forced value when set (> 0),
     * otherwise allow everything and negotiate */
    outfmts = s->colorspace > 0 ? ff_make_formats_list_singleton(s->colorspace)
                                : ff_all_color_spaces();
    RET(ff_formats_ref(outfmts, &cfg_out[0]->color_spaces));

    outfmts = s->color_range > 0 ? ff_make_formats_list_singleton(s->color_range)
                                 : ff_all_color_ranges();
    RET(ff_formats_ref(outfmts, &cfg_out[0]->color_ranges));

    outfmts = s->alpha_mode > 0 ? ff_make_formats_list_singleton(s->alpha_mode)
                                : ff_all_alpha_modes();
    RET(ff_formats_ref(outfmts, &cfg_out[0]->alpha_modes));
    return 0;

fail:
    /* Only free lists that were never attached to a link */
    if (infmts && !infmts->refcount)
        ff_formats_unref(&infmts);
    if (outfmts && !outfmts->refcount)
        ff_formats_unref(&outfmts);
    return err;
}
|
|
|
|
/* Per-input link configuration: compensate the advertised geometry for
 * quarter-turn rotations and forward software formats to the Vulkan
 * filter context. */
static int libplacebo_config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LibplaceboContext *s = ctx->priv;
    const int quarter_turn = (s->rotation % PL_ROTATION_180) == PL_ROTATION_90;

    if (quarter_turn) {
        /* Swap width and height for 90 degree rotations to make the size and
         * scaling calculations work out correctly */
        FFSWAP(int, inlink->w, inlink->h);
        if (inlink->sample_aspect_ratio.num)
            inlink->sample_aspect_ratio = av_inv_q(inlink->sample_aspect_ratio);
    }

    if (inlink->format == AV_PIX_FMT_VULKAN)
        return ff_vk_filter_config_input(inlink);

    /* Forward this to the vkctx for format selection */
    s->vkctx.input_format = inlink->format;
    return 0;
}
|
|
|
|
/* Return the larger of two rationals; `a` wins when they compare equal. */
static inline AVRational max_q(AVRational a, AVRational b)
{
    if (av_cmp_q(a, b) < 0)
        return b;
    return a;
}
|
|
|
|
/* Output link configuration: derive output dimensions, SAR, frame rate and
 * time base, allocate the linear composition texture for multi-input
 * blending, seed the expression variables, and configure the Vulkan output
 * context when rendering to hardware frames.
 *
 * Returns 0 on success or a negative AVERROR. */
static int libplacebo_config_output(AVFilterLink *outlink)
{
    int err;
    FilterLink *l = ff_filter_link(outlink);
    AVFilterContext *avctx = outlink->src;
    LibplaceboContext *s = avctx->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    FilterLink *ol = ff_filter_link(outlink);
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format);
    AVHWFramesContext *hwfc;
    AVVulkanFramesContext *vkfc;

    /* Frame dimensions */
    RET(ff_scale_eval_dimensions(s, s->w_expr, s->h_expr, inlink, outlink,
                                 &outlink->w, &outlink->h));

    /* normalize_sar and multi-input compositing both imply a 1:1 output SAR */
    s->reset_sar |= s->normalize_sar || s->nb_inputs > 1;
    double sar_in = inlink->sample_aspect_ratio.num ?
                    av_q2d(inlink->sample_aspect_ratio) : 1.0;

    ff_scale_adjust_dimensions(inlink, &outlink->w, &outlink->h,
                               s->force_original_aspect_ratio,
                               s->force_divisible_by,
                               s->reset_sar ? sar_in : 1.0);

    if (s->nb_inputs > 1 && !s->disable_fbos) {
        /* Create a separate renderer and composition texture */
        const enum pl_fmt_caps caps = PL_FMT_CAP_BLENDABLE | PL_FMT_CAP_BLITTABLE;
        pl_fmt fmt = pl_find_fmt(s->gpu, PL_FMT_FLOAT, 4, 16, 0, caps);
        bool ok = !!fmt;
        if (ok) {
            ok = pl_tex_recreate(s->gpu, &s->linear_tex, pl_tex_params(
                .format = fmt,
                .w = outlink->w,
                .h = outlink->h,
                .blit_dst = true,
                .renderable = true,
                .sampleable = true,
                .storable = fmt->caps & PL_FMT_CAP_STORABLE,
            ));
        }

        /* Non-fatal: output_frame() only uses s->linear_tex when it exists */
        if (!ok) {
            av_log(avctx, AV_LOG_WARNING, "Failed to create a linear texture "
                   "for compositing multiple inputs, falling back to non-linear "
                   "blending.\n");
        }
    }

    if (s->reset_sar) {
        /* SAR is normalized, or we have multiple inputs, set out to 1:1 */
        outlink->sample_aspect_ratio = (AVRational){ 1, 1 };
    } else if (inlink->sample_aspect_ratio.num && s->fit_mode == FIT_FILL) {
        /* This is consistent with other scale_* filters, which only
         * set the outlink SAR to be equal to the scale SAR iff the input SAR
         * was set to something nonzero */
        const AVRational ar_in = { inlink->w, inlink->h };
        const AVRational ar_out = { outlink->w, outlink->h };
        const AVRational stretch = av_div_q(ar_in, ar_out);
        outlink->sample_aspect_ratio = av_mul_q(inlink->sample_aspect_ratio, stretch);
    } else {
        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
    }

    /* Frame rate */
    if (s->fps.num) {
        ol->frame_rate = s->fps;
        outlink->time_base = av_inv_q(s->fps);
    } else {
        /* Variable rate: take the fastest input rate and the GCD of all
         * input time bases */
        FilterLink *il = ff_filter_link(avctx->inputs[0]);
        ol->frame_rate = il->frame_rate;
        outlink->time_base = avctx->inputs[0]->time_base;
        for (int i = 1; i < s->nb_inputs; i++) {
            il = ff_filter_link(avctx->inputs[i]);
            ol->frame_rate = max_q(ol->frame_rate, il->frame_rate);
            outlink->time_base = av_gcd_q(outlink->time_base,
                                          avctx->inputs[i]->time_base,
                                          AV_TIME_BASE / 2, AV_TIME_BASE_Q);
        }

        if (s->deinterlace && s->send_fields) {
            const AVRational q2 = { 2, 1 };
            ol->frame_rate = av_mul_q(ol->frame_rate, q2);
            /* Ensure output frame timestamps are divisible by two */
            outlink->time_base = av_div_q(outlink->time_base, q2);
        }
    }

    /* Static variables */
    s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = outlink->w;
    s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = outlink->h;
    s->var_values[VAR_DAR] = outlink->sample_aspect_ratio.num ?
                             av_q2d(outlink->sample_aspect_ratio) : 1.0;
    s->var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
    s->var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
    s->var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w;
    s->var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h;

    if (outlink->format != AV_PIX_FMT_VULKAN)
        return 0;

    /* Hardware output: configure the Vulkan frames context */
    s->vkctx.output_width = outlink->w;
    s->vkctx.output_height = outlink->h;
    /* Default to reusing the input format */
    if (s->out_format == AV_PIX_FMT_NONE || s->out_format == AV_PIX_FMT_VULKAN) {
        s->vkctx.output_format = s->vkctx.input_format;
    } else {
        s->vkctx.output_format = s->out_format;
    }
    RET(ff_vk_filter_config_output(outlink));
    hwfc = (AVHWFramesContext *)l->hw_frames_ctx->data;
    vkfc = hwfc->hwctx;
    /* Output images must be renderable as color attachments */
    vkfc->usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

    return 0;

fail:
    return err;
}
|
|
|
|
/* Shorthand helpers for the option table below */
#define OFFSET(x) offsetof(LibplaceboContext, x)
/* Options only settable at initialization time */
#define STATIC (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
/* Options that may additionally be changed at runtime */
#define DYNAMIC (STATIC | AV_OPT_FLAG_RUNTIME_PARAM)
|
|
|
|
static const AVOption libplacebo_options[] = {
|
|
{ "inputs", "Number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags = STATIC },
|
|
{ "w", "Output video frame width", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, .flags = STATIC },
|
|
{ "h", "Output video frame height", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, .flags = STATIC },
|
|
{ "fps", "Output video frame rate", OFFSET(fps_string), AV_OPT_TYPE_STRING, {.str = "none"}, .flags = STATIC },
|
|
{ "crop_x", "Input video crop x", OFFSET(crop_x_expr), AV_OPT_TYPE_STRING, {.str = "(iw-cw)/2"}, .flags = DYNAMIC },
|
|
{ "crop_y", "Input video crop y", OFFSET(crop_y_expr), AV_OPT_TYPE_STRING, {.str = "(ih-ch)/2"}, .flags = DYNAMIC },
|
|
{ "crop_w", "Input video crop w", OFFSET(crop_w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, .flags = DYNAMIC },
|
|
{ "crop_h", "Input video crop h", OFFSET(crop_h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, .flags = DYNAMIC },
|
|
{ "pos_x", "Output video placement x", OFFSET(pos_x_expr), AV_OPT_TYPE_STRING, {.str = "(ow-pw)/2"}, .flags = DYNAMIC },
|
|
{ "pos_y", "Output video placement y", OFFSET(pos_y_expr), AV_OPT_TYPE_STRING, {.str = "(oh-ph)/2"}, .flags = DYNAMIC },
|
|
{ "pos_w", "Output video placement w", OFFSET(pos_w_expr), AV_OPT_TYPE_STRING, {.str = "ow"}, .flags = DYNAMIC },
|
|
{ "pos_h", "Output video placement h", OFFSET(pos_h_expr), AV_OPT_TYPE_STRING, {.str = "oh"}, .flags = DYNAMIC },
|
|
{ "format", "Output video format", OFFSET(out_format_string), AV_OPT_TYPE_STRING, .flags = STATIC },
|
|
{ "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, SCALE_FORCE_OAR_NB-1, STATIC, .unit = "force_oar" },
|
|
{ "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = SCALE_FORCE_OAR_DISABLE }, 0, 0, STATIC, .unit = "force_oar" },
|
|
{ "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = SCALE_FORCE_OAR_DECREASE }, 0, 0, STATIC, .unit = "force_oar" },
|
|
{ "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = SCALE_FORCE_OAR_INCREASE }, 0, 0, STATIC, .unit = "force_oar" },
|
|
{ "force_divisible_by", "enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 256, STATIC },
|
|
{ "reset_sar", "force SAR normalization to 1:1 by adjusting pos_x/y/w/h", OFFSET(reset_sar), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, STATIC },
|
|
{ "normalize_sar", "like reset_sar, but pad/crop instead of stretching the video", OFFSET(normalize_sar), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, STATIC },
|
|
{ "pad_crop_ratio", "ratio between padding and cropping when normalizing SAR (0=pad, 1=crop)", OFFSET(pad_crop_ratio), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, 1.0, DYNAMIC },
|
|
{ "fit_mode", "Content fit strategy for placing input layers in the output", OFFSET(fit_mode), AV_OPT_TYPE_INT, {.i64 = FIT_FILL }, 0, FIT_MODE_NB - 1, STATIC, .unit = "fit_mode" },
|
|
{ "fill", "Stretch content, ignoring aspect ratio", 0, AV_OPT_TYPE_CONST, {.i64 = FIT_FILL }, 0, 0, STATIC, .unit = "fit_mode" },
|
|
{ "contain", "Stretch content, padding to preserve aspect", 0, AV_OPT_TYPE_CONST, {.i64 = FIT_CONTAIN }, 0, 0, STATIC, .unit = "fit_mode" },
|
|
{ "cover", "Stretch content, cropping to preserve aspect", 0, AV_OPT_TYPE_CONST, {.i64 = FIT_COVER }, 0, 0, STATIC, .unit = "fit_mode" },
|
|
{ "none", "Keep input unscaled, padding and cropping as needed", 0, AV_OPT_TYPE_CONST, {.i64 = FIT_NONE }, 0, 0, STATIC, .unit = "fit_mode" },
|
|
{ "place", "Keep input unscaled, padding and cropping as needed", 0, AV_OPT_TYPE_CONST, {.i64 = FIT_NONE }, 0, 0, STATIC, .unit = "fit_mode" },
|
|
{ "scale_down", "Downscale only if larger, padding to preserve aspect", 0, AV_OPT_TYPE_CONST, {.i64 = FIT_SCALE_DOWN }, 0, 0, STATIC, .unit = "fit_mode" },
|
|
{ "fillcolor", "Background fill color", OFFSET(fillcolor), AV_OPT_TYPE_COLOR, {.str = "black@0"}, .flags = DYNAMIC },
|
|
{ "corner_rounding", "Corner rounding radius", OFFSET(corner_rounding), AV_OPT_TYPE_FLOAT, {.dbl = 0.0}, 0.0, 1.0, .flags = DYNAMIC },
|
|
{ "lut", "Path to custom LUT file to apply", OFFSET(lut_filename), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = STATIC },
|
|
{ "lut_type", "Application mode of the custom LUT", OFFSET(lut_type), AV_OPT_TYPE_INT, { .i64 = PL_LUT_UNKNOWN }, 0, PL_LUT_CONVERSION, STATIC, .unit = "lut_type" },
|
|
{ "auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = PL_LUT_UNKNOWN }, 0, 0, STATIC, .unit = "lut_type" },
|
|
{ "native", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = PL_LUT_NATIVE }, 0, 0, STATIC, .unit = "lut_type" },
|
|
{ "normalized", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = PL_LUT_NORMALIZED }, 0, 0, STATIC, .unit = "lut_type" },
|
|
{ "conversion", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = PL_LUT_CONVERSION }, 0, 0, STATIC, .unit = "lut_type" },
|
|
|
|
{ "extra_opts", "Pass extra libplacebo-specific options using a :-separated list of key=value pairs", OFFSET(extra_opts), AV_OPT_TYPE_DICT, .flags = DYNAMIC },
|
|
#if PL_API_VER >= 351
|
|
{ "shader_cache", "Set shader cache path", OFFSET(shader_cache), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = STATIC },
|
|
#endif
|
|
|
|
{"colorspace", "select colorspace", OFFSET(colorspace), AV_OPT_TYPE_INT, {.i64=-1}, -1, AVCOL_SPC_NB-1, DYNAMIC, .unit = "colorspace"},
|
|
{"auto", "keep the same colorspace", 0, AV_OPT_TYPE_CONST, {.i64=-1}, INT_MIN, INT_MAX, STATIC, .unit = "colorspace"},
|
|
{"gbr", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_RGB}, INT_MIN, INT_MAX, STATIC, .unit = "colorspace"},
|
|
{"bt709", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_BT709}, INT_MIN, INT_MAX, STATIC, .unit = "colorspace"},
|
|
{"unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_UNSPECIFIED}, INT_MIN, INT_MAX, STATIC, .unit = "colorspace"},
|
|
{"bt470bg", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_BT470BG}, INT_MIN, INT_MAX, STATIC, .unit = "colorspace"},
|
|
{"smpte170m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_SMPTE170M}, INT_MIN, INT_MAX, STATIC, .unit = "colorspace"},
|
|
{"smpte240m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_SMPTE240M}, INT_MIN, INT_MAX, STATIC, .unit = "colorspace"},
|
|
{"ycgco", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_YCGCO}, INT_MIN, INT_MAX, STATIC, .unit = "colorspace"},
|
|
{"bt2020nc", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_BT2020_NCL}, INT_MIN, INT_MAX, STATIC, .unit = "colorspace"},
|
|
{"bt2020c", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_BT2020_CL}, INT_MIN, INT_MAX, STATIC, .unit = "colorspace"},
|
|
{"ictcp", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_ICTCP}, INT_MIN, INT_MAX, STATIC, .unit = "colorspace"},
|
|
|
|
{"range", "select color range", OFFSET(color_range), AV_OPT_TYPE_INT, {.i64=-1}, -1, AVCOL_RANGE_NB-1, DYNAMIC, .unit = "range"},
|
|
{"auto", "keep the same color range", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, STATIC, .unit = "range"},
|
|
{"unspecified", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_UNSPECIFIED}, 0, 0, STATIC, .unit = "range"},
|
|
{"unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_UNSPECIFIED}, 0, 0, STATIC, .unit = "range"},
|
|
{"limited", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_MPEG}, 0, 0, STATIC, .unit = "range"},
|
|
{"tv", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_MPEG}, 0, 0, STATIC, .unit = "range"},
|
|
{"mpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_MPEG}, 0, 0, STATIC, .unit = "range"},
|
|
{"full", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_JPEG}, 0, 0, STATIC, .unit = "range"},
|
|
{"pc", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_JPEG}, 0, 0, STATIC, .unit = "range"},
|
|
{"jpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_JPEG}, 0, 0, STATIC, .unit = "range"},
|
|
|
|
{"color_primaries", "select color primaries", OFFSET(color_primaries), AV_OPT_TYPE_INT, {.i64=-1}, -1, AVCOL_PRI_NB-1, DYNAMIC, .unit = "color_primaries"},
|
|
{"auto", "keep the same color primaries", 0, AV_OPT_TYPE_CONST, {.i64=-1}, INT_MIN, INT_MAX, STATIC, .unit = "color_primaries"},
|
|
{"bt709", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_BT709}, INT_MIN, INT_MAX, STATIC, .unit = "color_primaries"},
|
|
{"unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_UNSPECIFIED}, INT_MIN, INT_MAX, STATIC, .unit = "color_primaries"},
|
|
{"bt470m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_BT470M}, INT_MIN, INT_MAX, STATIC, .unit = "color_primaries"},
|
|
{"bt470bg", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_BT470BG}, INT_MIN, INT_MAX, STATIC, .unit = "color_primaries"},
|
|
{"smpte170m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_SMPTE170M}, INT_MIN, INT_MAX, STATIC, .unit = "color_primaries"},
|
|
{"smpte240m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_SMPTE240M}, INT_MIN, INT_MAX, STATIC, .unit = "color_primaries"},
|
|
{"film", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_FILM}, INT_MIN, INT_MAX, STATIC, .unit = "color_primaries"},
|
|
{"bt2020", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_BT2020}, INT_MIN, INT_MAX, STATIC, .unit = "color_primaries"},
|
|
{"smpte428", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_SMPTE428}, INT_MIN, INT_MAX, STATIC, .unit = "color_primaries"},
|
|
{"smpte431", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_SMPTE431}, INT_MIN, INT_MAX, STATIC, .unit = "color_primaries"},
|
|
{"smpte432", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_SMPTE432}, INT_MIN, INT_MAX, STATIC, .unit = "color_primaries"},
|
|
{"jedec-p22", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_JEDEC_P22}, INT_MIN, INT_MAX, STATIC, .unit = "color_primaries"},
|
|
{"ebu3213", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_EBU3213}, INT_MIN, INT_MAX, STATIC, .unit = "color_primaries"},
|
|
|
|
{"color_trc", "select color transfer", OFFSET(color_trc), AV_OPT_TYPE_INT, {.i64=-1}, -1, AVCOL_TRC_NB-1, DYNAMIC, .unit = "color_trc"},
|
|
{"auto", "keep the same color transfer", 0, AV_OPT_TYPE_CONST, {.i64=-1}, INT_MIN, INT_MAX, STATIC, .unit = "color_trc"},
|
|
{"bt709", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_BT709}, INT_MIN, INT_MAX, STATIC, .unit = "color_trc"},
|
|
{"unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_UNSPECIFIED}, INT_MIN, INT_MAX, STATIC, .unit = "color_trc"},
|
|
{"bt470m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_GAMMA22}, INT_MIN, INT_MAX, STATIC, .unit = "color_trc"},
|
|
{"bt470bg", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_GAMMA28}, INT_MIN, INT_MAX, STATIC, .unit = "color_trc"},
|
|
{"smpte170m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_SMPTE170M}, INT_MIN, INT_MAX, STATIC, .unit = "color_trc"},
|
|
{"smpte240m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_SMPTE240M}, INT_MIN, INT_MAX, STATIC, .unit = "color_trc"},
|
|
{"linear", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_LINEAR}, INT_MIN, INT_MAX, STATIC, .unit = "color_trc"},
|
|
{"iec61966-2-4", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_IEC61966_2_4}, INT_MIN, INT_MAX, STATIC, .unit = "color_trc"},
|
|
{"bt1361e", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_BT1361_ECG}, INT_MIN, INT_MAX, STATIC, .unit = "color_trc"},
|
|
{"iec61966-2-1", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_IEC61966_2_1}, INT_MIN, INT_MAX, STATIC, .unit = "color_trc"},
|
|
{"bt2020-10", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_BT2020_10}, INT_MIN, INT_MAX, STATIC, .unit = "color_trc"},
|
|
{"bt2020-12", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_BT2020_12}, INT_MIN, INT_MAX, STATIC, .unit = "color_trc"},
|
|
{"smpte2084", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_SMPTE2084}, INT_MIN, INT_MAX, STATIC, .unit = "color_trc"},
|
|
{"arib-std-b67", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_ARIB_STD_B67}, INT_MIN, INT_MAX, STATIC, .unit = "color_trc"},
|
|
|
|
{"rotate", "rotate the input clockwise", OFFSET(rotation), AV_OPT_TYPE_INT, {.i64=PL_ROTATION_0}, PL_ROTATION_0, PL_ROTATION_360, DYNAMIC, .unit = "rotation"},
|
|
{"0", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PL_ROTATION_0}, .flags = STATIC, .unit = "rotation"},
|
|
{"90", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PL_ROTATION_90}, .flags = STATIC, .unit = "rotation"},
|
|
{"180", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PL_ROTATION_180}, .flags = STATIC, .unit = "rotation"},
|
|
{"270", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PL_ROTATION_270}, .flags = STATIC, .unit = "rotation"},
|
|
{"360", NULL, 0, AV_OPT_TYPE_CONST, {.i64=PL_ROTATION_360}, .flags = STATIC, .unit = "rotation"},
|
|
|
|
{"alpha_mode", "select alpha moda", OFFSET(alpha_mode), AV_OPT_TYPE_INT, {.i64=-1}, -1, AVALPHA_MODE_NB-1, DYNAMIC, .unit = "alpha_mode"},
|
|
{"auto", "keep the same alpha mode", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, DYNAMIC, .unit = "alpha_mode"},
|
|
{"unspecified", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVALPHA_MODE_UNSPECIFIED}, 0, 0, DYNAMIC, .unit = "alpha_mode"},
|
|
{"unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVALPHA_MODE_UNSPECIFIED}, 0, 0, DYNAMIC, .unit = "alpha_mode"},
|
|
{"premultiplied", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVALPHA_MODE_PREMULTIPLIED}, 0, 0, DYNAMIC, .unit = "alpha_mode"},
|
|
{"straight", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVALPHA_MODE_STRAIGHT}, 0, 0, DYNAMIC, .unit = "alpha_mode"},
|
|
|
|
{ "upscaler", "Upscaler function", OFFSET(upscaler), AV_OPT_TYPE_STRING, {.str = "spline36"}, .flags = DYNAMIC },
|
|
{ "downscaler", "Downscaler function", OFFSET(downscaler), AV_OPT_TYPE_STRING, {.str = "mitchell"}, .flags = DYNAMIC },
|
|
{ "frame_mixer", "Frame mixing function", OFFSET(frame_mixer), AV_OPT_TYPE_STRING, {.str = "none"}, .flags = DYNAMIC },
|
|
{ "antiringing", "Antiringing strength (for non-EWA filters)", OFFSET(antiringing), AV_OPT_TYPE_FLOAT, {.dbl = 0.0}, 0.0, 1.0, DYNAMIC },
|
|
{ "sigmoid", "Enable sigmoid upscaling", OFFSET(sigmoid), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, DYNAMIC },
|
|
{ "apply_filmgrain", "Apply film grain metadata", OFFSET(apply_filmgrain), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, DYNAMIC },
|
|
{ "apply_dolbyvision", "Apply Dolby Vision metadata", OFFSET(apply_dovi), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, DYNAMIC },
|
|
|
|
{ "deinterlace", "Deinterlacing mode", OFFSET(deinterlace), AV_OPT_TYPE_INT, {.i64 = PL_DEINTERLACE_WEAVE}, 0, PL_DEINTERLACE_ALGORITHM_COUNT - 1, DYNAMIC, .unit = "deinterlace" },
|
|
{ "weave", "Weave fields together (no-op)", 0, AV_OPT_TYPE_CONST, {.i64 = PL_DEINTERLACE_WEAVE}, 0, 0, STATIC, .unit = "deinterlace" },
|
|
{ "bob", "Naive bob deinterlacing", 0, AV_OPT_TYPE_CONST, {.i64 = PL_DEINTERLACE_BOB}, 0, 0, STATIC, .unit = "deinterlace" },
|
|
{ "yadif", "Yet another deinterlacing filter", 0, AV_OPT_TYPE_CONST, {.i64 = PL_DEINTERLACE_YADIF}, 0, 0, STATIC, .unit = "deinterlace" },
|
|
#if PL_API_VER >= 353
|
|
{ "bwdif", "Bob weaver deinterlacing filter", 0, AV_OPT_TYPE_CONST, {.i64 = PL_DEINTERLACE_BWDIF}, 0, 0, STATIC, .unit = "deinterlace" },
|
|
#endif
|
|
{ "skip_spatial_check", "Skip yadif spatial check", OFFSET(skip_spatial_check), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DYNAMIC },
|
|
{ "send_fields", "Output a frame for each field", OFFSET(send_fields), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DYNAMIC },
|
|
|
|
{ "deband", "Enable debanding", OFFSET(deband), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DYNAMIC },
|
|
{ "deband_iterations", "Deband iterations", OFFSET(deband_iterations), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 16, DYNAMIC },
|
|
{ "deband_threshold", "Deband threshold", OFFSET(deband_threshold), AV_OPT_TYPE_FLOAT, {.dbl = 4.0}, 0.0, 1024.0, DYNAMIC },
|
|
{ "deband_radius", "Deband radius", OFFSET(deband_radius), AV_OPT_TYPE_FLOAT, {.dbl = 16.0}, 0.0, 1024.0, DYNAMIC },
|
|
{ "deband_grain", "Deband grain", OFFSET(deband_grain), AV_OPT_TYPE_FLOAT, {.dbl = 6.0}, 0.0, 1024.0, DYNAMIC },
|
|
|
|
{ "brightness", "Brightness boost", OFFSET(brightness), AV_OPT_TYPE_FLOAT, {.dbl = 0.0}, -1.0, 1.0, DYNAMIC },
|
|
{ "contrast", "Contrast gain", OFFSET(contrast), AV_OPT_TYPE_FLOAT, {.dbl = 1.0}, 0.0, 16.0, DYNAMIC },
|
|
{ "saturation", "Saturation gain", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1.0}, 0.0, 16.0, DYNAMIC },
|
|
{ "hue", "Hue shift", OFFSET(hue), AV_OPT_TYPE_FLOAT, {.dbl = 0.0}, -M_PI, M_PI, DYNAMIC },
|
|
{ "gamma", "Gamma adjustment", OFFSET(gamma), AV_OPT_TYPE_FLOAT, {.dbl = 1.0}, 0.0, 16.0, DYNAMIC },
|
|
|
|
{ "peak_detect", "Enable dynamic peak detection for HDR tone-mapping", OFFSET(peakdetect), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, DYNAMIC },
|
|
{ "smoothing_period", "Peak detection smoothing period", OFFSET(smoothing), AV_OPT_TYPE_FLOAT, {.dbl = 100.0}, 0.0, 1000.0, DYNAMIC },
|
|
{ "scene_threshold_low", "Scene change low threshold", OFFSET(scene_low), AV_OPT_TYPE_FLOAT, {.dbl = 5.5}, -1.0, 100.0, DYNAMIC },
|
|
{ "scene_threshold_high", "Scene change high threshold", OFFSET(scene_high), AV_OPT_TYPE_FLOAT, {.dbl = 10.0}, -1.0, 100.0, DYNAMIC },
|
|
{ "percentile", "Peak detection percentile", OFFSET(percentile), AV_OPT_TYPE_FLOAT, {.dbl = 99.995}, 0.0, 100.0, DYNAMIC },
|
|
|
|
{ "gamut_mode", "Gamut-mapping mode", OFFSET(gamut_mode), AV_OPT_TYPE_INT, {.i64 = GAMUT_MAP_PERCEPTUAL}, 0, GAMUT_MAP_COUNT - 1, DYNAMIC, .unit = "gamut_mode" },
|
|
{ "clip", "Hard-clip (RGB per-channel)", 0, AV_OPT_TYPE_CONST, {.i64 = GAMUT_MAP_CLIP}, 0, 0, STATIC, .unit = "gamut_mode" },
|
|
{ "perceptual", "Colorimetric soft clipping", 0, AV_OPT_TYPE_CONST, {.i64 = GAMUT_MAP_PERCEPTUAL}, 0, 0, STATIC, .unit = "gamut_mode" },
|
|
{ "relative", "Relative colorimetric clipping", 0, AV_OPT_TYPE_CONST, {.i64 = GAMUT_MAP_RELATIVE}, 0, 0, STATIC, .unit = "gamut_mode" },
|
|
{ "saturation", "Saturation mapping (RGB -> RGB)", 0, AV_OPT_TYPE_CONST, {.i64 = GAMUT_MAP_SATURATION}, 0, 0, STATIC, .unit = "gamut_mode" },
|
|
{ "absolute", "Absolute colorimetric clipping", 0, AV_OPT_TYPE_CONST, {.i64 = GAMUT_MAP_ABSOLUTE}, 0, 0, STATIC, .unit = "gamut_mode" },
|
|
{ "desaturate", "Colorimetrically desaturate colors towards white", 0, AV_OPT_TYPE_CONST, {.i64 = GAMUT_MAP_DESATURATE}, 0, 0, STATIC, .unit = "gamut_mode" },
|
|
{ "darken", "Colorimetric clip with bias towards darkening image to fit gamut", 0, AV_OPT_TYPE_CONST, {.i64 = GAMUT_MAP_DARKEN}, 0, 0, STATIC, .unit = "gamut_mode" },
|
|
{ "warn", "Highlight out-of-gamut colors", 0, AV_OPT_TYPE_CONST, {.i64 = GAMUT_MAP_HIGHLIGHT}, 0, 0, STATIC, .unit = "gamut_mode" },
|
|
{ "linear", "Linearly reduce chromaticity to fit gamut", 0, AV_OPT_TYPE_CONST, {.i64 = GAMUT_MAP_LINEAR}, 0, 0, STATIC, .unit = "gamut_mode" },
|
|
{ "tonemapping", "Tone-mapping algorithm", OFFSET(tonemapping), AV_OPT_TYPE_INT, {.i64 = TONE_MAP_AUTO}, 0, TONE_MAP_COUNT - 1, DYNAMIC, .unit = "tonemap" },
|
|
{ "auto", "Automatic selection", 0, AV_OPT_TYPE_CONST, {.i64 = TONE_MAP_AUTO}, 0, 0, STATIC, .unit = "tonemap" },
|
|
{ "clip", "No tone mapping (clip)", 0, AV_OPT_TYPE_CONST, {.i64 = TONE_MAP_CLIP}, 0, 0, STATIC, .unit = "tonemap" },
|
|
#if PL_API_VER >= 246
|
|
{ "st2094-40", "SMPTE ST 2094-40", 0, AV_OPT_TYPE_CONST, {.i64 = TONE_MAP_ST2094_40}, 0, 0, STATIC, .unit = "tonemap" },
|
|
{ "st2094-10", "SMPTE ST 2094-10", 0, AV_OPT_TYPE_CONST, {.i64 = TONE_MAP_ST2094_10}, 0, 0, STATIC, .unit = "tonemap" },
|
|
#endif
|
|
{ "bt.2390", "ITU-R BT.2390 EETF", 0, AV_OPT_TYPE_CONST, {.i64 = TONE_MAP_BT2390}, 0, 0, STATIC, .unit = "tonemap" },
|
|
{ "bt.2446a", "ITU-R BT.2446 Method A", 0, AV_OPT_TYPE_CONST, {.i64 = TONE_MAP_BT2446A}, 0, 0, STATIC, .unit = "tonemap" },
|
|
{ "spline", "Single-pivot polynomial spline", 0, AV_OPT_TYPE_CONST, {.i64 = TONE_MAP_SPLINE}, 0, 0, STATIC, .unit = "tonemap" },
|
|
{ "reinhard", "Reinhard", 0, AV_OPT_TYPE_CONST, {.i64 = TONE_MAP_REINHARD}, 0, 0, STATIC, .unit = "tonemap" },
|
|
{ "mobius", "Mobius", 0, AV_OPT_TYPE_CONST, {.i64 = TONE_MAP_MOBIUS}, 0, 0, STATIC, .unit = "tonemap" },
|
|
{ "hable", "Filmic tone-mapping (Hable)", 0, AV_OPT_TYPE_CONST, {.i64 = TONE_MAP_HABLE}, 0, 0, STATIC, .unit = "tonemap" },
|
|
{ "gamma", "Gamma function with knee", 0, AV_OPT_TYPE_CONST, {.i64 = TONE_MAP_GAMMA}, 0, 0, STATIC, .unit = "tonemap" },
|
|
{ "linear", "Perceptually linear stretch", 0, AV_OPT_TYPE_CONST, {.i64 = TONE_MAP_LINEAR}, 0, 0, STATIC, .unit = "tonemap" },
|
|
{ "tonemapping_param", "Tunable parameter for some tone-mapping functions", OFFSET(tonemapping_param), AV_OPT_TYPE_FLOAT, {.dbl = 0.0}, 0.0, 100.0, .flags = DYNAMIC },
|
|
{ "inverse_tonemapping", "Inverse tone mapping (range expansion)", OFFSET(inverse_tonemapping), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DYNAMIC },
|
|
{ "tonemapping_lut_size", "Tone-mapping LUT size", OFFSET(tonemapping_lut_size), AV_OPT_TYPE_INT, {.i64 = 256}, 2, 1024, DYNAMIC },
|
|
{ "contrast_recovery", "HDR contrast recovery strength", OFFSET(contrast_recovery), AV_OPT_TYPE_FLOAT, {.dbl = 0.30}, 0.0, 3.0, DYNAMIC },
|
|
{ "contrast_smoothness", "HDR contrast recovery smoothness", OFFSET(contrast_smoothness), AV_OPT_TYPE_FLOAT, {.dbl = 3.50}, 1.0, 32.0, DYNAMIC },
|
|
|
|
{ "dithering", "Dither method to use", OFFSET(dithering), AV_OPT_TYPE_INT, {.i64 = PL_DITHER_BLUE_NOISE}, -1, PL_DITHER_METHOD_COUNT - 1, DYNAMIC, .unit = "dither" },
|
|
{ "none", "Disable dithering", 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, STATIC, .unit = "dither" },
|
|
{ "blue", "Blue noise", 0, AV_OPT_TYPE_CONST, {.i64 = PL_DITHER_BLUE_NOISE}, 0, 0, STATIC, .unit = "dither" },
|
|
{ "ordered", "Ordered LUT", 0, AV_OPT_TYPE_CONST, {.i64 = PL_DITHER_ORDERED_LUT}, 0, 0, STATIC, .unit = "dither" },
|
|
{ "ordered_fixed", "Fixed function ordered", 0, AV_OPT_TYPE_CONST, {.i64 = PL_DITHER_ORDERED_FIXED}, 0, 0, STATIC, .unit = "dither" },
|
|
{ "white", "White noise", 0, AV_OPT_TYPE_CONST, {.i64 = PL_DITHER_WHITE_NOISE}, 0, 0, STATIC, .unit = "dither" },
|
|
{ "dither_lut_size", "Dithering LUT size", OFFSET(dither_lut_size), AV_OPT_TYPE_INT, {.i64 = 6}, 1, 8, STATIC },
|
|
{ "dither_temporal", "Enable temporal dithering", OFFSET(dither_temporal), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DYNAMIC },
|
|
|
|
{ "cones", "Colorblindness adaptation model", OFFSET(cones), AV_OPT_TYPE_FLAGS, {.i64 = 0}, 0, PL_CONE_LMS, DYNAMIC, .unit = "cone" },
|
|
{ "l", "L cone", 0, AV_OPT_TYPE_CONST, {.i64 = PL_CONE_L}, 0, 0, STATIC, .unit = "cone" },
|
|
{ "m", "M cone", 0, AV_OPT_TYPE_CONST, {.i64 = PL_CONE_M}, 0, 0, STATIC, .unit = "cone" },
|
|
{ "s", "S cone", 0, AV_OPT_TYPE_CONST, {.i64 = PL_CONE_S}, 0, 0, STATIC, .unit = "cone" },
|
|
{ "cone-strength", "Colorblindness adaptation strength", OFFSET(cone_str), AV_OPT_TYPE_FLOAT, {.dbl = 0.0}, 0.0, 10.0, DYNAMIC },
|
|
|
|
{ "custom_shader_path", "Path to custom user shader (mpv .hook format)", OFFSET(shader_path), AV_OPT_TYPE_STRING, .flags = STATIC },
|
|
{ "custom_shader_bin", "Custom user shader as binary (mpv .hook format)", OFFSET(shader_bin), AV_OPT_TYPE_BINARY, .flags = STATIC },
|
|
|
|
/* Performance/quality tradeoff options */
|
|
{ "skip_aa", "Skip anti-aliasing", OFFSET(skip_aa), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DYNAMIC },
|
|
{ "disable_linear", "Disable linear scaling", OFFSET(disable_linear), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DYNAMIC },
|
|
{ "disable_builtin", "Disable built-in scalers", OFFSET(disable_builtin), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DYNAMIC },
|
|
{ "force_dither", "Force dithering", OFFSET(force_dither), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DYNAMIC },
|
|
{ "disable_fbos", "Force-disable FBOs", OFFSET(disable_fbos), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DYNAMIC },
|
|
{ NULL },
|
|
};
|
|
|
|
AVFILTER_DEFINE_CLASS(libplacebo);
|
|
|
|
static const AVFilterPad libplacebo_outputs[] = {
|
|
{
|
|
.name = "default",
|
|
.type = AVMEDIA_TYPE_VIDEO,
|
|
.config_props = &libplacebo_config_output,
|
|
},
|
|
};
|
|
|
|
const FFFilter ff_vf_libplacebo = {
|
|
.p.name = "libplacebo",
|
|
.p.description = NULL_IF_CONFIG_SMALL("Apply various GPU filters from libplacebo"),
|
|
.p.priv_class = &libplacebo_class,
|
|
.p.flags = AVFILTER_FLAG_HWDEVICE | AVFILTER_FLAG_DYNAMIC_INPUTS,
|
|
.priv_size = sizeof(LibplaceboContext),
|
|
.init = &libplacebo_init,
|
|
.uninit = &libplacebo_uninit,
|
|
.activate = &libplacebo_activate,
|
|
.process_command = &libplacebo_process_command,
|
|
FILTER_OUTPUTS(libplacebo_outputs),
|
|
FILTER_QUERY_FUNC2(libplacebo_query_format),
|
|
.flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
|
|
};
|