Mirror of https://github.com/FFmpeg/FFmpeg.git
lavfi/vf_scale: allow passing options to swscale directly
Avoid using the dictionary mechanism, which is non-introspectable. As this is the only user of AVFilter.init_dict(), this callback can now be removed.
Commit 018a8b9dc3 (parent 260f391893)
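The practical effect is that swscale's own options become reachable through the generic AVOptions API instead of an opaque dictionary. Below is a minimal sketch of what a caller could do once the sws_opts child object exists; the option name "sws_dither", its value and the helper name are illustrative choices, not part of this commit.

#include <libavfilter/avfilter.h>
#include <libavutil/opt.h>

/* Hedged sketch: set a swscale option on an instantiated scale filter through
 * the generic AVOptions API.  AV_OPT_SEARCH_CHILDREN lets av_opt_set() walk
 * from the filter context into its private data and from there into the
 * sws_opts child object introduced by this commit. */
static int set_scale_dither(AVFilterContext *scale_ctx)
{
    return av_opt_set(scale_ctx, "sws_dither", "ed", AV_OPT_SEARCH_CHILDREN);
}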
@@ -23,6 +23,7 @@
 * scale video filter
 */

#include <float.h>
#include <stdio.h>
#include <string.h>
@@ -107,7 +108,8 @@ typedef struct ScaleContext {
    const AVClass *class;
    struct SwsContext *sws;     ///< software scaler context
    struct SwsContext *isws[2]; ///< software scaler context for interlaced material
    AVDictionary *opts;
    // context used for forwarding options to sws
    struct SwsContext *sws_opts;

    /**
     * New dimensions. Special values are:
@@ -117,7 +119,6 @@ typedef struct ScaleContext {
     */
    int w, h;
    char *size_str;
    unsigned int flags;         ///sws flags
    double param[2];            // sws params

    int hsub, vsub;             ///< chroma subsampling
@@ -268,11 +269,29 @@ revert:
    return ret;
}

static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
static av_cold int preinit(AVFilterContext *ctx)
{
    ScaleContext *scale = ctx->priv;
    int ret;

    scale->sws_opts = sws_alloc_context();
    if (!scale->sws_opts)
        return AVERROR(ENOMEM);

    // set threads=0, so we can later check whether the user modified it
    ret = av_opt_set_int(scale->sws_opts, "threads", 0, 0);
    if (ret < 0)
        return ret;

    return 0;
}

static av_cold int init(AVFilterContext *ctx)
{
    ScaleContext *scale = ctx->priv;
    int64_t threads;
    int ret;

    if (scale->size_str && (scale->w_expr || scale->h_expr)) {
        av_log(ctx, AV_LOG_ERROR,
               "Size and width/height expressions cannot be set at the same time.\n");
@@ -310,18 +329,26 @@ static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
    av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n",
           scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced);

    scale->flags = 0;

    if (scale->flags_str && *scale->flags_str) {
        const AVClass *class = sws_get_class();
        const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0,
                                        AV_OPT_SEARCH_FAKE_OBJ);
        int ret = av_opt_eval_flags(&class, o, scale->flags_str, &scale->flags);
        ret = av_opt_set(scale->sws_opts, "sws_flags", scale->flags_str, 0);
        if (ret < 0)
            return ret;
    }
    scale->opts = *opts;
    *opts = NULL;

    for (int i = 0; i < FF_ARRAY_ELEMS(scale->param); i++)
        if (scale->param[i] != DBL_MAX) {
            ret = av_opt_set_double(scale->sws_opts, i ? "param1" : "param0",
                                    scale->param[i], 0);
            if (ret < 0)
                return ret;
        }

    // use generic thread-count if the user did not set it explicitly
    ret = av_opt_get_int(scale->sws_opts, "threads", 0, &threads);
    if (ret < 0)
        return ret;
    if (!threads)
        av_opt_set_int(scale->sws_opts, "threads", ff_filter_get_nb_threads(ctx), 0);

    scale->in_frame_range = AVCOL_RANGE_UNSPECIFIED;
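The threads handling spans both callbacks: preinit() stores 0 as an "unset" marker, user options may overwrite it, and init() only falls back to the filter's generic thread count when the marker survived. A small self-contained sketch of that handshake follows; resolve_thread_count and default_threads are illustrative names, with default_threads standing in for ff_filter_get_nb_threads(ctx).

#include <stdint.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>

/* Hedged sketch of the sentinel check done in init(): only apply the generic
 * default when the "threads" option is still at the 0 written by preinit(),
 * i.e. the user did not set it explicitly. */
static int resolve_thread_count(struct SwsContext *sws_opts, int default_threads)
{
    int64_t threads;
    int ret = av_opt_get_int(sws_opts, "threads", 0, &threads);
    if (ret < 0)
        return ret;
    if (!threads) /* untouched since preinit() */
        ret = av_opt_set_int(sws_opts, "threads", default_threads, 0);
    return ret;
}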
@@ -334,11 +361,11 @@ static av_cold void uninit(AVFilterContext *ctx)
    av_expr_free(scale->w_pexpr);
    av_expr_free(scale->h_pexpr);
    scale->w_pexpr = scale->h_pexpr = NULL;
    sws_freeContext(scale->sws_opts);
    sws_freeContext(scale->sws);
    sws_freeContext(scale->isws[0]);
    sws_freeContext(scale->isws[1]);
    scale->sws = NULL;
    av_dict_free(&scale->opts);
}

static int query_formats(AVFilterContext *ctx)
@@ -486,6 +513,7 @@ static int config_props(AVFilterLink *outlink)
    enum AVPixelFormat outfmt = outlink->format;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    ScaleContext *scale = ctx->priv;
    uint8_t *flags_val = NULL;
    int ret;

    if ((ret = scale_eval_dimensions(ctx)) < 0)
@@ -534,16 +562,16 @@ static int config_props(AVFilterLink *outlink)
            return AVERROR(ENOMEM);
        *swscs[i] = s;

        ret = av_opt_copy(s, scale->sws_opts);
        if (ret < 0)
            return ret;

        av_opt_set_int(s, "srcw", inlink0 ->w, 0);
        av_opt_set_int(s, "srch", inlink0 ->h >> !!i, 0);
        av_opt_set_int(s, "src_format", inlink0->format, 0);
        av_opt_set_int(s, "dstw", outlink->w, 0);
        av_opt_set_int(s, "dsth", outlink->h >> !!i, 0);
        av_opt_set_int(s, "dst_format", outfmt, 0);
        av_opt_set_int(s, "sws_flags", scale->flags, 0);
        av_opt_set_int(s, "param0", scale->param[0], 0);
        av_opt_set_int(s, "param1", scale->param[1], 0);
        av_opt_set_int(s, "threads", ff_filter_get_nb_threads(ctx), 0);
        if (scale->in_range != AVCOL_RANGE_UNSPECIFIED)
            av_opt_set_int(s, "src_range",
                           scale->in_range == AVCOL_RANGE_JPEG, 0);
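Instead of replaying a dictionary, the per-link scaler contexts are now seeded from sws_opts with av_opt_copy() and then get their geometry set on top. A minimal sketch of that template pattern, detached from the filter; clone_scaler_opts and tmpl are illustrative names, not code from this commit.

#include <libavutil/opt.h>
#include <libswscale/swscale.h>

/* Hedged sketch: sws_opts acts as an option template.  Each real scaler
 * context starts as a copy of it (av_opt_copy() copies option values between
 * two objects of the same AVClass) before its per-link sizes are filled in. */
static struct SwsContext *clone_scaler_opts(struct SwsContext *tmpl,
                                            int srcw, int srch)
{
    struct SwsContext *s = sws_alloc_context();
    if (!s)
        return NULL;
    if (av_opt_copy(s, tmpl) < 0) {
        sws_freeContext(s);
        return NULL;
    }
    av_opt_set_int(s, "srcw", srcw, 0);
    av_opt_set_int(s, "srch", srch, 0);
    return s;
}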
@@ -554,13 +582,6 @@ static int config_props(AVFilterLink *outlink)
            av_opt_set_int(s, "dst_range",
                           scale->out_range == AVCOL_RANGE_JPEG, 0);

        if (scale->opts) {
            const AVDictionaryEntry *e = NULL;
            while ((e = av_dict_iterate(scale->opts, e))) {
                if ((ret = av_opt_set(s, e->key, e->value, 0)) < 0)
                    return ret;
            }
        }
        /* Override YUV420P default settings to have the correct (MPEG-2) chroma positions
         * MPEG-2 chroma positions are used by convention
         * XXX: support other 4:2:0 pixel formats */
@@ -589,12 +610,17 @@ static int config_props(AVFilterLink *outlink)
    } else
        outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio;

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d flags:0x%0x\n",
    if (scale->sws)
        av_opt_get(scale->sws, "sws_flags", 0, &flags_val);

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d flags:%s\n",
           inlink ->w, inlink ->h, av_get_pix_fmt_name( inlink->format),
           inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den,
           outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
           outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den,
           scale->flags);
           flags_val);
    av_freep(&flags_val);

    return 0;

fail:
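The verbose log switches from printing the numeric flags field (0x%0x) to printing the string returned by av_opt_get() on the configured context, which is why a free follows the log call. A short sketch of that ownership rule; log_sws_flags is an illustrative helper, not part of the commit.

#include <libavutil/log.h>
#include <libavutil/mem.h>
#include <libavutil/opt.h>

/* Hedged sketch: av_opt_get() hands back a newly allocated string (here the
 * textual form of sws_flags), so the caller owns it and must free it, which
 * is what config_props() does with av_freep() right after av_log(). */
static void log_sws_flags(void *log_ctx, void *sws)
{
    uint8_t *flags_val = NULL;
    if (av_opt_get(sws, "sws_flags", 0, &flags_val) >= 0)
        av_log(log_ctx, AV_LOG_VERBOSE, "sws_flags: %s\n", flags_val);
    av_freep(&flags_val);
}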
@@ -927,6 +953,14 @@ static const AVClass *child_class_iterate(void **iter)
    return c;
}

static void *child_next(void *obj, void *prev)
{
    ScaleContext *s = obj;
    if (!prev)
        return s->sws_opts;
    return NULL;
}

#define OFFSET(x) offsetof(ScaleContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define TFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
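child_next(), together with the existing child_class_iterate(), is what makes the forwarded options introspectable: generic AVOptions code can discover them by walking the ScaleContext's children. A rough sketch of such a walk; obj would be the filter's private data, dump_options_with_children is an illustrative name and the printing is only for demonstration.

#include <stdio.h>
#include <libavutil/opt.h>

/* Hedged sketch: enumerate an object's own options, then descend into its
 * child objects (such as sws_opts) via av_opt_child_next() and enumerate
 * theirs too.  Option help output and generic option setting rely on this
 * kind of traversal. */
static void dump_options_with_children(void *obj)
{
    const AVOption *o = NULL;
    void *child = NULL;

    while ((o = av_opt_next(obj, o)))
        printf("%s\n", o->name);

    while ((child = av_opt_child_next(obj, child))) {
        o = NULL;
        while ((o = av_opt_next(child, o)))
            printf("%s (child option)\n", o->name);
    }
}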
@@ -969,8 +1003,8 @@ static const AVOption scale_options[] = {
    { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
    { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
    { "force_divisible_by", "enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 1}, 1, 256, FLAGS },
    { "param0", "Scaler param 0", OFFSET(param[0]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
    { "param1", "Scaler param 1", OFFSET(param[1]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
    { "param0", "Scaler param 0", OFFSET(param[0]), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS },
    { "param1", "Scaler param 1", OFFSET(param[1]), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS },
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
    { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
    { "frame", "eval expressions during initialization and per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
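The param0/param1 defaults change from SWS_PARAM_DEFAULT to DBL_MAX so that "never set by the user" is distinguishable, and init() (see the earlier hunk) only forwards a value to swscale in that case, leaving swscale's own per-algorithm defaults alone otherwise. A minimal sketch of that convention; forward_scaler_param is an illustrative helper.

#include <float.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>

/* Hedged sketch: DBL_MAX plays the role of "option left at its default".
 * Only values the user actually supplied are copied onto the swscale
 * context; otherwise swscale keeps its own defaults for param0/param1. */
static int forward_scaler_param(struct SwsContext *sws_opts, double value, int idx)
{
    if (value == DBL_MAX) /* not set by the user: forward nothing */
        return 0;
    return av_opt_set_double(sws_opts, idx ? "param1" : "param0", value, 0);
}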
@@ -984,6 +1018,7 @@ static const AVClass scale_class = {
    .version = LIBAVUTIL_VERSION_INT,
    .category = AV_CLASS_CATEGORY_FILTER,
    .child_class_iterate = child_class_iterate,
    .child_next = child_next,
};

static const AVFilterPad avfilter_vf_scale_inputs[] = {
@@ -1005,7 +1040,8 @@ static const AVFilterPad avfilter_vf_scale_outputs[] = {
const AVFilter ff_vf_scale = {
    .name = "scale",
    .description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format."),
    .init_dict = init_dict,
    .preinit = preinit,
    .init = init,
    .uninit = uninit,
    .priv_size = sizeof(ScaleContext),
    .priv_class = &scale_class,
@@ -1046,7 +1082,8 @@ static const AVFilterPad avfilter_vf_scale2ref_outputs[] = {
const AVFilter ff_vf_scale2ref = {
    .name = "scale2ref",
    .description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format to the given reference."),
    .init_dict = init_dict,
    .preinit = preinit,
    .init = init,
    .uninit = uninit,
    .priv_size = sizeof(ScaleContext),
    .priv_class = &scale_class,