From 19214f005140b0ee7f706509bd3fab47f4af9b90 Mon Sep 17 00:00:00 2001
From: Jun Zhao
Date: Mon, 8 Jan 2018 16:02:35 +0800
Subject: [PATCH] lavfi: use common VAAPI VPP infrastructure for vf_scale_vaapi.

Signed-off-by: Jun Zhao
Signed-off-by: Mark Thompson
---
 libavfilter/Makefile         |   2 +-
 libavfilter/vf_scale_vaapi.c | 356 ++++-------------------------------
 2 files changed, 42 insertions(+), 316 deletions(-)

diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index ef4729dd3f..3d8dd2c890 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -296,7 +296,7 @@ OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o scale.o
 OBJS-$(CONFIG_SCALE_CUDA_FILTER) += vf_scale_cuda.o vf_scale_cuda.ptx.o
 OBJS-$(CONFIG_SCALE_NPP_FILTER) += vf_scale_npp.o scale.o
 OBJS-$(CONFIG_SCALE_QSV_FILTER) += vf_scale_qsv.o
-OBJS-$(CONFIG_SCALE_VAAPI_FILTER) += vf_scale_vaapi.o scale.o
+OBJS-$(CONFIG_SCALE_VAAPI_FILTER) += vf_scale_vaapi.o scale.o vaapi_vpp.o
 OBJS-$(CONFIG_SCALE2REF_FILTER) += vf_scale.o scale.o
 OBJS-$(CONFIG_SELECT_FILTER) += f_select.o
 OBJS-$(CONFIG_SELECTIVECOLOR_FILTER) += vf_selectivecolor.o
diff --git a/libavfilter/vf_scale_vaapi.c b/libavfilter/vf_scale_vaapi.c
index 4bead5aaf4..d349ff0f90 100644
--- a/libavfilter/vf_scale_vaapi.c
+++ b/libavfilter/vf_scale_vaapi.c
@@ -18,12 +18,7 @@
 #include <string.h>
 
-#include <va/va.h>
-#include <va/va_vpp.h>
-
 #include "libavutil/avassert.h"
-#include "libavutil/hwcontext.h"
-#include "libavutil/hwcontext_vaapi.h"
 #include "libavutil/mem.h"
 #include "libavutil/opt.h"
 #include "libavutil/pixdesc.h"
@@ -33,276 +28,74 @@
 #include "internal.h"
 #include "scale.h"
 #include "video.h"
+#include "vaapi_vpp.h"
 
 typedef struct ScaleVAAPIContext {
-    const AVClass *class;
-
-    AVVAAPIDeviceContext *hwctx;
-    AVBufferRef *device_ref;
-
-    int valid_ids;
-    VAConfigID  va_config;
-    VAContextID va_context;
-
-    AVBufferRef       *input_frames_ref;
-    AVHWFramesContext *input_frames;
-
-    AVBufferRef       *output_frames_ref;
-    AVHWFramesContext *output_frames;
+    VAAPIVPPContext vpp_ctx; // must be the first field
 
     char *output_format_string;
-    enum AVPixelFormat output_format;
 
     char *w_expr;      // width expression string
     char *h_expr;      // height expression string
-
-    int output_width;  // computed width
-    int output_height; // computed height
 } ScaleVAAPIContext;
 
-
-static int scale_vaapi_query_formats(AVFilterContext *avctx)
-{
-    enum AVPixelFormat pix_fmts[] = {
-        AV_PIX_FMT_VAAPI, AV_PIX_FMT_NONE,
-    };
-    int err;
-
-    if ((err = ff_formats_ref(ff_make_format_list(pix_fmts),
-                              &avctx->inputs[0]->out_formats)) < 0)
-        return err;
-    if ((err = ff_formats_ref(ff_make_format_list(pix_fmts),
-                              &avctx->outputs[0]->in_formats)) < 0)
-        return err;
-
-    return 0;
-}
-
-static int scale_vaapi_pipeline_uninit(ScaleVAAPIContext *ctx)
-{
-    if (ctx->va_context != VA_INVALID_ID) {
-        vaDestroyContext(ctx->hwctx->display, ctx->va_context);
-        ctx->va_context = VA_INVALID_ID;
-    }
-
-    if (ctx->va_config != VA_INVALID_ID) {
-        vaDestroyConfig(ctx->hwctx->display, ctx->va_config);
-        ctx->va_config = VA_INVALID_ID;
-    }
-
-    av_buffer_unref(&ctx->output_frames_ref);
-    av_buffer_unref(&ctx->device_ref);
-    ctx->hwctx = 0;
-
-    return 0;
-}
-
-static int scale_vaapi_config_input(AVFilterLink *inlink)
-{
-    AVFilterContext *avctx = inlink->dst;
-    ScaleVAAPIContext *ctx = avctx->priv;
-
-    scale_vaapi_pipeline_uninit(ctx);
-
-    if (!inlink->hw_frames_ctx) {
-        av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
-               "required to associate the processing device.\n");
-        return AVERROR(EINVAL);
-    }
-
-    ctx->input_frames_ref = av_buffer_ref(inlink->hw_frames_ctx);
-    ctx->input_frames = (AVHWFramesContext*)ctx->input_frames_ref->data;
-
-    return 0;
-}
-
 static int scale_vaapi_config_output(AVFilterLink *outlink)
 {
-    AVFilterLink *inlink = outlink->src->inputs[0];
-    AVFilterContext *avctx = outlink->src;
-    ScaleVAAPIContext *ctx = avctx->priv;
-    AVVAAPIHWConfig *hwconfig = NULL;
-    AVHWFramesConstraints *constraints = NULL;
-    AVVAAPIFramesContext *va_frames;
-    VAStatus vas;
-    int err, i;
-
-    scale_vaapi_pipeline_uninit(ctx);
-
-    ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
-    ctx->hwctx = ((AVHWDeviceContext*)ctx->device_ref->data)->hwctx;
-
-    av_assert0(ctx->va_config == VA_INVALID_ID);
-    vas = vaCreateConfig(ctx->hwctx->display, VAProfileNone,
-                         VAEntrypointVideoProc, 0, 0, &ctx->va_config);
-    if (vas != VA_STATUS_SUCCESS) {
-        av_log(ctx, AV_LOG_ERROR, "Failed to create processing pipeline "
-               "config: %d (%s).\n", vas, vaErrorStr(vas));
-        err = AVERROR(EIO);
-        goto fail;
-    }
-
-    hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref);
-    if (!hwconfig) {
-        err = AVERROR(ENOMEM);
-        goto fail;
-    }
-    hwconfig->config_id = ctx->va_config;
-
-    constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
-                                                      hwconfig);
-    if (!constraints) {
-        err = AVERROR(ENOMEM);
-        goto fail;
-    }
-
-    if (ctx->output_format == AV_PIX_FMT_NONE)
-        ctx->output_format = ctx->input_frames->sw_format;
-    if (constraints->valid_sw_formats) {
-        for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
-            if (ctx->output_format == constraints->valid_sw_formats[i])
-                break;
-        }
-        if (constraints->valid_sw_formats[i] == AV_PIX_FMT_NONE) {
-            av_log(ctx, AV_LOG_ERROR, "Hardware does not support output "
-                   "format %s.\n", av_get_pix_fmt_name(ctx->output_format));
-            err = AVERROR(EINVAL);
-            goto fail;
-        }
-    }
+    AVFilterLink *inlink     = outlink->src->inputs[0];
+    AVFilterContext *avctx   = outlink->src;
+    VAAPIVPPContext *vpp_ctx = avctx->priv;
+    ScaleVAAPIContext *ctx   = avctx->priv;
+    int err;
 
     if ((err = ff_scale_eval_dimensions(ctx,
                                         ctx->w_expr, ctx->h_expr,
                                         inlink, outlink,
-                                        &ctx->output_width, &ctx->output_height)) < 0)
-        goto fail;
+                                        &vpp_ctx->output_width, &vpp_ctx->output_height)) < 0)
+        return err;
 
-    if (ctx->output_width  < constraints->min_width  ||
-        ctx->output_height < constraints->min_height ||
-        ctx->output_width  > constraints->max_width  ||
-        ctx->output_height > constraints->max_height) {
-        av_log(ctx, AV_LOG_ERROR, "Hardware does not support scaling to "
-               "size %dx%d (constraints: width %d-%d height %d-%d).\n",
-               ctx->output_width, ctx->output_height,
-               constraints->min_width, constraints->max_width,
-               constraints->min_height, constraints->max_height);
-        err = AVERROR(EINVAL);
-        goto fail;
-    }
-
-    ctx->output_frames_ref = av_hwframe_ctx_alloc(ctx->device_ref);
-    if (!ctx->output_frames_ref) {
-        av_log(ctx, AV_LOG_ERROR, "Failed to create HW frame context "
-               "for output.\n");
-        err = AVERROR(ENOMEM);
-        goto fail;
-    }
-
-    ctx->output_frames = (AVHWFramesContext*)ctx->output_frames_ref->data;
-
-    ctx->output_frames->format    = AV_PIX_FMT_VAAPI;
-    ctx->output_frames->sw_format = ctx->output_format;
-    ctx->output_frames->width     = ctx->output_width;
-    ctx->output_frames->height    = ctx->output_height;
-
-    // The number of output frames we need is determined by what follows
-    // the filter.  If it's an encoder with complex frame reference
-    // structures then this could be very high.
-    ctx->output_frames->initial_pool_size = 10;
-
-    err = av_hwframe_ctx_init(ctx->output_frames_ref);
-    if (err < 0) {
-        av_log(ctx, AV_LOG_ERROR, "Failed to initialise VAAPI frame "
-               "context for output: %d\n", err);
-        goto fail;
-    }
-
-    va_frames = ctx->output_frames->hwctx;
-
-    av_assert0(ctx->va_context == VA_INVALID_ID);
-    vas = vaCreateContext(ctx->hwctx->display, ctx->va_config,
-                          ctx->output_width, ctx->output_height,
-                          VA_PROGRESSIVE,
-                          va_frames->surface_ids, va_frames->nb_surfaces,
-                          &ctx->va_context);
-    if (vas != VA_STATUS_SUCCESS) {
-        av_log(ctx, AV_LOG_ERROR, "Failed to create processing pipeline "
-               "context: %d (%s).\n", vas, vaErrorStr(vas));
-        return AVERROR(EIO);
-    }
-
-    outlink->w = ctx->output_width;
-    outlink->h = ctx->output_height;
-
-    outlink->hw_frames_ctx = av_buffer_ref(ctx->output_frames_ref);
-    if (!outlink->hw_frames_ctx) {
-        err = AVERROR(ENOMEM);
-        goto fail;
-    }
+    err = ff_vaapi_vpp_config_output(outlink);
+    if (err < 0)
+        return err;
 
     if (inlink->sample_aspect_ratio.num)
         outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink->w, outlink->w * inlink->h},
                                                 inlink->sample_aspect_ratio);
     else
         outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
 
-    av_freep(&hwconfig);
-    av_hwframe_constraints_free(&constraints);
     return 0;
-
-fail:
-    av_buffer_unref(&ctx->output_frames_ref);
-    av_freep(&hwconfig);
-    av_hwframe_constraints_free(&constraints);
-    return err;
-}
-
-static int vaapi_proc_colour_standard(enum AVColorSpace av_cs)
-{
-    switch(av_cs) {
-#define CS(av, va) case AVCOL_SPC_ ## av: return VAProcColorStandard ## va;
-        CS(BT709,     BT709);
-        CS(BT470BG,   BT601);
-        CS(SMPTE170M, SMPTE170M);
-        CS(SMPTE240M, SMPTE240M);
-#undef CS
-    default:
-        return VAProcColorStandardNone;
-    }
 }
 
 static int scale_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
 {
-    AVFilterContext *avctx = inlink->dst;
-    AVFilterLink *outlink = avctx->outputs[0];
-    ScaleVAAPIContext *ctx = avctx->priv;
-    AVFrame *output_frame = NULL;
+    AVFilterContext *avctx   = inlink->dst;
+    AVFilterLink *outlink    = avctx->outputs[0];
+    VAAPIVPPContext *vpp_ctx = avctx->priv;
+    AVFrame *output_frame    = NULL;
     VASurfaceID input_surface, output_surface;
     VAProcPipelineParameterBuffer params;
-    VABufferID params_id;
     VARectangle input_region;
-    VAStatus vas;
     int err;
 
-    av_log(ctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",
+    av_log(avctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",
            av_get_pix_fmt_name(input_frame->format),
            input_frame->width, input_frame->height, input_frame->pts);
 
-    if (ctx->va_context == VA_INVALID_ID)
+    if (vpp_ctx->va_context == VA_INVALID_ID)
         return AVERROR(EINVAL);
 
     input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3];
-    av_log(ctx, AV_LOG_DEBUG, "Using surface %#x for scale input.\n",
+    av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for scale input.\n",
            input_surface);
 
-    output_frame = ff_get_video_buffer(outlink, ctx->output_width,
-                                       ctx->output_height);
+    output_frame = ff_get_video_buffer(outlink, vpp_ctx->output_width,
+                                       vpp_ctx->output_height);
     if (!output_frame) {
         err = AVERROR(ENOMEM);
         goto fail;
     }
 
     output_surface = (VASurfaceID)(uintptr_t)output_frame->data[3];
-    av_log(ctx, AV_LOG_DEBUG, "Using surface %#x for scale output.\n",
+    av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for scale output.\n",
            output_surface);
 
     memset(&params, 0, sizeof(params));
@@ -319,7 +112,7 @@ static int scale_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
 
     params.surface = input_surface;
     params.surface_region = &input_region;
     params.surface_color_standard =
-        vaapi_proc_colour_standard(input_frame->colorspace);
+        ff_vaapi_vpp_colour_standard(input_frame->colorspace);
 
     params.output_region = 0;
     params.output_background_color = 0xff000000;
@@ -328,71 +121,22 @@ static int scale_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
     params.pipeline_flags = 0;
     params.filter_flags = VA_FILTER_SCALING_HQ;
 
-    vas = vaBeginPicture(ctx->hwctx->display,
-                         ctx->va_context, output_surface);
-    if (vas != VA_STATUS_SUCCESS) {
-        av_log(ctx, AV_LOG_ERROR, "Failed to attach new picture: "
-               "%d (%s).\n", vas, vaErrorStr(vas));
-        err = AVERROR(EIO);
+    err = ff_vaapi_vpp_render_picture(avctx, &params, output_surface);
+    if (err < 0)
         goto fail;
-    }
 
-    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
-                         VAProcPipelineParameterBufferType,
-                         sizeof(params), 1, &params, &params_id);
-    if (vas != VA_STATUS_SUCCESS) {
-        av_log(ctx, AV_LOG_ERROR, "Failed to create parameter buffer: "
-               "%d (%s).\n", vas, vaErrorStr(vas));
-        err = AVERROR(EIO);
-        goto fail_after_begin;
-    }
-    av_log(ctx, AV_LOG_DEBUG, "Pipeline parameter buffer is %#x.\n",
-           params_id);
+    err = av_frame_copy_props(output_frame, input_frame);
+    if (err < 0)
+        goto fail;
 
-    vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context,
-                          &params_id, 1);
-    if (vas != VA_STATUS_SUCCESS) {
-        av_log(ctx, AV_LOG_ERROR, "Failed to render parameter buffer: "
-               "%d (%s).\n", vas, vaErrorStr(vas));
-        err = AVERROR(EIO);
-        goto fail_after_begin;
-    }
-
-    vas = vaEndPicture(ctx->hwctx->display, ctx->va_context);
-    if (vas != VA_STATUS_SUCCESS) {
-        av_log(ctx, AV_LOG_ERROR, "Failed to start picture processing: "
-               "%d (%s).\n", vas, vaErrorStr(vas));
-        err = AVERROR(EIO);
-        goto fail_after_render;
-    }
-
-    if (CONFIG_VAAPI_1 || ctx->hwctx->driver_quirks &
-        AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS) {
-        vas = vaDestroyBuffer(ctx->hwctx->display, params_id);
-        if (vas != VA_STATUS_SUCCESS) {
-            av_log(ctx, AV_LOG_ERROR, "Failed to free parameter buffer: "
-                   "%d (%s).\n", vas, vaErrorStr(vas));
-            // And ignore.
-        }
-    }
-
-    av_frame_copy_props(output_frame, input_frame);
     av_frame_free(&input_frame);
 
-    av_log(ctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n",
+    av_log(avctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n",
            av_get_pix_fmt_name(output_frame->format),
            output_frame->width, output_frame->height, output_frame->pts);
 
     return ff_filter_frame(outlink, output_frame);
 
-    // We want to make sure that if vaBeginPicture has been called, we also
-    // call vaRenderPicture and vaEndPicture.  These calls may well fail or
-    // do something else nasty, but once we're in this failure case there
-    // isn't much else we can do.
-fail_after_begin:
-    vaRenderPicture(ctx->hwctx->display, ctx->va_context, &params_id, 1);
-fail_after_render:
-    vaEndPicture(ctx->hwctx->display, ctx->va_context);
 fail:
     av_frame_free(&input_frame);
     av_frame_free(&output_frame);
@@ -401,39 +145,26 @@ fail:
 
 static av_cold int scale_vaapi_init(AVFilterContext *avctx)
 {
-    ScaleVAAPIContext *ctx = avctx->priv;
+    VAAPIVPPContext *vpp_ctx = avctx->priv;
+    ScaleVAAPIContext *ctx   = avctx->priv;
 
-    ctx->va_config  = VA_INVALID_ID;
-    ctx->va_context = VA_INVALID_ID;
-    ctx->valid_ids  = 1;
+    ff_vaapi_vpp_ctx_init(avctx);
+    vpp_ctx->pipeline_uninit = ff_vaapi_vpp_pipeline_uninit;
 
     if (ctx->output_format_string) {
-        ctx->output_format = av_get_pix_fmt(ctx->output_format_string);
-        if (ctx->output_format == AV_PIX_FMT_NONE) {
-            av_log(ctx, AV_LOG_ERROR, "Invalid output format.\n");
+        vpp_ctx->output_format = av_get_pix_fmt(ctx->output_format_string);
+        if (vpp_ctx->output_format == AV_PIX_FMT_NONE) {
+            av_log(avctx, AV_LOG_ERROR, "Invalid output format.\n");
             return AVERROR(EINVAL);
         }
     } else {
         // Use the input format once that is configured.
-        ctx->output_format = AV_PIX_FMT_NONE;
+        vpp_ctx->output_format = AV_PIX_FMT_NONE;
     }
 
     return 0;
 }
 
-static av_cold void scale_vaapi_uninit(AVFilterContext *avctx)
-{
-    ScaleVAAPIContext *ctx = avctx->priv;
-
-    if (ctx->valid_ids)
-        scale_vaapi_pipeline_uninit(ctx);
-
-    av_buffer_unref(&ctx->input_frames_ref);
-    av_buffer_unref(&ctx->output_frames_ref);
-    av_buffer_unref(&ctx->device_ref);
-}
-
-
 #define OFFSET(x) offsetof(ScaleVAAPIContext, x)
 #define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
 static const AVOption scale_vaapi_options[] = {
@@ -446,19 +177,14 @@ static const AVOption scale_vaapi_options[] = {
     { NULL },
 };
 
-static const AVClass scale_vaapi_class = {
-    .class_name = "scale_vaapi",
-    .item_name  = av_default_item_name,
-    .option     = scale_vaapi_options,
-    .version    = LIBAVUTIL_VERSION_INT,
-};
+AVFILTER_DEFINE_CLASS(scale_vaapi);
 
 static const AVFilterPad scale_vaapi_inputs[] = {
     {
         .name         = "default",
         .type         = AVMEDIA_TYPE_VIDEO,
         .filter_frame = &scale_vaapi_filter_frame,
-        .config_props = &scale_vaapi_config_input,
+        .config_props = &ff_vaapi_vpp_config_input,
     },
     { NULL }
 };
@@ -477,8 +203,8 @@ AVFilter ff_vf_scale_vaapi = {
     .description   = NULL_IF_CONFIG_SMALL("Scale to/from VAAPI surfaces."),
     .priv_size     = sizeof(ScaleVAAPIContext),
     .init          = &scale_vaapi_init,
-    .uninit        = &scale_vaapi_uninit,
-    .query_formats = &scale_vaapi_query_formats,
+    .uninit        = &ff_vaapi_vpp_ctx_uninit,
+    .query_formats = &ff_vaapi_vpp_query_formats,
     .inputs        = scale_vaapi_inputs,
     .outputs       = scale_vaapi_outputs,
     .priv_class    = &scale_vaapi_class,
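
Not part of the patch above: the following is a minimal sketch of the wiring any
libavfilter VPP filter gets from the shared vaapi_vpp.h helpers used in this
change. Only the helper names (ff_vaapi_vpp_ctx_init, ff_vaapi_vpp_pipeline_uninit,
ff_vaapi_vpp_config_input, ff_vaapi_vpp_config_output, ff_vaapi_vpp_query_formats,
ff_vaapi_vpp_ctx_uninit, ff_vaapi_vpp_render_picture) and the VAAPIVPPContext
fields are taken from the diff; the "example_vaapi" filter name, its empty option
table and the pass-through frame callback are hypothetical, and the file is
assumed to be built inside an FFmpeg source tree of this era, since it uses
private libavfilter headers.

/* example_vaapi: hypothetical skeleton of a VAAPI VPP filter built on the
 * shared helpers in vaapi_vpp.h.  A real filter would fill in a
 * VAProcPipelineParameterBuffer and submit it with
 * ff_vaapi_vpp_render_picture() in its filter_frame callback, as
 * scale_vaapi does in the patch above. */

#include "libavutil/opt.h"

#include "avfilter.h"
#include "internal.h"
#include "vaapi_vpp.h"

typedef struct ExampleVAAPIContext {
    VAAPIVPPContext vpp_ctx; // must be the first field, so the shared helpers
                             // can treat avctx->priv as a VAAPIVPPContext
    // filter-specific option fields would follow here
} ExampleVAAPIContext;

static av_cold int example_vaapi_init(AVFilterContext *avctx)
{
    VAAPIVPPContext *vpp_ctx = avctx->priv;

    ff_vaapi_vpp_ctx_init(avctx);
    vpp_ctx->pipeline_uninit = ff_vaapi_vpp_pipeline_uninit;
    vpp_ctx->output_format   = AV_PIX_FMT_NONE; // keep the input sw format

    return 0;
}

static int example_vaapi_config_output(AVFilterLink *outlink)
{
    AVFilterContext *avctx   = outlink->src;
    VAAPIVPPContext *vpp_ctx = avctx->priv;

    // A real filter decides the output size first (scale_vaapi evaluates its
    // w/h expressions here); this sketch just keeps the input dimensions.
    vpp_ctx->output_width  = avctx->inputs[0]->w;
    vpp_ctx->output_height = avctx->inputs[0]->h;

    return ff_vaapi_vpp_config_output(outlink);
}

static int example_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
{
    // Placeholder: pass the surface through unchanged.  The real work would
    // build a VAProcPipelineParameterBuffer and hand it to
    // ff_vaapi_vpp_render_picture(avctx, &params, output_surface).
    return ff_filter_frame(inlink->dst->outputs[0], input_frame);
}

static const AVOption example_vaapi_options[] = {
    { NULL },
};

AVFILTER_DEFINE_CLASS(example_vaapi);

static const AVFilterPad example_vaapi_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = &example_vaapi_filter_frame,
        .config_props = &ff_vaapi_vpp_config_input,  // takes the input hw_frames_ctx
    },
    { NULL }
};

static const AVFilterPad example_vaapi_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = &example_vaapi_config_output,
    },
    { NULL }
};

AVFilter ff_vf_example_vaapi = {
    .name          = "example_vaapi",
    .description   = NULL_IF_CONFIG_SMALL("Example VAAPI VPP filter skeleton."),
    .priv_size     = sizeof(ExampleVAAPIContext),
    .init          = &example_vaapi_init,
    .uninit        = &ff_vaapi_vpp_ctx_uninit,      // frees config/context/frame refs
    .query_formats = &ff_vaapi_vpp_query_formats,   // AV_PIX_FMT_VAAPI on both pads
    .inputs        = example_vaapi_inputs,
    .outputs       = example_vaapi_outputs,
    .priv_class    = &example_vaapi_class,
};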