8c21f1e3b7
This patch tries to resolve multiple issues related to parameter configuration:

Firstly, each DNN filter duplicates DNN_COMMON_OPTIONS, which should be the common options of the backend.

Secondly, backend options are hidden behind the scenes. For the user they are a single AV_OPT_TYPE_STRING backend_configs option, parsed by each backend, so the help message doesn't show which options each backend supports.

Thirdly, DNN backends duplicate DNN_BACKEND_COMMON_OPTIONS.

Last but not least, passing backend options via AV_OPT_TYPE_STRING makes it hard, if not impossible, to pass AV_OPT_TYPE_BINARY options to a backend.

This patch puts the backend common options and each backend's own options inside DnnContext to reduce code duplication, make the options user friendly, and make them easy to extend for future use cases. For example:

./ffmpeg -h filter=dnn_processing

dnn_processing AVOptions:
   dnn_backend       <int>        ..FV....... DNN backend (from INT_MIN to INT_MAX) (default tensorflow)
     tensorflow      1            ..FV....... tensorflow backend flag
     openvino        2            ..FV....... openvino backend flag
     torch           3            ..FV....... torch backend flag

dnn_base AVOptions:
   model             <string>     ..F........ path to model file
   input             <string>     ..F........ input name of the model
   output            <string>     ..F........ output name of the model
   backend_configs   <string>     ..F.......P backend configs (deprecated)
   options           <string>     ..F.......P backend configs (deprecated)
   nireq             <int>        ..F........ number of request (from 0 to INT_MAX) (default 0)
   async             <boolean>    ..F........ use DNN async inference (default true)
   device            <string>     ..F........ device to run model

dnn_tensorflow AVOptions:
   sess_config       <string>     ..F........ config for SessionOptions

dnn_openvino AVOptions:
   batch_size        <int>        ..F........ batch size per request (from 1 to 1000) (default 1)
   input_resizable   <boolean>    ..F........ can input be resizable or not (default false)
   layout            <int>        ..F........ input layout of model (from 0 to 2) (default none)
     none            0            ..F........ none
     nchw            1            ..F........ nchw
     nhwc            2            ..F........ nhwc
   scale             <float>      ..F........ Add scale preprocess operation. Divide each element of input by specified value. (from INT_MIN to INT_MAX) (default 0)
   mean              <float>      ..F........ Add mean preprocess operation. Subtract specified value from each element of input. (from INT_MIN to INT_MAX) (default 0)

dnn_th AVOptions:
   optimize          <int>        ..F........ turn on graph executor optimization (from 0 to 1) (default 0)

Signed-off-by: Zhao Zhili <zhilizhao@tencent.com>
Reviewed-by: Wenbin Chen <wenbin.chen@intel.com>
Reviewed-by: Guo Yejun <yejun.guo@intel.com>
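As a rough illustration of what the child-class layout enables (a minimal sketch under assumptions, not code from this patch: filt_ctx is a previously created dnn_processing filter instance, and the model path is a placeholder), backend options can now be reached through the generic AVOption API with AV_OPT_SEARCH_CHILDREN instead of being serialized into a backend_configs string:

#include "libavutil/opt.h"
#include "libavfilter/avfilter.h"

/* Hedged sketch: set DNN options on a dnn_processing filter instance via
 * the AVOption child-object search. Option names are taken from the help
 * output above; "model.xml" is a placeholder path. */
static int set_dnn_options(AVFilterContext *filt_ctx)
{
    int ret;

    /* "model" lives in the dnn_base child class */
    ret = av_opt_set(filt_ctx, "model", "model.xml", AV_OPT_SEARCH_CHILDREN);
    if (ret < 0)
        return ret;

    /* "batch_size" lives in the dnn_openvino child class */
    return av_opt_set_int(filt_ctx, "batch_size", 4, AV_OPT_SEARCH_CHILDREN);
}

This layout also clears the way for option types that a flat string cannot carry, such as AV_OPT_TYPE_BINARY.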
200 lines
6.7 KiB
C
/*
 * Copyright (c) 2018 Sergey Lavrushkin
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Filter implementing image super-resolution using deep convolutional networks.
 * https://arxiv.org/abs/1501.00092
 * https://arxiv.org/abs/1609.05158
 */
#include "avfilter.h"
|
|
#include "internal.h"
|
|
#include "video.h"
|
|
#include "libavutil/opt.h"
|
|
#include "libavutil/pixdesc.h"
|
|
#include "libswscale/swscale.h"
|
|
#include "dnn_filter_common.h"
|
|
|
|
typedef struct SRContext {
    const AVClass *class;
    DnnContext dnnctx;                  // common DNN context (backend type and backend options)
    int scale_factor;                   // upscale factor for SRCNN-style models
    struct SwsContext *sws_uv_scale;    // chroma-plane scaler (ESPCN path)
    int sws_uv_height;                  // source chroma height for sws_uv_scale
    struct SwsContext *sws_pre_scale;   // whole-frame pre-upscaler (SRCNN path)
} SRContext;
#define OFFSET(x) offsetof(SRContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
static const AVOption sr_options[] = {
    { "dnn_backend", "DNN backend used for model execution", OFFSET(dnnctx.backend_type), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, FLAGS, .unit = "backend" },
#if (CONFIG_LIBTENSORFLOW == 1)
    { "tensorflow", "tensorflow backend flag", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, FLAGS, .unit = "backend" },
#endif
    { "scale_factor", "scale factor for SRCNN model", OFFSET(scale_factor), AV_OPT_TYPE_INT, { .i64 = 2 }, 2, 4, FLAGS },
    { NULL }
};

AVFILTER_DNN_DEFINE_CLASS(sr);
static av_cold int init(AVFilterContext *context)
{
    SRContext *sr_context = context->priv;
    return ff_dnn_init(&sr_context->dnnctx, DFT_PROCESS_FRAME, context);
}
static const enum AVPixelFormat pixel_formats[] = {
    AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_GRAY8,
    AV_PIX_FMT_NONE
};
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *context = outlink->src;
    SRContext *ctx = context->priv;
    int result;
    AVFilterLink *inlink = context->inputs[0];
    int out_width, out_height;

    // do a trial run in case the DNN model resizes the frame
    result = ff_dnn_get_output(&ctx->dnnctx, inlink->w, inlink->h, &out_width, &out_height);
    if (result != 0) {
        av_log(ctx, AV_LOG_ERROR, "could not get output from the model\n");
        return result;
    }

    if (inlink->w != out_width || inlink->h != out_height) {
        // ESPCN-style model: the network upscales the luma plane itself,
        // so only the chroma planes need a separate scaler
        outlink->w = out_width;
        outlink->h = out_height;
        if (inlink->format != AV_PIX_FMT_GRAY8){
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
            int sws_src_h = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
            int sws_src_w = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
            int sws_dst_h = AV_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h);
            int sws_dst_w = AV_CEIL_RSHIFT(outlink->w, desc->log2_chroma_w);
            ctx->sws_uv_scale = sws_getContext(sws_src_w, sws_src_h, AV_PIX_FMT_GRAY8,
                                               sws_dst_w, sws_dst_h, AV_PIX_FMT_GRAY8,
                                               SWS_BICUBIC, NULL, NULL, NULL);
            ctx->sws_uv_height = sws_src_h;
        }
    } else {
        // SRCNN-style model: the network keeps the frame size, so the whole
        // frame is bicubically pre-upscaled by scale_factor before inference
        outlink->w = out_width * ctx->scale_factor;
        outlink->h = out_height * ctx->scale_factor;
        ctx->sws_pre_scale = sws_getContext(inlink->w, inlink->h, inlink->format,
                                            outlink->w, outlink->h, outlink->format,
                                            SWS_BICUBIC, NULL, NULL, NULL);
    }

    return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    DNNAsyncStatusType async_state = 0;
    AVFilterContext *context = inlink->dst;
    SRContext *ctx = context->priv;
    AVFilterLink *outlink = context->outputs[0];
    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    int dnn_result;

    if (!out){
        av_log(context, AV_LOG_ERROR, "could not allocate memory for output frame\n");
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    if (ctx->sws_pre_scale) {
        // SRCNN path: upscale into the output frame first, then run the
        // model in place on the upscaled frame
        sws_scale(ctx->sws_pre_scale,
                  (const uint8_t **)in->data, in->linesize, 0, in->height,
                  out->data, out->linesize);
        dnn_result = ff_dnn_execute_model(&ctx->dnnctx, out, out);
    } else {
        dnn_result = ff_dnn_execute_model(&ctx->dnnctx, in, out);
    }

    if (dnn_result != 0){
        av_log(ctx, AV_LOG_ERROR, "failed to execute loaded model\n");
        av_frame_free(&in);
        av_frame_free(&out);
        return dnn_result;
    }

    do {
        async_state = ff_dnn_get_result(&ctx->dnnctx, &in, &out);
    } while (async_state == DAST_NOT_READY);

    if (async_state != DAST_SUCCESS)
        return AVERROR(EINVAL);

    if (ctx->sws_uv_scale) {
        // ESPCN path: the model only produced luma, so scale U and V here
        sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 1), in->linesize + 1,
                  0, ctx->sws_uv_height, out->data + 1, out->linesize + 1);
        sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 2), in->linesize + 2,
                  0, ctx->sws_uv_height, out->data + 2, out->linesize + 2);
    }
    if (in != out) {
        av_frame_free(&in);
    }
    return ff_filter_frame(outlink, out);
}
static av_cold void uninit(AVFilterContext *context)
{
    SRContext *sr_context = context->priv;

    ff_dnn_uninit(&sr_context->dnnctx);
    sws_freeContext(sr_context->sws_uv_scale);
    sws_freeContext(sr_context->sws_pre_scale);
}
static const AVFilterPad sr_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
};
static const AVFilterPad sr_outputs[] = {
    {
        .name         = "default",
        .config_props = config_output,
        .type         = AVMEDIA_TYPE_VIDEO,
    },
};
const AVFilter ff_vf_sr = {
    .name          = "sr",
    .description   = NULL_IF_CONFIG_SMALL("Apply DNN-based image super resolution to the input."),
    .priv_size     = sizeof(SRContext),
    .preinit       = ff_dnn_filter_init_child_class,
    .init          = init,
    .uninit        = uninit,
    FILTER_INPUTS(sr_inputs),
    FILTER_OUTPUTS(sr_outputs),
    FILTER_PIXFMTS_ARRAY(pixel_formats),
    .priv_class    = &sr_class,
};
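For completeness, a minimal hedged sketch of how the "sr" filter above might be instantiated programmatically. Assumptions: "srcnn.pb" is a placeholder model path (no model ships with FFmpeg), graph setup and error handling are elided, and the args string mirrors the command-line form sr=model=srcnn.pb:scale_factor=2.

#include "libavfilter/avfilter.h"

/* Sketch: create an "sr" filter instance in an existing filter graph.
 * "srcnn.pb" is a placeholder path; options are parsed from the args
 * string just as they would be on the ffmpeg command line. */
static int create_sr_filter(AVFilterGraph *graph, AVFilterContext **sr_ctx)
{
    return avfilter_graph_create_filter(sr_ctx, avfilter_get_by_name("sr"),
                                        "sr", "model=srcnn.pb:scale_factor=2",
                                        NULL, graph);
}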