lavfi/dnn: refine code for frame pre/post processing
commit 59021d79a2
parent d2ccbc966b
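In short: the per-frame I/O callbacks in DNNModel are renamed from pre_proc/post_proc to frame_pre_proc/frame_post_proc, given a shared FramePrePostProc typedef, and made settable by filters through a new ff_dnn_set_frame_proc() helper. All six backend hunks below then follow the same dispatch: call the filter-supplied frame callback when one is registered, otherwise fall back to the generic conversion helpers. A minimal sketch of that pattern, using the native-backend names exactly as they appear in the diff:

    if (do_ioproc) {
        if (native_model->model->frame_pre_proc != NULL) {
            /* filter-supplied frame-level pre-processing */
            native_model->model->frame_pre_proc(in_frame, &input, native_model->model->filter_ctx);
        } else {
            /* default AVFrame -> DNNData conversion */
            ff_proc_from_frame_to_dnn(in_frame, &input, native_model->model->func_type, ctx);
        }
    }

The output side mirrors this with frame_post_proc and ff_proc_from_dnn_to_frame().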
--- a/libavfilter/dnn/dnn_backend_native.c
+++ b/libavfilter/dnn/dnn_backend_native.c
@@ -310,8 +310,8 @@ static DNNReturnType execute_model_native(const DNNModel *model, const char *inp
     input.data = oprd->data;
     input.dt = oprd->data_type;
     if (do_ioproc) {
-        if (native_model->model->pre_proc != NULL) {
-            native_model->model->pre_proc(in_frame, &input, native_model->model->filter_ctx);
+        if (native_model->model->frame_pre_proc != NULL) {
+            native_model->model->frame_pre_proc(in_frame, &input, native_model->model->filter_ctx);
         } else {
             ff_proc_from_frame_to_dnn(in_frame, &input, native_model->model->func_type, ctx);
         }
@@ -358,8 +358,8 @@ static DNNReturnType execute_model_native(const DNNModel *model, const char *inp
     output.dt = oprd->data_type;
 
     if (do_ioproc) {
-        if (native_model->model->post_proc != NULL) {
-            native_model->model->post_proc(out_frame, &output, native_model->model->filter_ctx);
+        if (native_model->model->frame_post_proc != NULL) {
+            native_model->model->frame_post_proc(out_frame, &output, native_model->model->filter_ctx);
         } else {
             ff_proc_from_dnn_to_frame(out_frame, &output, ctx);
         }
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -166,8 +166,8 @@ static DNNReturnType fill_model_input_ov(OVModel *ov_model, RequestItem *request
     for (int i = 0; i < request->task_count; ++i) {
         task = request->tasks[i];
         if (task->do_ioproc) {
-            if (ov_model->model->pre_proc != NULL) {
-                ov_model->model->pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
+            if (ov_model->model->frame_pre_proc != NULL) {
+                ov_model->model->frame_pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
             } else {
                 ff_proc_from_frame_to_dnn(task->in_frame, &input, ov_model->model->func_type, ctx);
             }
@@ -237,8 +237,8 @@ static void infer_completion_callback(void *args)
     for (int i = 0; i < request->task_count; ++i) {
         task = request->tasks[i];
         if (task->do_ioproc) {
-            if (task->ov_model->model->post_proc != NULL) {
-                task->ov_model->model->post_proc(task->out_frame, &output, task->ov_model->model->filter_ctx);
+            if (task->ov_model->model->frame_post_proc != NULL) {
+                task->ov_model->model->frame_post_proc(task->out_frame, &output, task->ov_model->model->filter_ctx);
             } else {
                 ff_proc_from_dnn_to_frame(task->out_frame, &output, ctx);
             }
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -756,8 +756,8 @@ static DNNReturnType execute_model_tf(const DNNModel *model, const char *input_n
     input.data = (float *)TF_TensorData(input_tensor);
 
     if (do_ioproc) {
-        if (tf_model->model->pre_proc != NULL) {
-            tf_model->model->pre_proc(in_frame, &input, tf_model->model->filter_ctx);
+        if (tf_model->model->frame_pre_proc != NULL) {
+            tf_model->model->frame_pre_proc(in_frame, &input, tf_model->model->filter_ctx);
         } else {
             ff_proc_from_frame_to_dnn(in_frame, &input, tf_model->model->func_type, ctx);
         }
@@ -818,8 +818,8 @@ static DNNReturnType execute_model_tf(const DNNModel *model, const char *input_n
     output.dt = TF_TensorType(output_tensors[i]);
 
     if (do_ioproc) {
-        if (tf_model->model->post_proc != NULL) {
-            tf_model->model->post_proc(out_frame, &output, tf_model->model->filter_ctx);
+        if (tf_model->model->frame_post_proc != NULL) {
+            tf_model->model->frame_post_proc(out_frame, &output, tf_model->model->filter_ctx);
         } else {
             ff_proc_from_dnn_to_frame(out_frame, &output, ctx);
         }
--- a/libavfilter/dnn_filter_common.c
+++ b/libavfilter/dnn_filter_common.c
@@ -64,6 +64,13 @@ int ff_dnn_init(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *fil
     return 0;
 }
 
+int ff_dnn_set_frame_proc(DnnContext *ctx, FramePrePostProc pre_proc, FramePrePostProc post_proc)
+{
+    ctx->model->frame_pre_proc = pre_proc;
+    ctx->model->frame_post_proc = post_proc;
+    return 0;
+}
+
 DNNReturnType ff_dnn_get_input(DnnContext *ctx, DNNData *input)
 {
     return ctx->model->get_input(ctx->model->model, input, ctx->model_inputname);
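A sketch of how a filter could adopt the new helper; only ff_dnn_init(), ff_dnn_set_frame_proc() and the FramePrePostProc callback signature come from this commit, while MyFilterContext, the callback bodies and the DFT_PROCESS_FRAME function type are illustrative assumptions:

    /* Hypothetical filter-side wiring (not part of this commit): MyFilterContext
     * is a placeholder private context embedding a DnnContext member. */
    typedef struct MyFilterContext {
        const AVClass *class;
        DnnContext dnnctx;
    } MyFilterContext;

    static int hypothetical_pre_proc(AVFrame *frame, DNNData *model_input, AVFilterContext *filter_ctx)
    {
        /* convert pixel data from frame into model_input->data here */
        return 0;
    }

    static int hypothetical_post_proc(AVFrame *frame, DNNData *model_output, AVFilterContext *filter_ctx)
    {
        /* write model_output->data back into frame here */
        return 0;
    }

    static int hypothetical_init(AVFilterContext *filter_ctx)
    {
        MyFilterContext *s = filter_ctx->priv;
        int ret = ff_dnn_init(&s->dnnctx, DFT_PROCESS_FRAME, filter_ctx);
        if (ret < 0)
            return ret;
        /* register frame-level callbacks; leaving them NULL keeps the default
         * ff_proc_from_frame_to_dnn()/ff_proc_from_dnn_to_frame() conversions */
        return ff_dnn_set_frame_proc(&s->dnnctx, hypothetical_pre_proc, hypothetical_post_proc);
    }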
--- a/libavfilter/dnn_filter_common.h
+++ b/libavfilter/dnn_filter_common.h
@@ -48,6 +48,7 @@ typedef struct DnnContext {
 
 
 int ff_dnn_init(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx);
+int ff_dnn_set_frame_proc(DnnContext *ctx, FramePrePostProc pre_proc, FramePrePostProc post_proc);
 DNNReturnType ff_dnn_get_input(DnnContext *ctx, DNNData *input);
 DNNReturnType ff_dnn_get_output(DnnContext *ctx, int input_width, int input_height, int *output_width, int *output_height);
 DNNReturnType ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame);
--- a/libavfilter/dnn_interface.h
+++ b/libavfilter/dnn_interface.h
@@ -63,6 +63,8 @@ typedef struct DNNData{
     DNNColorOrder order;
 } DNNData;
 
+typedef int (*FramePrePostProc)(AVFrame *frame, DNNData *model, AVFilterContext *filter_ctx);
+
 typedef struct DNNModel{
     // Stores model that can be different for different backends.
     void *model;
@@ -80,10 +82,10 @@ typedef struct DNNModel{
                                 const char *output_name, int *output_width, int *output_height);
     // set the pre process to transfer data from AVFrame to DNNData
     // the default implementation within DNN is used if it is not provided by the filter
-    int (*pre_proc)(AVFrame *frame_in, DNNData *model_input, AVFilterContext *filter_ctx);
+    FramePrePostProc frame_pre_proc;
     // set the post process to transfer data from DNNData to AVFrame
     // the default implementation within DNN is used if it is not provided by the filter
-    int (*post_proc)(AVFrame *frame_out, DNNData *model_output, AVFilterContext *filter_ctx);
+    FramePrePostProc frame_post_proc;
 } DNNModel;
 
 // Stores pointers to functions for loading, executing, freeing DNN models for one of the backends.
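For completeness, a sketch of what a concrete FramePrePostProc might look like on the post-processing side. The signature is the one introduced above; the grayscale layout, the [0,1] float range and the use of frame->data[0]/linesize[0] are assumptions for illustration (av_clip_uint8() comes from libavutil/common.h):

    /* Illustrative frame_post_proc: clamp packed float model output (assumed to
     * hold one value per pixel in [0,1]) into an 8-bit grayscale AVFrame.
     * Needs libavutil/frame.h and libavutil/common.h. */
    static int illustrative_post_proc(AVFrame *frame, DNNData *model_output, AVFilterContext *filter_ctx)
    {
        const float *src = model_output->data;

        for (int y = 0; y < frame->height; y++) {
            uint8_t *dst = frame->data[0] + y * frame->linesize[0];
            for (int x = 0; x < frame->width; x++)
                dst[x] = av_clip_uint8((int)(src[y * frame->width + x] * 255.0f + 0.5f));
        }
        return 0;
    }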