2018-05-25 19:31:04 +02:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2018 Sergey Lavrushkin
|
|
|
|
*
|
|
|
|
* This file is part of FFmpeg.
|
|
|
|
*
|
|
|
|
* FFmpeg is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
|
|
|
* version 2.1 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* FFmpeg is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
* License along with FFmpeg; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @file
|
|
|
|
* DNN inference engine interface.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef AVFILTER_DNN_INTERFACE_H
|
|
|
|
#define AVFILTER_DNN_INTERFACE_H
|
|
|
|
|
2019-04-25 04:14:33 +02:00
|
|
|
#include <stdint.h>
|
2020-08-28 06:51:44 +02:00
|
|
|
#include "libavutil/frame.h"
|
2020-12-31 03:19:39 +02:00
|
|
|
#include "avfilter.h"
|
2019-04-25 04:14:33 +02:00
|
|
|
|
2022-03-02 20:05:50 +02:00
|
|
|
// Generic DNN error code, returned when no more specific AVERROR applies.
#define DNN_GENERIC_ERROR FFERRTAG('D','N','N','!')
|
|
|
|
|
2024-03-15 06:42:49 +02:00
|
|
|
// Supported DNN inference backends: TensorFlow, OpenVINO, Torch.
// Values start at 1 so that 0 can mean "no backend selected".
typedef enum {DNN_TF = 1, DNN_OV, DNN_TH} DNNBackendType;
|
2018-05-25 19:31:04 +02:00
|
|
|
|
2019-08-20 10:50:34 +02:00
|
|
|
// Element type of a tensor buffer (DNNData.data).
// NOTE(review): the explicit values (1 and 4) look chosen to match the
// element size in bytes of float and uint8 counterparts elsewhere — confirm before relying on it.
typedef enum {DNN_FLOAT = 1, DNN_UINT8 = 4} DNNDataType;
|
2019-04-25 04:14:42 +02:00
|
|
|
|
2021-02-08 03:53:39 +02:00
|
|
|
// Color channel order of packed image data handed to/from the model.
// Together with DNNData.dt this decides the color format.
typedef enum {
    DCO_NONE,
    DCO_BGR,
    DCO_RGB,
} DNNColorOrder;
|
|
|
|
|
2020-11-18 08:28:06 +02:00
|
|
|
// Status reported when polling for an async inference result.
typedef enum {
    DAST_FAIL,        // something wrong
    DAST_EMPTY_QUEUE, // no more inference result to get
    DAST_NOT_READY,   // all queued inferences are not finished
    DAST_SUCCESS,     // got a result frame successfully
} DNNAsyncStatusType;
|
|
|
|
|
2021-02-07 08:35:22 +02:00
|
|
|
// What kind of task the loaded model performs; selects the matching
// pre/post-processing path.
typedef enum {
    DFT_NONE,
    DFT_PROCESS_FRAME,      // process the whole frame
    DFT_ANALYTICS_DETECT,   // detect from the whole frame
    DFT_ANALYTICS_CLASSIFY, // classify for each bounding box
} DNNFunctionType;
|
|
|
|
|
2023-09-21 03:26:31 +02:00
|
|
|
// Memory layout of a 4-D tensor: N = batch, C = channel, H = height, W = width.
// See dnn_get_*_idx_by_layout() for how this maps to DNNData.dims indices.
typedef enum {
    DL_NONE,
    DL_NCHW,
    DL_NHWC,
} DNNLayout;
|
|
|
|
|
2019-10-21 14:38:10 +02:00
|
|
|
typedef struct DNNData{
|
2019-04-25 04:14:42 +02:00
|
|
|
void *data;
|
2024-01-17 09:21:50 +02:00
|
|
|
int dims[4];
|
2021-02-08 03:53:39 +02:00
|
|
|
// dt and order together decide the color format
|
|
|
|
DNNDataType dt;
|
|
|
|
DNNColorOrder order;
|
2023-09-21 03:26:31 +02:00
|
|
|
DNNLayout layout;
|
2023-09-21 03:26:32 +02:00
|
|
|
float scale;
|
|
|
|
float mean;
|
2018-05-25 19:31:04 +02:00
|
|
|
} DNNData;
|
|
|
|
|
2021-04-01 04:06:06 +02:00
|
|
|
typedef struct DNNExecBaseParams {
|
|
|
|
const char *input_name;
|
|
|
|
const char **output_names;
|
|
|
|
uint32_t nb_output;
|
|
|
|
AVFrame *in_frame;
|
|
|
|
AVFrame *out_frame;
|
|
|
|
} DNNExecBaseParams;
|
|
|
|
|
2021-03-16 07:02:56 +02:00
|
|
|
typedef struct DNNExecClassificationParams {
|
|
|
|
DNNExecBaseParams base;
|
|
|
|
const char *target;
|
|
|
|
} DNNExecClassificationParams;
|
|
|
|
|
2021-03-01 13:23:20 +02:00
|
|
|
typedef int (*FramePrePostProc)(AVFrame *frame, DNNData *model, AVFilterContext *filter_ctx);
|
2021-03-09 08:51:42 +02:00
|
|
|
typedef int (*DetectPostProc)(AVFrame *frame, DNNData *output, uint32_t nb, AVFilterContext *filter_ctx);
|
2021-03-16 07:02:56 +02:00
|
|
|
typedef int (*ClassifyPostProc)(AVFrame *frame, DNNData *output, uint32_t bbox_index, AVFilterContext *filter_ctx);
|
2021-03-01 13:23:20 +02:00
|
|
|
|
2018-05-25 19:31:04 +02:00
|
|
|
typedef struct DNNModel{
|
|
|
|
// Stores model that can be different for different backends.
|
2018-07-27 18:34:02 +02:00
|
|
|
void *model;
|
2020-11-18 08:54:10 +02:00
|
|
|
// Stores FilterContext used for the interaction between AVFrame and DNNData
|
|
|
|
AVFilterContext *filter_ctx;
|
2021-02-07 08:35:22 +02:00
|
|
|
// Stores function type of the model
|
|
|
|
DNNFunctionType func_type;
|
2019-10-21 14:38:17 +02:00
|
|
|
// Gets model input information
|
|
|
|
// Just reuse struct DNNData here, actually the DNNData.data field is not needed.
|
2022-03-02 20:05:52 +02:00
|
|
|
int (*get_input)(void *model, DNNData *input, const char *input_name);
|
2020-09-11 16:15:04 +02:00
|
|
|
// Gets model output width/height with given input w/h
|
2022-03-02 20:05:52 +02:00
|
|
|
int (*get_output)(void *model, const char *input_name, int input_width, int input_height,
|
2020-09-11 16:15:04 +02:00
|
|
|
const char *output_name, int *output_width, int *output_height);
|
2020-08-28 06:51:44 +02:00
|
|
|
// set the pre process to transfer data from AVFrame to DNNData
|
|
|
|
// the default implementation within DNN is used if it is not provided by the filter
|
2021-03-01 13:23:20 +02:00
|
|
|
FramePrePostProc frame_pre_proc;
|
2020-08-28 06:51:44 +02:00
|
|
|
// set the post process to transfer data from DNNData to AVFrame
|
|
|
|
// the default implementation within DNN is used if it is not provided by the filter
|
2021-03-01 13:23:20 +02:00
|
|
|
FramePrePostProc frame_post_proc;
|
2021-03-09 08:51:42 +02:00
|
|
|
// set the post process to interpret detect result from DNNData
|
|
|
|
DetectPostProc detect_post_proc;
|
2021-03-16 07:02:56 +02:00
|
|
|
// set the post process to interpret classify result from DNNData
|
|
|
|
ClassifyPostProc classify_post_proc;
|
2018-05-25 19:31:04 +02:00
|
|
|
} DNNModel;
|
|
|
|
|
avfilter/dnn: Refactor DNN parameter configuration system
This patch trying to resolve mulitiple issues related to parameter
configuration:
Firstly, each DNN filters duplicate DNN_COMMON_OPTIONS, which should
be the common options of backend.
Secondly, backend options are hidden behind the scene. It's a
AV_OPT_TYPE_STRING backend_configs for user, and parsed by each
backend. We don't know each backend support what kind of options
from the help message.
Third, DNN backends duplicate DNN_BACKEND_COMMON_OPTIONS.
Last but not the least, pass backend options via AV_OPT_TYPE_STRING
makes it hard to pass AV_OPT_TYPE_BINARY to backend, if not impossible.
This patch puts backend common options and each backend options inside
DnnContext to reduce code duplication, make options user friendly, and
easy to extend for future usecase.
For example,
./ffmpeg -h filter=dnn_processing
dnn_processing AVOptions:
dnn_backend <int> ..FV....... DNN backend (from INT_MIN to INT_MAX) (default tensorflow)
tensorflow 1 ..FV....... tensorflow backend flag
openvino 2 ..FV....... openvino backend flag
torch 3 ..FV....... torch backend flag
dnn_base AVOptions:
model <string> ..F........ path to model file
input <string> ..F........ input name of the model
output <string> ..F........ output name of the model
backend_configs <string> ..F.......P backend configs (deprecated)
options <string> ..F.......P backend configs (deprecated)
nireq <int> ..F........ number of request (from 0 to INT_MAX) (default 0)
async <boolean> ..F........ use DNN async inference (default true)
device <string> ..F........ device to run model
dnn_tensorflow AVOptions:
sess_config <string> ..F........ config for SessionOptions
dnn_openvino AVOptions:
batch_size <int> ..F........ batch size per request (from 1 to 1000) (default 1)
input_resizable <boolean> ..F........ can input be resizable or not (default false)
layout <int> ..F........ input layout of model (from 0 to 2) (default none)
none 0 ..F........ none
nchw 1 ..F........ nchw
nhwc 2 ..F........ nhwc
scale <float> ..F........ Add scale preprocess operation. Divide each element of input by specified value. (from INT_MIN to INT_MAX) (default 0)
mean <float> ..F........ Add mean preprocess operation. Subtract specified value from each element of input. (from INT_MIN to INT_MAX) (default 0)
dnn_th AVOptions:
optimize <int> ..F........ turn on graph executor optimization (from 0 to 1) (default 0)
Signed-off-by: Zhao Zhili <zhilizhao@tencent.com>
Reviewed-by: Wenbin Chen <wenbin.chen@intel.com>
Reviewed-by: Guo Yejun <yejun.guo@intel.com>
2024-05-07 18:08:08 +02:00
|
|
|
typedef struct TFOptions{
|
|
|
|
const AVClass *clazz;
|
|
|
|
|
|
|
|
char *sess_config;
|
|
|
|
} TFOptions;
|
|
|
|
|
|
|
|
typedef struct OVOptions {
|
|
|
|
const AVClass *clazz;
|
|
|
|
|
|
|
|
int batch_size;
|
|
|
|
int input_resizable;
|
|
|
|
DNNLayout layout;
|
|
|
|
float scale;
|
|
|
|
float mean;
|
|
|
|
} OVOptions;
|
|
|
|
|
|
|
|
typedef struct THOptions {
|
|
|
|
const AVClass *clazz;
|
|
|
|
int optimize;
|
|
|
|
} THOptions;
|
|
|
|
|
|
|
|
// Forward declaration; the full definition follows further below.
typedef struct DNNModule DNNModule;
|
|
|
|
|
|
|
|
typedef struct DnnContext {
|
|
|
|
const AVClass *clazz;
|
|
|
|
|
|
|
|
DNNModel *model;
|
|
|
|
|
|
|
|
char *model_filename;
|
|
|
|
DNNBackendType backend_type;
|
|
|
|
char *model_inputname;
|
|
|
|
char *model_outputnames_string;
|
|
|
|
char *backend_options;
|
|
|
|
int async;
|
|
|
|
|
|
|
|
char **model_outputnames;
|
|
|
|
uint32_t nb_outputs;
|
|
|
|
const DNNModule *dnn_module;
|
|
|
|
|
|
|
|
int nireq;
|
|
|
|
char *device;
|
|
|
|
|
|
|
|
#if CONFIG_LIBTENSORFLOW
|
|
|
|
TFOptions tf_option;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if CONFIG_LIBOPENVINO
|
|
|
|
OVOptions ov_option;
|
|
|
|
#endif
|
|
|
|
#if CONFIG_LIBTORCH
|
|
|
|
THOptions torch_option;
|
|
|
|
#endif
|
|
|
|
} DnnContext;
|
|
|
|
|
2018-05-25 19:31:04 +02:00
|
|
|
// Stores pointers to functions for loading, executing, freeing DNN models for one of the backends.
|
avfilter/dnn: Refactor DNN parameter configuration system
This patch trying to resolve mulitiple issues related to parameter
configuration:
Firstly, each DNN filters duplicate DNN_COMMON_OPTIONS, which should
be the common options of backend.
Secondly, backend options are hidden behind the scene. It's a
AV_OPT_TYPE_STRING backend_configs for user, and parsed by each
backend. We don't know each backend support what kind of options
from the help message.
Third, DNN backends duplicate DNN_BACKEND_COMMON_OPTIONS.
Last but not the least, pass backend options via AV_OPT_TYPE_STRING
makes it hard to pass AV_OPT_TYPE_BINARY to backend, if not impossible.
This patch puts backend common options and each backend options inside
DnnContext to reduce code duplication, make options user friendly, and
easy to extend for future usecase.
For example,
./ffmpeg -h filter=dnn_processing
dnn_processing AVOptions:
dnn_backend <int> ..FV....... DNN backend (from INT_MIN to INT_MAX) (default tensorflow)
tensorflow 1 ..FV....... tensorflow backend flag
openvino 2 ..FV....... openvino backend flag
torch 3 ..FV....... torch backend flag
dnn_base AVOptions:
model <string> ..F........ path to model file
input <string> ..F........ input name of the model
output <string> ..F........ output name of the model
backend_configs <string> ..F.......P backend configs (deprecated)
options <string> ..F.......P backend configs (deprecated)
nireq <int> ..F........ number of request (from 0 to INT_MAX) (default 0)
async <boolean> ..F........ use DNN async inference (default true)
device <string> ..F........ device to run model
dnn_tensorflow AVOptions:
sess_config <string> ..F........ config for SessionOptions
dnn_openvino AVOptions:
batch_size <int> ..F........ batch size per request (from 1 to 1000) (default 1)
input_resizable <boolean> ..F........ can input be resizable or not (default false)
layout <int> ..F........ input layout of model (from 0 to 2) (default none)
none 0 ..F........ none
nchw 1 ..F........ nchw
nhwc 2 ..F........ nhwc
scale <float> ..F........ Add scale preprocess operation. Divide each element of input by specified value. (from INT_MIN to INT_MAX) (default 0)
mean <float> ..F........ Add mean preprocess operation. Subtract specified value from each element of input. (from INT_MIN to INT_MAX) (default 0)
dnn_th AVOptions:
optimize <int> ..F........ turn on graph executor optimization (from 0 to 1) (default 0)
Signed-off-by: Zhao Zhili <zhilizhao@tencent.com>
Reviewed-by: Wenbin Chen <wenbin.chen@intel.com>
Reviewed-by: Guo Yejun <yejun.guo@intel.com>
2024-05-07 18:08:08 +02:00
|
|
|
struct DNNModule {
|
|
|
|
const AVClass clazz;
|
2018-05-25 19:31:04 +02:00
|
|
|
// Loads model and parameters from given file. Returns NULL if it is not possible.
|
avfilter/dnn: Refactor DNN parameter configuration system
This patch trying to resolve mulitiple issues related to parameter
configuration:
Firstly, each DNN filters duplicate DNN_COMMON_OPTIONS, which should
be the common options of backend.
Secondly, backend options are hidden behind the scene. It's a
AV_OPT_TYPE_STRING backend_configs for user, and parsed by each
backend. We don't know each backend support what kind of options
from the help message.
Third, DNN backends duplicate DNN_BACKEND_COMMON_OPTIONS.
Last but not the least, pass backend options via AV_OPT_TYPE_STRING
makes it hard to pass AV_OPT_TYPE_BINARY to backend, if not impossible.
This patch puts backend common options and each backend options inside
DnnContext to reduce code duplication, make options user friendly, and
easy to extend for future usecase.
For example,
./ffmpeg -h filter=dnn_processing
dnn_processing AVOptions:
dnn_backend <int> ..FV....... DNN backend (from INT_MIN to INT_MAX) (default tensorflow)
tensorflow 1 ..FV....... tensorflow backend flag
openvino 2 ..FV....... openvino backend flag
torch 3 ..FV....... torch backend flag
dnn_base AVOptions:
model <string> ..F........ path to model file
input <string> ..F........ input name of the model
output <string> ..F........ output name of the model
backend_configs <string> ..F.......P backend configs (deprecated)
options <string> ..F.......P backend configs (deprecated)
nireq <int> ..F........ number of request (from 0 to INT_MAX) (default 0)
async <boolean> ..F........ use DNN async inference (default true)
device <string> ..F........ device to run model
dnn_tensorflow AVOptions:
sess_config <string> ..F........ config for SessionOptions
dnn_openvino AVOptions:
batch_size <int> ..F........ batch size per request (from 1 to 1000) (default 1)
input_resizable <boolean> ..F........ can input be resizable or not (default false)
layout <int> ..F........ input layout of model (from 0 to 2) (default none)
none 0 ..F........ none
nchw 1 ..F........ nchw
nhwc 2 ..F........ nhwc
scale <float> ..F........ Add scale preprocess operation. Divide each element of input by specified value. (from INT_MIN to INT_MAX) (default 0)
mean <float> ..F........ Add mean preprocess operation. Subtract specified value from each element of input. (from INT_MIN to INT_MAX) (default 0)
dnn_th AVOptions:
optimize <int> ..F........ turn on graph executor optimization (from 0 to 1) (default 0)
Signed-off-by: Zhao Zhili <zhilizhao@tencent.com>
Reviewed-by: Wenbin Chen <wenbin.chen@intel.com>
Reviewed-by: Guo Yejun <yejun.guo@intel.com>
2024-05-07 18:08:08 +02:00
|
|
|
DNNModel *(*load_model)(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx);
|
2022-03-02 20:05:52 +02:00
|
|
|
// Executes model with specified input and output. Returns the error code otherwise.
|
|
|
|
int (*execute_model)(const DNNModel *model, DNNExecBaseParams *exec_params);
|
2020-11-18 08:28:06 +02:00
|
|
|
// Retrieve inference result.
|
2021-08-25 23:10:45 +02:00
|
|
|
DNNAsyncStatusType (*get_result)(const DNNModel *model, AVFrame **in, AVFrame **out);
|
2021-01-07 05:14:10 +02:00
|
|
|
// Flush all the pending tasks.
|
2022-03-02 20:05:52 +02:00
|
|
|
int (*flush)(const DNNModel *model);
|
2018-05-25 19:31:04 +02:00
|
|
|
// Frees memory allocated for model.
|
2018-07-27 18:34:02 +02:00
|
|
|
void (*free_model)(DNNModel **model);
|
avfilter/dnn: Refactor DNN parameter configuration system
This patch trying to resolve mulitiple issues related to parameter
configuration:
Firstly, each DNN filters duplicate DNN_COMMON_OPTIONS, which should
be the common options of backend.
Secondly, backend options are hidden behind the scene. It's a
AV_OPT_TYPE_STRING backend_configs for user, and parsed by each
backend. We don't know each backend support what kind of options
from the help message.
Third, DNN backends duplicate DNN_BACKEND_COMMON_OPTIONS.
Last but not the least, pass backend options via AV_OPT_TYPE_STRING
makes it hard to pass AV_OPT_TYPE_BINARY to backend, if not impossible.
This patch puts backend common options and each backend options inside
DnnContext to reduce code duplication, make options user friendly, and
easy to extend for future usecase.
For example,
./ffmpeg -h filter=dnn_processing
dnn_processing AVOptions:
dnn_backend <int> ..FV....... DNN backend (from INT_MIN to INT_MAX) (default tensorflow)
tensorflow 1 ..FV....... tensorflow backend flag
openvino 2 ..FV....... openvino backend flag
torch 3 ..FV....... torch backend flag
dnn_base AVOptions:
model <string> ..F........ path to model file
input <string> ..F........ input name of the model
output <string> ..F........ output name of the model
backend_configs <string> ..F.......P backend configs (deprecated)
options <string> ..F.......P backend configs (deprecated)
nireq <int> ..F........ number of request (from 0 to INT_MAX) (default 0)
async <boolean> ..F........ use DNN async inference (default true)
device <string> ..F........ device to run model
dnn_tensorflow AVOptions:
sess_config <string> ..F........ config for SessionOptions
dnn_openvino AVOptions:
batch_size <int> ..F........ batch size per request (from 1 to 1000) (default 1)
input_resizable <boolean> ..F........ can input be resizable or not (default false)
layout <int> ..F........ input layout of model (from 0 to 2) (default none)
none 0 ..F........ none
nchw 1 ..F........ nchw
nhwc 2 ..F........ nhwc
scale <float> ..F........ Add scale preprocess operation. Divide each element of input by specified value. (from INT_MIN to INT_MAX) (default 0)
mean <float> ..F........ Add mean preprocess operation. Subtract specified value from each element of input. (from INT_MIN to INT_MAX) (default 0)
dnn_th AVOptions:
optimize <int> ..F........ turn on graph executor optimization (from 0 to 1) (default 0)
Signed-off-by: Zhao Zhili <zhilizhao@tencent.com>
Reviewed-by: Wenbin Chen <wenbin.chen@intel.com>
Reviewed-by: Guo Yejun <yejun.guo@intel.com>
2024-05-07 18:08:08 +02:00
|
|
|
};
|
2018-05-25 19:31:04 +02:00
|
|
|
|
|
|
|
// Initializes DNNModule depending on chosen backend.
|
2023-04-30 17:38:55 +02:00
|
|
|
const DNNModule *ff_get_dnn_module(DNNBackendType backend_type, void *log_ctx);
|
2018-05-25 19:31:04 +02:00
|
|
|
|
avfilter/dnn: Refactor DNN parameter configuration system
This patch trying to resolve mulitiple issues related to parameter
configuration:
Firstly, each DNN filters duplicate DNN_COMMON_OPTIONS, which should
be the common options of backend.
Secondly, backend options are hidden behind the scene. It's a
AV_OPT_TYPE_STRING backend_configs for user, and parsed by each
backend. We don't know each backend support what kind of options
from the help message.
Third, DNN backends duplicate DNN_BACKEND_COMMON_OPTIONS.
Last but not the least, pass backend options via AV_OPT_TYPE_STRING
makes it hard to pass AV_OPT_TYPE_BINARY to backend, if not impossible.
This patch puts backend common options and each backend options inside
DnnContext to reduce code duplication, make options user friendly, and
easy to extend for future usecase.
For example,
./ffmpeg -h filter=dnn_processing
dnn_processing AVOptions:
dnn_backend <int> ..FV....... DNN backend (from INT_MIN to INT_MAX) (default tensorflow)
tensorflow 1 ..FV....... tensorflow backend flag
openvino 2 ..FV....... openvino backend flag
torch 3 ..FV....... torch backend flag
dnn_base AVOptions:
model <string> ..F........ path to model file
input <string> ..F........ input name of the model
output <string> ..F........ output name of the model
backend_configs <string> ..F.......P backend configs (deprecated)
options <string> ..F.......P backend configs (deprecated)
nireq <int> ..F........ number of request (from 0 to INT_MAX) (default 0)
async <boolean> ..F........ use DNN async inference (default true)
device <string> ..F........ device to run model
dnn_tensorflow AVOptions:
sess_config <string> ..F........ config for SessionOptions
dnn_openvino AVOptions:
batch_size <int> ..F........ batch size per request (from 1 to 1000) (default 1)
input_resizable <boolean> ..F........ can input be resizable or not (default false)
layout <int> ..F........ input layout of model (from 0 to 2) (default none)
none 0 ..F........ none
nchw 1 ..F........ nchw
nhwc 2 ..F........ nhwc
scale <float> ..F........ Add scale preprocess operation. Divide each element of input by specified value. (from INT_MIN to INT_MAX) (default 0)
mean <float> ..F........ Add mean preprocess operation. Subtract specified value from each element of input. (from INT_MIN to INT_MAX) (default 0)
dnn_th AVOptions:
optimize <int> ..F........ turn on graph executor optimization (from 0 to 1) (default 0)
Signed-off-by: Zhao Zhili <zhilizhao@tencent.com>
Reviewed-by: Wenbin Chen <wenbin.chen@intel.com>
Reviewed-by: Guo Yejun <yejun.guo@intel.com>
2024-05-07 18:08:08 +02:00
|
|
|
void ff_dnn_init_child_class(DnnContext *ctx);
|
|
|
|
void *ff_dnn_child_next(DnnContext *obj, void *prev);
|
|
|
|
const AVClass *ff_dnn_child_class_iterate(void **iter);
|
|
|
|
|
2024-01-17 09:21:50 +02:00
|
|
|
// Index of the width dimension in DNNData.dims for the given layout:
// NHWC -> dims[2]; NCHW (and DL_NONE, treated like NCHW) -> dims[3].
static inline int dnn_get_width_idx_by_layout(DNNLayout layout)
{
    return layout == DL_NHWC ? 2 : 3;
}
|
|
|
|
|
|
|
|
// Index of the height dimension in DNNData.dims for the given layout:
// NHWC -> dims[1]; NCHW (and DL_NONE, treated like NCHW) -> dims[2].
static inline int dnn_get_height_idx_by_layout(DNNLayout layout)
{
    return layout == DL_NHWC ? 1 : 2;
}
|
|
|
|
|
|
|
|
// Index of the channel dimension in DNNData.dims for the given layout:
// NHWC -> dims[3]; NCHW (and DL_NONE, treated like NCHW) -> dims[1].
static inline int dnn_get_channel_idx_by_layout(DNNLayout layout)
{
    return layout == DL_NHWC ? 3 : 1;
}
|
|
|
|
|
2018-05-25 19:31:04 +02:00
|
|
|
#endif
|