lavfi/dnn: Rename InferenceItem to LastLevelTaskItem
This patch renames the InferenceItem to LastLevelTaskItem in the three
backends to avoid confusion among the meanings of these structs.

The following are the renames done in this patch:

1. extract_inference_from_task -> extract_lltask_from_task
2. InferenceItem -> LastLevelTaskItem
3. inference_queue -> lltask_queue
4. inference -> lltask
5. inference_count -> lltask_count

Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
commit 660a205b05
parent 1544d6fa0a
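Background for the diff below: a TaskItem is one user-level execution request, and a LastLevelTaskItem is one unit of backend inference derived from it; for classification there is one such unit per detected bounding box, which is why "inference" was an overloaded name. A minimal sketch of that fan-out, paraphrased from the extract_lltask_from_task() hunks below (fan_out_classify is a hypothetical name used only for illustration; error handling and the detection-label target filter of the real function are simplified):

static DNNReturnType fan_out_classify(TaskItem *task, Queue *lltask_queue,
                                      const AVDetectionBBoxHeader *header)
{
    task->inference_todo = 0;
    task->inference_done = 0;
    for (uint32_t i = 0; i < header->nb_bboxes; i++) {
        // one last level task per bounding box to classify
        LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
        if (!lltask)
            return DNN_ERROR;
        task->inference_todo++;
        lltask->task = task;
        lltask->bbox_index = i;
        if (ff_queue_push_back(lltask_queue, lltask) < 0) {
            av_freep(&lltask);
            return DNN_ERROR;
        }
    }
    return DNN_SUCCESS;
}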
@@ -47,10 +47,10 @@ typedef struct TaskItem {
 } TaskItem;
 
 // one task might have multiple inferences
-typedef struct InferenceItem {
+typedef struct LastLevelTaskItem {
     TaskItem *task;
     uint32_t bbox_index;
-} InferenceItem;
+} LastLevelTaskItem;
 
 /**
  * Common Async Execution Mechanism for the DNN Backends.
@@ -46,25 +46,25 @@ static const AVClass dnn_native_class = {
     .category = AV_CLASS_CATEGORY_FILTER,
 };
 
-static DNNReturnType execute_model_native(Queue *inference_queue);
+static DNNReturnType execute_model_native(Queue *lltask_queue);
 
-static DNNReturnType extract_inference_from_task(TaskItem *task, Queue *inference_queue)
+static DNNReturnType extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
 {
     NativeModel *native_model = task->model;
     NativeContext *ctx = &native_model->ctx;
-    InferenceItem *inference = av_malloc(sizeof(*inference));
+    LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
 
-    if (!inference) {
-        av_log(ctx, AV_LOG_ERROR, "Unable to allocate space for InferenceItem\n");
+    if (!lltask) {
+        av_log(ctx, AV_LOG_ERROR, "Unable to allocate space for LastLevelTaskItem\n");
         return DNN_ERROR;
     }
     task->inference_todo = 1;
     task->inference_done = 0;
-    inference->task = task;
+    lltask->task = task;
 
-    if (ff_queue_push_back(inference_queue, inference) < 0) {
-        av_log(ctx, AV_LOG_ERROR, "Failed to push back inference_queue.\n");
-        av_freep(&inference);
+    if (ff_queue_push_back(lltask_queue, lltask) < 0) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to push back lltask_queue.\n");
+        av_freep(&lltask);
         return DNN_ERROR;
     }
     return DNN_SUCCESS;
@@ -116,13 +116,13 @@ static DNNReturnType get_output_native(void *model, const char *input_name, int
         goto err;
     }
 
-    if (extract_inference_from_task(&task, native_model->inference_queue) != DNN_SUCCESS) {
-        av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
+    if (extract_lltask_from_task(&task, native_model->lltask_queue) != DNN_SUCCESS) {
+        av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
         ret = DNN_ERROR;
         goto err;
     }
 
-    ret = execute_model_native(native_model->inference_queue);
+    ret = execute_model_native(native_model->lltask_queue);
     *output_width = task.out_frame->width;
     *output_height = task.out_frame->height;
 
@@ -223,8 +223,8 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename, DNNFunctionType f
         goto fail;
     }
 
-    native_model->inference_queue = ff_queue_create();
-    if (!native_model->inference_queue) {
+    native_model->lltask_queue = ff_queue_create();
+    if (!native_model->lltask_queue) {
         goto fail;
     }
 
@@ -297,24 +297,24 @@ fail:
     return NULL;
 }
 
-static DNNReturnType execute_model_native(Queue *inference_queue)
+static DNNReturnType execute_model_native(Queue *lltask_queue)
 {
     NativeModel *native_model = NULL;
     NativeContext *ctx = NULL;
     int32_t layer;
     DNNData input, output;
     DnnOperand *oprd = NULL;
-    InferenceItem *inference = NULL;
+    LastLevelTaskItem *lltask = NULL;
     TaskItem *task = NULL;
     DNNReturnType ret = 0;
 
-    inference = ff_queue_pop_front(inference_queue);
-    if (!inference) {
-        av_log(NULL, AV_LOG_ERROR, "Failed to get inference item\n");
+    lltask = ff_queue_pop_front(lltask_queue);
+    if (!lltask) {
+        av_log(NULL, AV_LOG_ERROR, "Failed to get LastLevelTaskItem\n");
        ret = DNN_ERROR;
        goto err;
     }
-    task = inference->task;
+    task = lltask->task;
     native_model = task->model;
     ctx = &native_model->ctx;
 
@@ -428,7 +428,7 @@ static DNNReturnType execute_model_native(Queue *inference_queue)
     }
     task->inference_done++;
 err:
-    av_freep(&inference);
+    av_freep(&lltask);
     return ret;
 }
 
@@ -459,26 +459,26 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNExecBasePara
         return DNN_ERROR;
     }
 
-    if (extract_inference_from_task(task, native_model->inference_queue) != DNN_SUCCESS) {
-        av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
+    if (extract_lltask_from_task(task, native_model->lltask_queue) != DNN_SUCCESS) {
+        av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
         return DNN_ERROR;
     }
 
-    return execute_model_native(native_model->inference_queue);
+    return execute_model_native(native_model->lltask_queue);
 }
 
 DNNReturnType ff_dnn_flush_native(const DNNModel *model)
 {
     NativeModel *native_model = model->model;
 
-    if (ff_queue_size(native_model->inference_queue) == 0) {
+    if (ff_queue_size(native_model->lltask_queue) == 0) {
         // no pending task need to flush
         return DNN_SUCCESS;
     }
 
     // for now, use sync node with flush operation
     // Switch to async when it is supported
-    return execute_model_native(native_model->inference_queue);
+    return execute_model_native(native_model->lltask_queue);
 }
 
 DNNAsyncStatusType ff_dnn_get_result_native(const DNNModel *model, AVFrame **in, AVFrame **out)
@@ -536,11 +536,11 @@ void ff_dnn_free_model_native(DNNModel **model)
         av_freep(&native_model->operands);
     }
 
-    while (ff_queue_size(native_model->inference_queue) != 0) {
-        InferenceItem *item = ff_queue_pop_front(native_model->inference_queue);
+    while (ff_queue_size(native_model->lltask_queue) != 0) {
+        LastLevelTaskItem *item = ff_queue_pop_front(native_model->lltask_queue);
         av_freep(&item);
     }
-    ff_queue_destroy(native_model->inference_queue);
+    ff_queue_destroy(native_model->lltask_queue);
 
     while (ff_queue_size(native_model->task_queue) != 0) {
         TaskItem *item = ff_queue_pop_front(native_model->task_queue);
@@ -129,7 +129,7 @@ typedef struct NativeModel{
     DnnOperand *operands;
     int32_t operands_num;
     Queue *task_queue;
-    Queue *inference_queue;
+    Queue *lltask_queue;
 } NativeModel;
 
 DNNModel *ff_dnn_load_model_native(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx);
@@ -57,14 +57,14 @@ typedef struct OVModel{
     ie_executable_network_t *exe_network;
     SafeQueue *request_queue;   // holds OVRequestItem
     Queue *task_queue;          // holds TaskItem
-    Queue *inference_queue;     // holds InferenceItem
+    Queue *lltask_queue;        // holds LastLevelTaskItem
 } OVModel;
 
 // one request for one call to openvino
 typedef struct OVRequestItem {
     ie_infer_request_t *infer_request;
-    InferenceItem **inferences;
-    uint32_t inference_count;
+    LastLevelTaskItem **lltasks;
+    uint32_t lltask_count;
     ie_complete_call_back_t callback;
 } OVRequestItem;
 
@@ -121,12 +121,12 @@ static DNNReturnType fill_model_input_ov(OVModel *ov_model, OVRequestItem *reque
     IEStatusCode status;
     DNNData input;
     ie_blob_t *input_blob = NULL;
-    InferenceItem *inference;
+    LastLevelTaskItem *lltask;
     TaskItem *task;
 
-    inference = ff_queue_peek_front(ov_model->inference_queue);
-    av_assert0(inference);
-    task = inference->task;
+    lltask = ff_queue_peek_front(ov_model->lltask_queue);
+    av_assert0(lltask);
+    task = lltask->task;
 
     status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
     if (status != OK) {
@@ -159,13 +159,13 @@ static DNNReturnType fill_model_input_ov(OVModel *ov_model, OVRequestItem *reque
     input.order = DCO_BGR;
 
     for (int i = 0; i < ctx->options.batch_size; ++i) {
-        inference = ff_queue_pop_front(ov_model->inference_queue);
-        if (!inference) {
+        lltask = ff_queue_pop_front(ov_model->lltask_queue);
+        if (!lltask) {
            break;
        }
-        request->inferences[i] = inference;
-        request->inference_count = i + 1;
-        task = inference->task;
+        request->lltasks[i] = lltask;
+        request->lltask_count = i + 1;
+        task = lltask->task;
         switch (ov_model->model->func_type) {
         case DFT_PROCESS_FRAME:
             if (task->do_ioproc) {
@@ -180,7 +180,7 @@ static DNNReturnType fill_model_input_ov(OVModel *ov_model, OVRequestItem *reque
             ff_frame_to_dnn_detect(task->in_frame, &input, ctx);
             break;
         case DFT_ANALYTICS_CLASSIFY:
-            ff_frame_to_dnn_classify(task->in_frame, &input, inference->bbox_index, ctx);
+            ff_frame_to_dnn_classify(task->in_frame, &input, lltask->bbox_index, ctx);
             break;
         default:
             av_assert0(!"should not reach here");
@@ -200,8 +200,8 @@ static void infer_completion_callback(void *args)
     precision_e precision;
     IEStatusCode status;
     OVRequestItem *request = args;
-    InferenceItem *inference = request->inferences[0];
-    TaskItem *task = inference->task;
+    LastLevelTaskItem *lltask = request->lltasks[0];
+    TaskItem *task = lltask->task;
     OVModel *ov_model = task->model;
     SafeQueue *requestq = ov_model->request_queue;
     ie_blob_t *output_blob = NULL;
@@ -248,10 +248,10 @@ static void infer_completion_callback(void *args)
     output.dt = precision_to_datatype(precision);
     output.data = blob_buffer.buffer;
 
-    av_assert0(request->inference_count <= dims.dims[0]);
-    av_assert0(request->inference_count >= 1);
-    for (int i = 0; i < request->inference_count; ++i) {
-        task = request->inferences[i]->task;
+    av_assert0(request->lltask_count <= dims.dims[0]);
+    av_assert0(request->lltask_count >= 1);
+    for (int i = 0; i < request->lltask_count; ++i) {
+        task = request->lltasks[i]->task;
         task->inference_done++;
 
         switch (ov_model->model->func_type) {
@@ -279,20 +279,20 @@ static void infer_completion_callback(void *args)
                 av_log(ctx, AV_LOG_ERROR, "classify filter needs to provide post proc\n");
                 return;
             }
-            ov_model->model->classify_post_proc(task->in_frame, &output, request->inferences[i]->bbox_index, ov_model->model->filter_ctx);
+            ov_model->model->classify_post_proc(task->in_frame, &output, request->lltasks[i]->bbox_index, ov_model->model->filter_ctx);
             break;
         default:
             av_assert0(!"should not reach here");
             break;
         }
 
-        av_freep(&request->inferences[i]);
+        av_freep(&request->lltasks[i]);
         output.data = (uint8_t *)output.data
                       + output.width * output.height * output.channels * get_datatype_size(output.dt);
     }
     ie_blob_free(&output_blob);
 
-    request->inference_count = 0;
+    request->lltask_count = 0;
     if (ff_safe_queue_push_back(requestq, request) < 0) {
         ie_infer_request_free(&request->infer_request);
         av_freep(&request);
@@ -399,11 +399,11 @@ static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, co
             goto err;
         }
 
-        item->inferences = av_malloc_array(ctx->options.batch_size, sizeof(*item->inferences));
-        if (!item->inferences) {
+        item->lltasks = av_malloc_array(ctx->options.batch_size, sizeof(*item->lltasks));
+        if (!item->lltasks) {
             goto err;
         }
-        item->inference_count = 0;
+        item->lltask_count = 0;
     }
 
     ov_model->task_queue = ff_queue_create();
@@ -411,8 +411,8 @@ static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, co
         goto err;
     }
 
-    ov_model->inference_queue = ff_queue_create();
-    if (!ov_model->inference_queue) {
+    ov_model->lltask_queue = ff_queue_create();
+    if (!ov_model->lltask_queue) {
         goto err;
     }
 
@@ -427,7 +427,7 @@ static DNNReturnType execute_model_ov(OVRequestItem *request, Queue *inferenceq)
 {
     IEStatusCode status;
     DNNReturnType ret;
-    InferenceItem *inference;
+    LastLevelTaskItem *lltask;
     TaskItem *task;
     OVContext *ctx;
     OVModel *ov_model;
@@ -438,8 +438,8 @@ static DNNReturnType execute_model_ov(OVRequestItem *request, Queue *inferenceq)
         return DNN_SUCCESS;
     }
 
-    inference = ff_queue_peek_front(inferenceq);
-    task = inference->task;
+    lltask = ff_queue_peek_front(inferenceq);
+    task = lltask->task;
     ov_model = task->model;
     ctx = &ov_model->ctx;
 
@@ -567,21 +567,21 @@ static int contain_valid_detection_bbox(AVFrame *frame)
     return 1;
 }
 
-static DNNReturnType extract_inference_from_task(DNNFunctionType func_type, TaskItem *task, Queue *inference_queue, DNNExecBaseParams *exec_params)
+static DNNReturnType extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
 {
     switch (func_type) {
     case DFT_PROCESS_FRAME:
     case DFT_ANALYTICS_DETECT:
     {
-        InferenceItem *inference = av_malloc(sizeof(*inference));
-        if (!inference) {
+        LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
+        if (!lltask) {
             return DNN_ERROR;
         }
         task->inference_todo = 1;
         task->inference_done = 0;
-        inference->task = task;
-        if (ff_queue_push_back(inference_queue, inference) < 0) {
-            av_freep(&inference);
+        lltask->task = task;
+        if (ff_queue_push_back(lltask_queue, lltask) < 0) {
+            av_freep(&lltask);
             return DNN_ERROR;
         }
         return DNN_SUCCESS;
@@ -604,7 +604,7 @@ static DNNReturnType extract_inference_from_task(DNNFunctionType func_type, Task
         header = (const AVDetectionBBoxHeader *)sd->data;
 
         for (uint32_t i = 0; i < header->nb_bboxes; i++) {
-            InferenceItem *inference;
+            LastLevelTaskItem *lltask;
             const AVDetectionBBox *bbox = av_get_detection_bbox(header, i);
 
             if (params->target) {
@@ -613,15 +613,15 @@ static DNNReturnType extract_inference_from_task(DNNFunctionType func_type, Task
                 }
             }
 
-            inference = av_malloc(sizeof(*inference));
-            if (!inference) {
+            lltask = av_malloc(sizeof(*lltask));
+            if (!lltask) {
                 return DNN_ERROR;
             }
             task->inference_todo++;
-            inference->task = task;
-            inference->bbox_index = i;
-            if (ff_queue_push_back(inference_queue, inference) < 0) {
-                av_freep(&inference);
+            lltask->task = task;
+            lltask->bbox_index = i;
+            if (ff_queue_push_back(lltask_queue, lltask) < 0) {
+                av_freep(&lltask);
                 return DNN_ERROR;
             }
         }
@@ -679,8 +679,8 @@ static DNNReturnType get_output_ov(void *model, const char *input_name, int inpu
         return DNN_ERROR;
     }
 
-    if (extract_inference_from_task(ov_model->model->func_type, &task, ov_model->inference_queue, NULL) != DNN_SUCCESS) {
-        av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
+    if (extract_lltask_from_task(ov_model->model->func_type, &task, ov_model->lltask_queue, NULL) != DNN_SUCCESS) {
+        av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
         ret = DNN_ERROR;
         goto err;
     }
@@ -692,7 +692,7 @@ static DNNReturnType get_output_ov(void *model, const char *input_name, int inpu
         goto err;
     }
 
-    ret = execute_model_ov(request, ov_model->inference_queue);
+    ret = execute_model_ov(request, ov_model->lltask_queue);
     *output_width = task.out_frame->width;
     *output_height = task.out_frame->height;
 err:
@@ -794,20 +794,20 @@ DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *
         return DNN_ERROR;
     }
 
-    if (extract_inference_from_task(model->func_type, task, ov_model->inference_queue, exec_params) != DNN_SUCCESS) {
+    if (extract_lltask_from_task(model->func_type, task, ov_model->lltask_queue, exec_params) != DNN_SUCCESS) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
         return DNN_ERROR;
     }
 
     if (ctx->options.async) {
-        while (ff_queue_size(ov_model->inference_queue) >= ctx->options.batch_size) {
+        while (ff_queue_size(ov_model->lltask_queue) >= ctx->options.batch_size) {
             request = ff_safe_queue_pop_front(ov_model->request_queue);
             if (!request) {
                 av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
                 return DNN_ERROR;
             }
 
-            ret = execute_model_ov(request, ov_model->inference_queue);
+            ret = execute_model_ov(request, ov_model->lltask_queue);
             if (ret != DNN_SUCCESS) {
                 return ret;
             }
@@ -833,7 +833,7 @@ DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *
             av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
             return DNN_ERROR;
         }
-        return execute_model_ov(request, ov_model->inference_queue);
+        return execute_model_ov(request, ov_model->lltask_queue);
     }
 }
 
@@ -851,7 +851,7 @@ DNNReturnType ff_dnn_flush_ov(const DNNModel *model)
     IEStatusCode status;
     DNNReturnType ret;
 
-    if (ff_queue_size(ov_model->inference_queue) == 0) {
+    if (ff_queue_size(ov_model->lltask_queue) == 0) {
         // no pending task need to flush
         return DNN_SUCCESS;
     }
@@ -890,16 +890,16 @@ void ff_dnn_free_model_ov(DNNModel **model)
         if (item && item->infer_request) {
             ie_infer_request_free(&item->infer_request);
         }
-        av_freep(&item->inferences);
+        av_freep(&item->lltasks);
         av_freep(&item);
     }
     ff_safe_queue_destroy(ov_model->request_queue);
 
-    while (ff_queue_size(ov_model->inference_queue) != 0) {
-        InferenceItem *item = ff_queue_pop_front(ov_model->inference_queue);
+    while (ff_queue_size(ov_model->lltask_queue) != 0) {
+        LastLevelTaskItem *item = ff_queue_pop_front(ov_model->lltask_queue);
         av_freep(&item);
     }
-    ff_queue_destroy(ov_model->inference_queue);
+    ff_queue_destroy(ov_model->lltask_queue);
 
     while (ff_queue_size(ov_model->task_queue) != 0) {
         TaskItem *item = ff_queue_pop_front(ov_model->task_queue);
@@ -58,7 +58,7 @@ typedef struct TFModel{
     TF_Session *session;
     TF_Status *status;
     SafeQueue *request_queue;
-    Queue *inference_queue;
+    Queue *lltask_queue;
     Queue *task_queue;
 } TFModel;
 
@@ -75,7 +75,7 @@ typedef struct TFInferRequest {
 
 typedef struct TFRequestItem {
     TFInferRequest *infer_request;
-    InferenceItem *inference;
+    LastLevelTaskItem *lltask;
     TF_Status *status;
     DNNAsyncExecModule exec_module;
 } TFRequestItem;
@@ -90,7 +90,7 @@ static const AVOption dnn_tensorflow_options[] = {
 
 AVFILTER_DEFINE_CLASS(dnn_tensorflow);
 
-static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *inference_queue);
+static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *lltask_queue);
 static void infer_completion_callback(void *args);
 static inline void destroy_request_item(TFRequestItem **arg);
 
@@ -158,8 +158,8 @@ static DNNReturnType tf_start_inference(void *args)
 {
     TFRequestItem *request = args;
     TFInferRequest *infer_request = request->infer_request;
-    InferenceItem *inference = request->inference;
-    TaskItem *task = inference->task;
+    LastLevelTaskItem *lltask = request->lltask;
+    TaskItem *task = lltask->task;
     TFModel *tf_model = task->model;
 
     if (!request) {
@@ -196,27 +196,27 @@ static inline void destroy_request_item(TFRequestItem **arg) {
     request = *arg;
     tf_free_request(request->infer_request);
     av_freep(&request->infer_request);
-    av_freep(&request->inference);
+    av_freep(&request->lltask);
     TF_DeleteStatus(request->status);
     ff_dnn_async_module_cleanup(&request->exec_module);
     av_freep(arg);
 }
 
-static DNNReturnType extract_inference_from_task(TaskItem *task, Queue *inference_queue)
+static DNNReturnType extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
 {
     TFModel *tf_model = task->model;
     TFContext *ctx = &tf_model->ctx;
-    InferenceItem *inference = av_malloc(sizeof(*inference));
-    if (!inference) {
-        av_log(ctx, AV_LOG_ERROR, "Unable to allocate space for InferenceItem\n");
+    LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
+    if (!lltask) {
+        av_log(ctx, AV_LOG_ERROR, "Unable to allocate space for LastLevelTaskItem\n");
         return DNN_ERROR;
     }
     task->inference_todo = 1;
     task->inference_done = 0;
-    inference->task = task;
-    if (ff_queue_push_back(inference_queue, inference) < 0) {
-        av_log(ctx, AV_LOG_ERROR, "Failed to push back inference_queue.\n");
-        av_freep(&inference);
+    lltask->task = task;
+    if (ff_queue_push_back(lltask_queue, lltask) < 0) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to push back lltask_queue.\n");
+        av_freep(&lltask);
        return DNN_ERROR;
     }
     return DNN_SUCCESS;
@@ -333,7 +333,7 @@ static DNNReturnType get_output_tf(void *model, const char *input_name, int inpu
         goto err;
     }
 
-    if (extract_inference_from_task(&task, tf_model->inference_queue) != DNN_SUCCESS) {
+    if (extract_lltask_from_task(&task, tf_model->lltask_queue) != DNN_SUCCESS) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
         ret = DNN_ERROR;
         goto err;
@@ -346,7 +346,7 @@ static DNNReturnType get_output_tf(void *model, const char *input_name, int inpu
         goto err;
     }
 
-    ret = execute_model_tf(request, tf_model->inference_queue);
+    ret = execute_model_tf(request, tf_model->lltask_queue);
     *output_width = task.out_frame->width;
     *output_height = task.out_frame->height;
 
@@ -901,7 +901,7 @@ DNNModel *ff_dnn_load_model_tf(const char *model_filename, DNNFunctionType func_
         if (!item) {
             goto err;
         }
-        item->inference = NULL;
+        item->lltask = NULL;
         item->infer_request = tf_create_inference_request();
         if (!item->infer_request) {
             av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for TensorFlow inference request\n");
@@ -919,8 +919,8 @@ DNNModel *ff_dnn_load_model_tf(const char *model_filename, DNNFunctionType func_
         }
     }
 
-    tf_model->inference_queue = ff_queue_create();
-    if (!tf_model->inference_queue) {
+    tf_model->lltask_queue = ff_queue_create();
+    if (!tf_model->lltask_queue) {
         goto err;
     }
 
@@ -944,15 +944,15 @@ err:
 
 static DNNReturnType fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
     DNNData input;
-    InferenceItem *inference;
+    LastLevelTaskItem *lltask;
     TaskItem *task;
     TFInferRequest *infer_request;
     TFContext *ctx = &tf_model->ctx;
 
-    inference = ff_queue_pop_front(tf_model->inference_queue);
-    av_assert0(inference);
-    task = inference->task;
-    request->inference = inference;
+    lltask = ff_queue_pop_front(tf_model->lltask_queue);
+    av_assert0(lltask);
+    task = lltask->task;
+    request->lltask = lltask;
 
     if (get_input_tf(tf_model, &input, task->input_name) != DNN_SUCCESS) {
         goto err;
@@ -1030,8 +1030,8 @@ err:
 
 static void infer_completion_callback(void *args) {
     TFRequestItem *request = args;
-    InferenceItem *inference = request->inference;
-    TaskItem *task = inference->task;
+    LastLevelTaskItem *lltask = request->lltask;
+    TaskItem *task = lltask->task;
     DNNData *outputs;
     TFInferRequest *infer_request = request->infer_request;
     TFModel *tf_model = task->model;
@@ -1086,20 +1086,20 @@ err:
     }
 }
 
-static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *inference_queue)
+static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
 {
     TFModel *tf_model;
     TFContext *ctx;
-    InferenceItem *inference;
+    LastLevelTaskItem *lltask;
     TaskItem *task;
 
-    if (ff_queue_size(inference_queue) == 0) {
+    if (ff_queue_size(lltask_queue) == 0) {
         destroy_request_item(&request);
         return DNN_SUCCESS;
     }
 
-    inference = ff_queue_peek_front(inference_queue);
-    task = inference->task;
+    lltask = ff_queue_peek_front(lltask_queue);
+    task = lltask->task;
     tf_model = task->model;
     ctx = &tf_model->ctx;
 
@@ -1155,8 +1155,8 @@ DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *
         return DNN_ERROR;
     }
 
-    if (extract_inference_from_task(task, tf_model->inference_queue) != DNN_SUCCESS) {
-        av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
+    if (extract_lltask_from_task(task, tf_model->lltask_queue) != DNN_SUCCESS) {
+        av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
         return DNN_ERROR;
     }
 
@@ -1165,7 +1165,7 @@ DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *
         av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
         return DNN_ERROR;
     }
-    return execute_model_tf(request, tf_model->inference_queue);
+    return execute_model_tf(request, tf_model->lltask_queue);
 }
 
 DNNAsyncStatusType ff_dnn_get_result_tf(const DNNModel *model, AVFrame **in, AVFrame **out)
@@ -1181,7 +1181,7 @@ DNNReturnType ff_dnn_flush_tf(const DNNModel *model)
     TFRequestItem *request;
     DNNReturnType ret;
 
-    if (ff_queue_size(tf_model->inference_queue) == 0) {
+    if (ff_queue_size(tf_model->lltask_queue) == 0) {
         // no pending task need to flush
         return DNN_SUCCESS;
     }
@@ -1216,11 +1216,11 @@ void ff_dnn_free_model_tf(DNNModel **model)
     }
     ff_safe_queue_destroy(tf_model->request_queue);
 
-    while (ff_queue_size(tf_model->inference_queue) != 0) {
-        InferenceItem *item = ff_queue_pop_front(tf_model->inference_queue);
+    while (ff_queue_size(tf_model->lltask_queue) != 0) {
+        LastLevelTaskItem *item = ff_queue_pop_front(tf_model->lltask_queue);
         av_freep(&item);
     }
-    ff_queue_destroy(tf_model->inference_queue);
+    ff_queue_destroy(tf_model->lltask_queue);
 
     while (ff_queue_size(tf_model->task_queue) != 0) {
         TaskItem *item = ff_queue_pop_front(tf_model->task_queue);
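One piece of accounting the rename deliberately leaves alone: inference_todo and inference_done stay on TaskItem, so a task is complete only once all of its last level tasks have reported back, however many bounding boxes fanned out. A sketch of that check under the struct layout shown above (task_is_done is a hypothetical helper, not part of this patch):

static int task_is_done(const TaskItem *task)
{
    /* each completed LastLevelTaskItem increments inference_done;
       see the task->inference_done++ lines in the hunks above */
    return task->inference_done == task->inference_todo;
}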