mirror of
https://github.com/FFmpeg/FFmpeg.git
synced 2024-12-28 20:53:54 +02:00
lavfi/dnn_backend_openvino.c: move the logic for batch mode earlier
This commit is contained in:
parent
e37cc72387
commit
7eb9accc37
@@ -432,13 +432,6 @@ static DNNReturnType execute_model_ov(RequestItem *request, Queue *inferenceq)
|
||||
ctx = &task->ov_model->ctx;
|
||||
|
||||
if (task->async) {
|
||||
if (ff_queue_size(inferenceq) < ctx->options.batch_size) {
|
||||
if (ff_safe_queue_push_front(task->ov_model->request_queue, request) < 0) {
|
||||
av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
|
||||
return DNN_ERROR;
|
||||
}
|
||||
return DNN_SUCCESS;
|
||||
}
|
||||
ret = fill_model_input_ov(task->ov_model, request);
|
||||
if (ret != DNN_SUCCESS) {
|
||||
return ret;
|
||||
@@ -793,6 +786,11 @@ DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, const char *i
|
||||
return DNN_ERROR;
|
||||
}
|
||||
|
||||
if (ff_queue_size(ov_model->inference_queue) < ctx->options.batch_size) {
|
||||
// not enough inference items queued for a batch
|
||||
return DNN_SUCCESS;
|
||||
}
|
||||
|
||||
request = ff_safe_queue_pop_front(ov_model->request_queue);
|
||||
if (!request) {
|
||||
av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
|
||||
|
Loading…
Reference in New Issue
Block a user