
lavfi/dnn: DNNAsyncExecModule Execution Failure Handling

This commit adds handling for the case where the asynchronous execution
of a request fails: the exit status of the worker thread is checked when
it is joined, before another execution is started. On failure, the
necessary cleanup is performed as well.

Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
Author: Shubhanshu Saxena, 2021-08-08 16:25:39 +05:30; committed by Guo Yejun
parent 371e5672f3
commit 2063745a93
2 changed files with 28 additions and 5 deletions
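
For readers unfamiliar with the mechanism the commit relies on, the
standalone sketch below (plain C, not FFmpeg code; every name in it is
illustrative) shows how a worker thread can report failure through its
exit status, and how the joining thread inspects that status via
pthread_join() before launching the next job:

    #include <pthread.h>
    #include <stdio.h>

    #define ASYNC_SUCCESS ((void *)0)
    #define ASYNC_FAIL    ((void *)-1)

    /* Worker: report success or failure through the thread's exit status. */
    static void *thread_routine(void *arg)
    {
        int will_fail = *(int *)arg;
        if (will_fail)
            return ASYNC_FAIL;
        /* ... run the inference and its completion callback here ... */
        return ASYNC_SUCCESS;
    }

    int main(void)
    {
        pthread_t tid;
        void *status = NULL;
        int will_fail = 1;

        pthread_create(&tid, NULL, thread_routine, &will_fail);

        /* Join and inspect the exit status before starting the next job. */
        pthread_join(tid, &status);
        if (status == ASYNC_FAIL) {
            fprintf(stderr, "last inference failed\n");
            return 1;
        }
        return 0;
    }

Casting a small integer such as -1 to void * is a common way to encode a
sentinel exit status without allocating memory, which is what the
DNN_ASYNC_FAIL define introduced below does.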

libavfilter/dnn/dnn_backend_common.c

@@ -23,6 +23,9 @@
 #include "dnn_backend_common.h"
+#define DNN_ASYNC_SUCCESS (void *)0
+#define DNN_ASYNC_FAIL (void *)-1
 int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
 {
     if (!exec_params) {
@@ -79,18 +82,25 @@ static void *async_thread_routine(void *args)
     DNNAsyncExecModule *async_module = args;
     void *request = async_module->args;
-    async_module->start_inference(request);
+    if (async_module->start_inference(request) != DNN_SUCCESS) {
+        return DNN_ASYNC_FAIL;
+    }
     async_module->callback(request);
-    return NULL;
+    return DNN_ASYNC_SUCCESS;
 }
 DNNReturnType ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module)
 {
+    void *status = 0;
     if (!async_module) {
         return DNN_ERROR;
     }
 #if HAVE_PTHREAD_CANCEL
-    pthread_join(async_module->thread_id, NULL);
+    pthread_join(async_module->thread_id, &status);
+    if (status == DNN_ASYNC_FAIL) {
+        av_log(NULL, AV_LOG_ERROR, "Last Inference Failed.\n");
+        return DNN_ERROR;
+    }
 #endif
     async_module->start_inference = NULL;
     async_module->callback = NULL;
@@ -101,6 +111,7 @@ DNNReturnType ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module)
 DNNReturnType ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module)
 {
     int ret;
+    void *status = 0;
     if (!async_module) {
         av_log(ctx, AV_LOG_ERROR, "async_module is null when starting async inference.\n");
@@ -108,7 +119,11 @@ DNNReturnType ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_
     }
 #if HAVE_PTHREAD_CANCEL
-    pthread_join(async_module->thread_id, NULL);
+    pthread_join(async_module->thread_id, &status);
+    if (status == DNN_ASYNC_FAIL) {
+        av_log(ctx, AV_LOG_ERROR, "Unable to start inference as previous inference failed.\n");
+        return DNN_ERROR;
+    }
     ret = pthread_create(&async_module->thread_id, NULL, async_thread_routine, async_module);
     if (ret != 0) {
         av_log(ctx, AV_LOG_ERROR, "Unable to start async inference.\n");

libavfilter/dnn/dnn_backend_tf.c

@@ -91,6 +91,7 @@ AVFILTER_DEFINE_CLASS(dnn_tensorflow);
 static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *inference_queue);
 static void infer_completion_callback(void *args);
+static inline void destroy_request_item(TFRequestItem **arg);
 static void free_buffer(void *data, size_t length)
 {
@@ -172,6 +173,10 @@ static DNNReturnType tf_start_inference(void *args)
                   request->status);
     if (TF_GetCode(request->status) != TF_OK) {
         av_log(&tf_model->ctx, AV_LOG_ERROR, "%s", TF_Message(request->status));
+        tf_free_request(infer_request);
+        if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
+            destroy_request_item(&request);
+        }
         return DNN_ERROR;
     }
     return DNN_SUCCESS;
@@ -1095,7 +1100,10 @@ static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *inference_q
     }
     if (task->async) {
-        return ff_dnn_start_inference_async(ctx, &request->exec_module);
+        if (ff_dnn_start_inference_async(ctx, &request->exec_module) != DNN_SUCCESS) {
+            goto err;
+        }
+        return DNN_SUCCESS;
     } else {
         if (tf_start_inference(request) != DNN_SUCCESS) {
             goto err;
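
The tf_start_inference() hunk above follows a recycle-or-destroy cleanup
pattern: on failure it frees the per-request TF state, then tries to
return the request item to the free queue, and destroys the item outright
only if the push fails. A minimal standalone sketch of that pattern
(plain C; the queue and request types are simplified stand-ins, not
FFmpeg's SafeQueue and TFRequestItem):

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the backend's request item and free queue. */
    typedef struct RequestItem { void *infer_request; } RequestItem;
    typedef struct Queue { RequestItem *slots[8]; int count; } Queue;

    static int queue_push_back(Queue *q, RequestItem *item)
    {
        if (q->count >= 8)
            return -1;                    /* push failed */
        q->slots[q->count++] = item;
        return 0;
    }

    static void destroy_request_item(RequestItem **item)
    {
        free((*item)->infer_request);
        free(*item);
        *item = NULL;
    }

    /* On inference failure: free transient per-request state, then try to
     * recycle the item into the free queue; destroy it only if that fails. */
    static int handle_inference_failure(Queue *free_queue, RequestItem *request)
    {
        free(request->infer_request);     /* analogue of tf_free_request() */
        request->infer_request = NULL;
        if (queue_push_back(free_queue, request) < 0)
            destroy_request_item(&request);
        return -1;                        /* analogue of DNN_ERROR */
    }

    int main(void)
    {
        Queue free_queue = { {0}, 0 };
        RequestItem *req = calloc(1, sizeof(*req));
        handle_inference_failure(&free_queue, req);
        printf("items back in free queue: %d\n", free_queue.count);
        return 0;
    }

Recycling the item keeps the request pool at its configured size across
failures, so a single failed inference does not shrink the queue for
subsequent requests.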