You've already forked FFmpeg
mirror of
https://github.com/FFmpeg/FFmpeg.git
synced 2025-08-15 14:13:16 +02:00
avcodec: v4l2_m2m: remove unnecessary timeout.
Qualcomm's db410c/db820 Venus driver currently present in the mainline kernel has a bug which mishandles CMD_STOP requests, causing the decoder to block while draining [1]. This patch removes the workaround that was used to prevent that situation. Encoding/decoding tested on db820c. [1] On CMD_STOP, the driver flushes all buffers and never raises IPIPE, which ends up blocking on poll.
This commit is contained in:
committed by
Mark Thompson
parent
a0c624e299
commit
5d5de3eba4
@@ -213,8 +213,14 @@ static void v4l2_free_buffer(void *opaque, uint8_t *unused)
|
|||||||
if (s->reinit) {
|
if (s->reinit) {
|
||||||
if (!atomic_load(&s->refcount))
|
if (!atomic_load(&s->refcount))
|
||||||
sem_post(&s->refsync);
|
sem_post(&s->refsync);
|
||||||
} else if (avbuf->context->streamon)
|
} else {
|
||||||
|
if (s->draining) {
|
||||||
|
/* no need to queue more buffers to the driver */
|
||||||
|
avbuf->status = V4L2BUF_AVAILABLE;
|
||||||
|
}
|
||||||
|
else if (avbuf->context->streamon)
|
||||||
ff_v4l2_buffer_enqueue(avbuf);
|
ff_v4l2_buffer_enqueue(avbuf);
|
||||||
|
}
|
||||||
|
|
||||||
av_buffer_unref(&avbuf->context_ref);
|
av_buffer_unref(&avbuf->context_ref);
|
||||||
}
|
}
|
||||||
|
@@ -217,6 +217,7 @@ static int v4l2_stop_decode(V4L2Context *ctx)
|
|||||||
{
|
{
|
||||||
struct v4l2_decoder_cmd cmd = {
|
struct v4l2_decoder_cmd cmd = {
|
||||||
.cmd = V4L2_DEC_CMD_STOP,
|
.cmd = V4L2_DEC_CMD_STOP,
|
||||||
|
.flags = 0,
|
||||||
};
|
};
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
@@ -234,6 +235,7 @@ static int v4l2_stop_encode(V4L2Context *ctx)
|
|||||||
{
|
{
|
||||||
struct v4l2_encoder_cmd cmd = {
|
struct v4l2_encoder_cmd cmd = {
|
||||||
.cmd = V4L2_ENC_CMD_STOP,
|
.cmd = V4L2_ENC_CMD_STOP,
|
||||||
|
.flags = 0,
|
||||||
};
|
};
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
@@ -256,10 +258,26 @@ static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout)
|
|||||||
.events = POLLIN | POLLRDNORM | POLLPRI | POLLOUT | POLLWRNORM, /* default blocking capture */
|
.events = POLLIN | POLLRDNORM | POLLPRI | POLLOUT | POLLWRNORM, /* default blocking capture */
|
||||||
.fd = ctx_to_m2mctx(ctx)->fd,
|
.fd = ctx_to_m2mctx(ctx)->fd,
|
||||||
};
|
};
|
||||||
int ret;
|
int i, ret;
|
||||||
|
|
||||||
|
/* if we are draining and there are no more capture buffers queued in the driver we are done */
|
||||||
|
if (!V4L2_TYPE_IS_OUTPUT(ctx->type) && ctx_to_m2mctx(ctx)->draining) {
|
||||||
|
for (i = 0; i < ctx->num_buffers; i++) {
|
||||||
|
if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER)
|
||||||
|
goto start;
|
||||||
|
}
|
||||||
|
ctx->done = 1;
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
start:
|
||||||
if (V4L2_TYPE_IS_OUTPUT(ctx->type))
|
if (V4L2_TYPE_IS_OUTPUT(ctx->type))
|
||||||
pfd.events = POLLOUT | POLLWRNORM;
|
pfd.events = POLLOUT | POLLWRNORM;
|
||||||
|
else {
|
||||||
|
/* no need to listen to requests for more input while draining */
|
||||||
|
if (ctx_to_m2mctx(ctx)->draining)
|
||||||
|
pfd.events = POLLIN | POLLRDNORM | POLLPRI;
|
||||||
|
}
|
||||||
|
|
||||||
for (;;) {
|
for (;;) {
|
||||||
ret = poll(&pfd, 1, timeout);
|
ret = poll(&pfd, 1, timeout);
|
||||||
@@ -267,11 +285,6 @@ static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout)
|
|||||||
break;
|
break;
|
||||||
if (errno == EINTR)
|
if (errno == EINTR)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
/* timeout is being used to indicate last valid bufer when draining */
|
|
||||||
if (ctx_to_m2mctx(ctx)->draining)
|
|
||||||
ctx->done = 1;
|
|
||||||
|
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -286,7 +299,7 @@ static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout)
|
|||||||
ret = v4l2_handle_event(ctx);
|
ret = v4l2_handle_event(ctx);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
/* if re-init failed, abort */
|
/* if re-init failed, abort */
|
||||||
ctx->done = EINVAL;
|
ctx->done = 1;
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
if (ret) {
|
if (ret) {
|
||||||
@@ -325,12 +338,14 @@ dequeue:
|
|||||||
ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DQBUF, &buf);
|
ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DQBUF, &buf);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
if (errno != EAGAIN) {
|
if (errno != EAGAIN) {
|
||||||
ctx->done = errno;
|
ctx->done = 1;
|
||||||
if (errno != EPIPE)
|
if (errno != EPIPE)
|
||||||
av_log(logger(ctx), AV_LOG_DEBUG, "%s VIDIOC_DQBUF, errno (%s)\n",
|
av_log(logger(ctx), AV_LOG_DEBUG, "%s VIDIOC_DQBUF, errno (%s)\n",
|
||||||
ctx->name, av_err2str(AVERROR(errno)));
|
ctx->name, av_err2str(AVERROR(errno)));
|
||||||
}
|
}
|
||||||
} else {
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
avbuf = &ctx->buffers[buf.index];
|
avbuf = &ctx->buffers[buf.index];
|
||||||
avbuf->status = V4L2BUF_AVAILABLE;
|
avbuf->status = V4L2BUF_AVAILABLE;
|
||||||
avbuf->buf = buf;
|
avbuf->buf = buf;
|
||||||
@@ -338,10 +353,10 @@ dequeue:
|
|||||||
memcpy(avbuf->planes, planes, sizeof(planes));
|
memcpy(avbuf->planes, planes, sizeof(planes));
|
||||||
avbuf->buf.m.planes = avbuf->planes;
|
avbuf->buf.m.planes = avbuf->planes;
|
||||||
}
|
}
|
||||||
}
|
return avbuf;
|
||||||
}
|
}
|
||||||
|
|
||||||
return avbuf;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static V4L2Buffer* v4l2_getfree_v4l2buf(V4L2Context *ctx)
|
static V4L2Buffer* v4l2_getfree_v4l2buf(V4L2Context *ctx)
|
||||||
@@ -552,14 +567,12 @@ int ff_v4l2_context_dequeue_frame(V4L2Context* ctx, AVFrame* frame)
|
|||||||
{
|
{
|
||||||
V4L2Buffer* avbuf = NULL;
|
V4L2Buffer* avbuf = NULL;
|
||||||
|
|
||||||
/* if we are draining, we are no longer inputing data, therefore enable a
|
/*
|
||||||
* timeout so we can dequeue and flag the last valid buffer.
|
|
||||||
*
|
|
||||||
* blocks until:
|
* blocks until:
|
||||||
* 1. decoded frame available
|
* 1. decoded frame available
|
||||||
* 2. an input buffer is ready to be dequeued
|
* 2. an input buffer is ready to be dequeued
|
||||||
*/
|
*/
|
||||||
avbuf = v4l2_dequeue_v4l2buf(ctx, ctx_to_m2mctx(ctx)->draining ? 200 : -1);
|
avbuf = v4l2_dequeue_v4l2buf(ctx, -1);
|
||||||
if (!avbuf) {
|
if (!avbuf) {
|
||||||
if (ctx->done)
|
if (ctx->done)
|
||||||
return AVERROR_EOF;
|
return AVERROR_EOF;
|
||||||
@@ -574,14 +587,12 @@ int ff_v4l2_context_dequeue_packet(V4L2Context* ctx, AVPacket* pkt)
|
|||||||
{
|
{
|
||||||
V4L2Buffer* avbuf = NULL;
|
V4L2Buffer* avbuf = NULL;
|
||||||
|
|
||||||
/* if we are draining, we are no longer inputing data, therefore enable a
|
/*
|
||||||
* timeout so we can dequeue and flag the last valid buffer.
|
|
||||||
*
|
|
||||||
* blocks until:
|
* blocks until:
|
||||||
* 1. encoded packet available
|
* 1. encoded packet available
|
||||||
* 2. an input buffer ready to be dequeued
|
* 2. an input buffer ready to be dequeued
|
||||||
*/
|
*/
|
||||||
avbuf = v4l2_dequeue_v4l2buf(ctx, ctx_to_m2mctx(ctx)->draining ? 200 : -1);
|
avbuf = v4l2_dequeue_v4l2buf(ctx, -1);
|
||||||
if (!avbuf) {
|
if (!avbuf) {
|
||||||
if (ctx->done)
|
if (ctx->done)
|
||||||
return AVERROR_EOF;
|
return AVERROR_EOF;
|
||||||
|
Reference in New Issue
Block a user