
lavc: do not implicitly share the frame pool between threads

Currently the frame pool used by the default get_buffer2()
implementation is a single struct, allocated when opening the decoder.
A pointer to it is simply copied to each frame thread and we assume that
no thread attempts to modify it at an unexpected time. This is rather
fragile and potentially dangerous.

With this commit, the frame pool is made refcounted, with the reference
being propagated across threads along with other context variables. The
frame pool is now also immutable - when the stream parameters change we
drop the old reference and create a new one.
Anton Khirnov 2017-01-17 16:28:30 +01:00
parent 2944537430
commit 9d6785d426
4 changed files with 99 additions and 57 deletions
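
For reference, the pattern the patch builds on is libavutil's generic refcounted buffer API (av_buffer_create(), av_buffer_ref(), av_buffer_unref()). The following sketch is not part of the patch; the names MyPool, my_pool_alloc() and share_pool() are illustrative placeholders. It only shows the idea in isolation: wrap a heap-allocated parameter struct in an AVBufferRef with a custom free callback, then let every frame thread hold its own reference instead of a copied raw pointer.

/* Minimal sketch of the refcounting pattern (illustrative names,
 * not the functions added by this commit). */
#include <stdint.h>
#include <libavutil/buffer.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

typedef struct MyPool {
    /* immutable parameters describing the pool */
    int width, height, format;
} MyPool;

/* Free callback invoked by the buffer API once the last reference is gone. */
static void my_pool_free(void *opaque, uint8_t *data)
{
    av_freep(&data);   /* 'opaque' is unused here */
}

static AVBufferRef *my_pool_alloc(int w, int h, int fmt)
{
    MyPool      *pool = av_mallocz(sizeof(*pool));
    AVBufferRef *buf;

    if (!pool)
        return NULL;
    pool->width  = w;
    pool->height = h;
    pool->format = fmt;

    /* Wrap the struct in a refcounted buffer with a custom destructor. */
    buf = av_buffer_create((uint8_t *)pool, sizeof(*pool),
                           my_pool_free, NULL, 0);
    if (!buf)
        av_freep(&pool);
    return buf;
}

/* Give a thread its own reference; unreferencing it later can never free
 * memory that another thread still uses. */
static int share_pool(AVBufferRef **dst, AVBufferRef *src)
{
    av_buffer_unref(dst);
    if (src) {
        *dst = av_buffer_ref(src);
        if (!*dst)
            return AVERROR(ENOMEM);
    }
    return 0;
}

Because the payload is freed only when the last reference is dropped, a worker thread that still holds an old pool keeps it alive even after the owning context has replaced it with a new one; combined with the pool being immutable after creation, this is what makes sharing it across frame threads safe without extra locking.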


@@ -45,6 +45,25 @@
 #include "internal.h"
 #include "thread.h"
+typedef struct FramePool {
+    /**
+     * Pools for each data plane. For audio all the planes have the same size,
+     * so only pools[0] is used.
+     */
+    AVBufferPool *pools[4];
+    /*
+     * Pool parameters
+     */
+    int format;
+    int width, height;
+    int stride_align[AV_NUM_DATA_POINTERS];
+    int linesize[4];
+    int planes;
+    int channels;
+    int samples;
+} FramePool;
 static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
 {
     int size = 0, ret;
@@ -1504,10 +1523,61 @@ int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
     return ret;
 }
+static void frame_pool_free(void *opaque, uint8_t *data)
+{
+    FramePool *pool = (FramePool*)data;
+    int i;
+    for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++)
+        av_buffer_pool_uninit(&pool->pools[i]);
+    av_freep(&data);
+}
+static AVBufferRef *frame_pool_alloc(void)
+{
+    FramePool *pool = av_mallocz(sizeof(*pool));
+    AVBufferRef *buf;
+    if (!pool)
+        return NULL;
+    buf = av_buffer_create((uint8_t*)pool, sizeof(*pool),
+                           frame_pool_free, NULL, 0);
+    if (!buf) {
+        av_freep(&pool);
+        return NULL;
+    }
+    return buf;
+}
 static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
 {
-    FramePool *pool = avctx->internal->pool;
-    int i, ret;
+    FramePool *pool = avctx->internal->pool ?
+                      (FramePool*)avctx->internal->pool->data : NULL;
+    AVBufferRef *pool_buf;
+    int i, ret, ch, planes;
+    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+        int planar = av_sample_fmt_is_planar(frame->format);
+        ch     = frame->channels;
+        planes = planar ? ch : 1;
+    }
+    if (pool && pool->format == frame->format) {
+        if (avctx->codec_type == AVMEDIA_TYPE_VIDEO &&
+            pool->width == frame->width && pool->height == frame->height)
+            return 0;
+        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && pool->planes == planes &&
+            pool->channels == ch && frame->nb_samples == pool->samples)
+            return 0;
+    }
+    pool_buf = frame_pool_alloc();
+    if (!pool_buf)
+        return AVERROR(ENOMEM);
+    pool = (FramePool*)pool_buf->data;
     switch (avctx->codec_type) {
     case AVMEDIA_TYPE_VIDEO: {
@@ -1518,10 +1588,6 @@ static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
         int h = frame->height;
         int tmpsize, unaligned;
-        if (pool->format == frame->format &&
-            pool->width == frame->width && pool->height == frame->height)
-            return 0;
         avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
         do {
@@ -1529,7 +1595,7 @@ static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
            // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
            ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
            if (ret < 0)
-                return ret;
+                goto fail;
            // increase alignment of w for next try (rhs gives the lowest bit set in w)
            w += w & ~(w - 1);
@@ -1540,15 +1606,16 @@ static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
        tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h,
                                         NULL, linesize);
-        if (tmpsize < 0)
-            return tmpsize;
+        if (tmpsize < 0) {
+            ret = tmpsize;
+            goto fail;
+        }
        for (i = 0; i < 3 && data[i + 1]; i++)
            size[i] = data[i + 1] - data[i];
        size[i] = tmpsize - (data[i] - data[0]);
        for (i = 0; i < 4; i++) {
-            av_buffer_pool_uninit(&pool->pools[i]);
            pool->linesize[i] = linesize[i];
            if (size[i]) {
                pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
@@ -1568,15 +1635,6 @@ static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
        break;
        }
    case AVMEDIA_TYPE_AUDIO: {
-        int ch = frame->channels; //av_get_channel_layout_nb_channels(frame->channel_layout);
-        int planar = av_sample_fmt_is_planar(frame->format);
-        int planes = planar ? ch : 1;
-        if (pool->format == frame->format && pool->planes == planes &&
-            pool->channels == ch && frame->nb_samples == pool->samples)
-            return 0;
-        av_buffer_pool_uninit(&pool->pools[0]);
        ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
                                         frame->nb_samples, frame->format, 0);
        if (ret < 0)
@@ -1596,19 +1654,19 @@ static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
        }
    default: av_assert0(0);
    }
+    av_buffer_unref(&avctx->internal->pool);
+    avctx->internal->pool = pool_buf;
    return 0;
 fail:
-    for (i = 0; i < 4; i++)
-        av_buffer_pool_uninit(&pool->pools[i]);
-    pool->format = -1;
-    pool->planes = pool->channels = pool->samples = 0;
-    pool->width = pool->height = 0;
+    av_buffer_unref(&pool_buf);
    return ret;
 }
 static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
 {
-    FramePool *pool = avctx->internal->pool;
+    FramePool *pool = (FramePool*)avctx->internal->pool->data;
    int planes = pool->planes;
    int i;
@@ -1653,7 +1711,7 @@ fail:
 static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
 {
-    FramePool *pool = s->internal->pool;
+    FramePool *pool = (FramePool*)s->internal->pool->data;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
    int i;


@@ -108,25 +108,6 @@
 # define STRIDE_ALIGN 8
 #endif
-typedef struct FramePool {
-    /**
-     * Pools for each data plane. For audio all the planes have the same size,
-     * so only pools[0] is used.
-     */
-    AVBufferPool *pools[4];
-    /*
-     * Pool parameters
-     */
-    int format;
-    int width, height;
-    int stride_align[AV_NUM_DATA_POINTERS];
-    int linesize[4];
-    int planes;
-    int channels;
-    int samples;
-} FramePool;
 typedef struct DecodeSimpleContext {
    AVPacket *in_pkt;
    AVFrame *out_frame;
@@ -154,7 +135,7 @@ typedef struct AVCodecInternal {
    AVFrame *to_free;
-    FramePool *pool;
+    AVBufferRef *pool;
    void *thread_ctx;


@@ -296,6 +296,17 @@ static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src, int for_user)
        }
        dst->hwaccel_flags = src->hwaccel_flags;
+        if (!!dst->internal->pool != !!src->internal->pool ||
+            (dst->internal->pool && dst->internal->pool->data != src->internal->pool->data)) {
+            av_buffer_unref(&dst->internal->pool);
+            if (src->internal->pool) {
+                dst->internal->pool = av_buffer_ref(src->internal->pool);
+                if (!dst->internal->pool)
+                    return AVERROR(ENOMEM);
+            }
+        }
    }
    if (for_user) {
@@ -714,6 +725,7 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
        }
        if (p->avctx) {
+            av_buffer_unref(&p->avctx->internal->pool);
            av_freep(&p->avctx->internal);
            av_buffer_unref(&p->avctx->hw_frames_ctx);
        }


@@ -583,12 +583,6 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
    }
    avctx->internal = avci;
-    avci->pool = av_mallocz(sizeof(*avci->pool));
-    if (!avci->pool) {
-        ret = AVERROR(ENOMEM);
-        goto free_and_end;
-    }
    avci->to_free = av_frame_alloc();
    if (!avci->to_free) {
        ret = AVERROR(ENOMEM);
@@ -1076,7 +1070,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
        av_packet_free(&avci->ds.in_pkt);
        ff_decode_bsfs_uninit(avctx);
-        av_freep(&avci->pool);
+        av_buffer_unref(&avci->pool);
    }
    av_freep(&avci);
    avctx->internal = NULL;
@@ -1111,7 +1105,6 @@ av_cold int avcodec_close(AVCodecContext *avctx)
        return 0;
    if (avcodec_is_open(avctx)) {
-        FramePool *pool = avctx->internal->pool;
        if (CONFIG_FRAME_THREAD_ENCODER &&
            avctx->internal->frame_thread_encoder && avctx->thread_count > 1) {
            ff_frame_thread_encoder_free(avctx);
@@ -1130,9 +1123,7 @@
        av_packet_free(&avctx->internal->ds.in_pkt);
-        for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++)
-            av_buffer_pool_uninit(&pool->pools[i]);
-        av_freep(&avctx->internal->pool);
+        av_buffer_unref(&avctx->internal->pool);
        if (avctx->hwaccel && avctx->hwaccel->uninit)
            avctx->hwaccel->uninit(avctx);