Mirror of https://github.com/FFmpeg/FFmpeg.git
pthread_frame: allow per-field ThreadFrame owners.
This tries to handle cases where separate invocations of decode_frame() (each running in separate threads) write to respective fields in the same AVFrame->data[]. Having per-field owners makes the interaction between readers (the referencing thread) and writers (the decoding thread) slightly more efficient when both accesses are field-based, since they will use the respective producer's thread objects (mutex/cond) instead of sharing the thread objects of the first field's producer.

In practice, this fixes the following tsan warning in fate-h264:

WARNING: ThreadSanitizer: data race (pid=21615)
  Read of size 4 at 0x7d640000d9fc by thread T2 (mutexes: write M1006):
    #0 ff_thread_report_progress pthread_frame.c:569 (ffmpeg:x86_64+0x100f7cf54)
[..]
  Previous write of size 4 at 0x7d640000d9fc by main thread (mutexes: write M1004):
    #0 update_context_from_user pthread_frame.c:335 (ffmpeg:x86_64+0x100f81abb)
parent ac24a8202a
commit 083300bea9
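To illustrate the per-field owner idea outside of FFmpeg's actual API, here is a minimal standalone sketch: two producer contexts, each with its own mutex/cond pair, decode one field each of a shared two-field frame, and the consumer waits via owner[field] rather than on a single shared owner. All names in it (OwnerCtx, FieldFrame, report_progress, await_progress, decode_field) are hypothetical stand-ins, not FFmpeg identifiers, and the progress bookkeeping is deliberately simplified.

/* Standalone sketch, NOT FFmpeg API: per-field owners for a two-field frame.
 * Each field is produced by its own context with its own mutex/cond, and
 * consumers wait on the producing field's objects via owner[field]. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct OwnerCtx {                 /* stand-in for a decoding thread's context */
    pthread_mutex_t progress_mutex;
    pthread_cond_t  progress_cond;
} OwnerCtx;

typedef struct FieldFrame {               /* stand-in for ThreadFrame */
    OwnerCtx  *owner[2];                  /* per-field owner, as in this commit */
    atomic_int progress[2];               /* rows decoded per field */
} FieldFrame;

static void report_progress(FieldFrame *f, int n, int field)
{
    OwnerCtx *p = f->owner[field];        /* the producing field's thread objects */
    pthread_mutex_lock(&p->progress_mutex);
    atomic_store_explicit(&f->progress[field], n, memory_order_release);
    pthread_cond_broadcast(&p->progress_cond);
    pthread_mutex_unlock(&p->progress_mutex);
}

static void await_progress(FieldFrame *f, int n, int field)
{
    OwnerCtx *p = f->owner[field];
    if (atomic_load_explicit(&f->progress[field], memory_order_acquire) >= n)
        return;
    pthread_mutex_lock(&p->progress_mutex);
    while (atomic_load_explicit(&f->progress[field], memory_order_acquire) < n)
        pthread_cond_wait(&p->progress_cond, &p->progress_mutex);
    pthread_mutex_unlock(&p->progress_mutex);
}

static FieldFrame frame;

static void *decode_field(void *arg)      /* one thread per field, as in the commit message */
{
    int field = (int)(intptr_t)arg;
    for (int row = 1; row <= 4; row++)
        report_progress(&frame, row, field);
    return NULL;
}

int main(void)
{
    OwnerCtx  owners[2];
    pthread_t th[2];

    for (int i = 0; i < 2; i++) {
        pthread_mutex_init(&owners[i].progress_mutex, NULL);
        pthread_cond_init(&owners[i].progress_cond, NULL);
        frame.owner[i] = &owners[i];      /* each field has its own producer */
        atomic_init(&frame.progress[i], 0);
    }
    for (int i = 0; i < 2; i++)
        pthread_create(&th[i], NULL, decode_field, (void *)(intptr_t)i);

    await_progress(&frame, 4, 0);         /* blocks on owners[0]'s mutex/cond */
    await_progress(&frame, 4, 1);         /* blocks on owners[1]'s mutex/cond */
    printf("both fields reported complete\n");

    for (int i = 0; i < 2; i++)
        pthread_join(th[i], NULL);
    return 0;
}

Built with e.g. cc -pthread, the waiter in await_progress() blocks on the mutex/cond of the context that actually produces that field, mirroring how the patch routes waiters to the respective producer's thread objects instead of the first field's.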
@@ -1423,14 +1423,14 @@ static int h264_field_start(H264Context *h, const H264SliceContext *sl,
      * We have to do that before the "dummy" in-between frame allocation,
      * since that can modify h->cur_pic_ptr. */
     if (h->first_field) {
+        int last_field = last_pic_structure == PICT_BOTTOM_FIELD;
         av_assert0(h->cur_pic_ptr);
         av_assert0(h->cur_pic_ptr->f->buf[0]);
         assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
 
         /* Mark old field/frame as completed */
-        if (h->cur_pic_ptr->tf.owner == h->avctx) {
-            ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
-                                      last_pic_structure == PICT_BOTTOM_FIELD);
+        if (h->cur_pic_ptr->tf.owner[last_field] == h->avctx) {
+            ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_field);
         }
 
         /* figure out if we have a complementary field pair */
@@ -1568,7 +1568,9 @@ static int h264_field_start(H264Context *h, const H264SliceContext *sl,
             return AVERROR_INVALIDDATA;
         }
     } else {
+        int field = h->picture_structure == PICT_BOTTOM_FIELD;
         release_unused_pictures(h, 0);
+        h->cur_pic_ptr->tf.owner[field] = h->avctx;
     }
     /* Some macroblocks can be accessed before they're available in case
      * of lost slices, MBAFF or threading. */
@@ -564,10 +564,11 @@ void ff_thread_report_progress(ThreadFrame *f, int n, int field)
         atomic_load_explicit(&progress[field], memory_order_relaxed) >= n)
         return;
 
-    p = f->owner->internal->thread_ctx;
+    p = f->owner[field]->internal->thread_ctx;
 
-    if (f->owner->debug&FF_DEBUG_THREADS)
-        av_log(f->owner, AV_LOG_DEBUG, "%p finished %d field %d\n", progress, n, field);
+    if (f->owner[field]->debug&FF_DEBUG_THREADS)
+        av_log(f->owner[field], AV_LOG_DEBUG,
+               "%p finished %d field %d\n", progress, n, field);
 
     pthread_mutex_lock(&p->progress_mutex);
@@ -586,10 +587,11 @@ void ff_thread_await_progress(ThreadFrame *f, int n, int field)
         atomic_load_explicit(&progress[field], memory_order_acquire) >= n)
         return;
 
-    p = f->owner->internal->thread_ctx;
+    p = f->owner[field]->internal->thread_ctx;
 
-    if (f->owner->debug&FF_DEBUG_THREADS)
-        av_log(f->owner, AV_LOG_DEBUG, "thread awaiting %d field %d from %p\n", n, field, progress);
+    if (f->owner[field]->debug&FF_DEBUG_THREADS)
+        av_log(f->owner[field], AV_LOG_DEBUG,
+               "thread awaiting %d field %d from %p\n", n, field, progress);
 
     pthread_mutex_lock(&p->progress_mutex);
     while (atomic_load_explicit(&progress[field], memory_order_relaxed) < n)
@@ -882,7 +884,7 @@ static int thread_get_buffer_internal(AVCodecContext *avctx, ThreadFrame *f, int
     PerThreadContext *p = avctx->internal->thread_ctx;
     int err;
 
-    f->owner = avctx;
+    f->owner[0] = f->owner[1] = avctx;
 
     ff_init_buffer_info(avctx, f->f);
 
@@ -986,7 +988,7 @@ void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
         av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);
 
     av_buffer_unref(&f->progress);
-    f->owner = NULL;
+    f->owner[0] = f->owner[1] = NULL;
 
     if (can_direct_free) {
         av_frame_unref(f->f);
@@ -34,7 +34,7 @@
 
 typedef struct ThreadFrame {
     AVFrame *f;
-    AVCodecContext *owner;
+    AVCodecContext *owner[2];
     // progress->data is an array of 2 ints holding progress for top/bottom
     // fields
    AVBufferRef *progress;
@@ -3971,7 +3971,8 @@ int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
 {
     int ret;
 
-    dst->owner = src->owner;
+    dst->owner[0] = src->owner[0];
+    dst->owner[1] = src->owner[1];
 
     ret = av_frame_ref(dst->f, src->f);
     if (ret < 0)
@@ -3981,7 +3982,7 @@ int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
 
     if (src->progress &&
         !(dst->progress = av_buffer_ref(src->progress))) {
-        ff_thread_release_buffer(dst->owner, dst);
+        ff_thread_release_buffer(dst->owner[0], dst);
         return AVERROR(ENOMEM);
     }
 
@@ -3997,7 +3998,7 @@ enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
 
 int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
 {
-    f->owner = avctx;
+    f->owner[0] = f->owner[1] = avctx;
     return ff_get_buffer(avctx, f->f, flags);
 }
 