mirror of
https://github.com/FFmpeg/FFmpeg.git
synced 2024-12-23 12:43:46 +02:00
avcodec/mpegpicture: Use ThreadProgress instead of ThreadFrame API
Given that MPVPictures are already directly shared between threads in case of frame-threaded decoding, one can simply use it to pass decoding progress information between threads. This makes it possible to avoid one level of indirection; it also avoids allocations (of the ThreadFrameProgress structure) in case of frame-threading and indeed makes ff_thread_release_ext_buffer() decoder-only (actually, H.264-decoder-only). Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
This commit is contained in:
parent
5475000942
commit
45cf0541cf
@ -34,6 +34,7 @@
|
||||
#include "mpegutils.h"
|
||||
#include "mpegvideo.h"
|
||||
#include "threadframe.h"
|
||||
#include "threadprogress.h"
|
||||
|
||||
/**
|
||||
* @param stride the number of MVs to get to the next row
|
||||
@ -409,8 +410,12 @@ static void guess_mv(ERContext *s)
|
||||
set_mv_strides(s, &mot_step, &mot_stride);
|
||||
|
||||
num_avail = 0;
|
||||
if (s->last_pic.motion_val[0])
|
||||
ff_thread_await_progress(s->last_pic.tf, mb_height-1, 0);
|
||||
if (s->last_pic.motion_val[0]) {
|
||||
if (s->last_pic.tf)
|
||||
ff_thread_await_progress(s->last_pic.tf, mb_height-1, 0);
|
||||
else
|
||||
ff_thread_progress_await(s->last_pic.progress, mb_height - 1);
|
||||
}
|
||||
for (i = 0; i < mb_width * mb_height; i++) {
|
||||
const int mb_xy = s->mb_index2xy[i];
|
||||
int f = 0;
|
||||
@ -763,7 +768,7 @@ static int is_intra_more_likely(ERContext *s)
|
||||
if (s->avctx->codec_id == AV_CODEC_ID_H264) {
|
||||
// FIXME
|
||||
} else {
|
||||
ff_thread_await_progress(s->last_pic.tf, mb_y, 0);
|
||||
ff_thread_progress_await(s->last_pic.progress, mb_y);
|
||||
}
|
||||
is_intra_likely += s->sad(NULL, last_mb_ptr, mb_ptr,
|
||||
linesize[0], 16);
|
||||
@ -1198,7 +1203,7 @@ void ff_er_frame_end(ERContext *s, int *decode_error_flags)
|
||||
int time_pb = s->pb_time;
|
||||
|
||||
av_assert0(s->avctx->codec_id != AV_CODEC_ID_H264);
|
||||
ff_thread_await_progress(s->next_pic.tf, mb_y, 0);
|
||||
ff_thread_progress_await(s->next_pic.progress, mb_y);
|
||||
|
||||
s->mv[0][0][0] = s->next_pic.motion_val[0][xy][0] * time_pb / time_pp;
|
||||
s->mv[0][0][1] = s->next_pic.motion_val[0][xy][1] * time_pb / time_pp;
|
||||
|
@ -40,6 +40,7 @@
|
||||
typedef struct ERPicture {
|
||||
AVFrame *f;
|
||||
const struct ThreadFrame *tf;
|
||||
const struct ThreadProgress *progress;
|
||||
|
||||
// it is the caller's responsibility to allocate these buffers
|
||||
int16_t (*motion_val[2])[2];
|
||||
|
@ -45,7 +45,7 @@
|
||||
#include "internal.h"
|
||||
#include "profiles.h"
|
||||
#include "qpeldsp.h"
|
||||
#include "threadframe.h"
|
||||
#include "threadprogress.h"
|
||||
#include "xvididct.h"
|
||||
#include "unary.h"
|
||||
|
||||
@ -1813,7 +1813,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
|
||||
s->last_mv[i][1][1] = 0;
|
||||
}
|
||||
|
||||
ff_thread_await_progress(&s->next_pic.ptr->tf, s->mb_y, 0);
|
||||
ff_thread_progress_await(&s->next_pic.ptr->progress, s->mb_y);
|
||||
}
|
||||
|
||||
/* if we skipped it in the future P-frame than skip it now too */
|
||||
@ -2018,10 +2018,10 @@ end:
|
||||
|
||||
if (s->pict_type == AV_PICTURE_TYPE_B) {
|
||||
const int delta = s->mb_x + 1 == s->mb_width ? 2 : 1;
|
||||
ff_thread_await_progress(&s->next_pic.ptr->tf,
|
||||
ff_thread_progress_await(&s->next_pic.ptr->progress,
|
||||
(s->mb_x + delta >= s->mb_width)
|
||||
? FFMIN(s->mb_y + 1, s->mb_height - 1)
|
||||
: s->mb_y, 0);
|
||||
: s->mb_y);
|
||||
if (s->next_pic.mbskip_table[xy + delta])
|
||||
return SLICE_OK;
|
||||
}
|
||||
|
@ -34,7 +34,7 @@ static void set_erpic(ERPicture *dst, const MPVPicture *src)
|
||||
}
|
||||
|
||||
dst->f = src->f;
|
||||
dst->tf = &src->tf;
|
||||
dst->progress = &src->progress;
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
dst->motion_val[i] = src->motion_val[i];
|
||||
|
@ -28,13 +28,13 @@
|
||||
#include "motion_est.h"
|
||||
#include "mpegpicture.h"
|
||||
#include "refstruct.h"
|
||||
#include "threadframe.h"
|
||||
|
||||
static void mpv_pic_reset(FFRefStructOpaque unused, void *obj)
|
||||
{
|
||||
MPVPicture *pic = obj;
|
||||
|
||||
ff_thread_release_ext_buffer(&pic->tf);
|
||||
av_frame_unref(pic->f);
|
||||
ff_thread_progress_reset(&pic->progress);
|
||||
|
||||
ff_refstruct_unref(&pic->hwaccel_picture_private);
|
||||
|
||||
@ -65,14 +65,18 @@ static void mpv_pic_reset(FFRefStructOpaque unused, void *obj)
|
||||
pic->coded_picture_number = 0;
|
||||
}
|
||||
|
||||
static int av_cold mpv_pic_init(FFRefStructOpaque unused, void *obj)
|
||||
static int av_cold mpv_pic_init(FFRefStructOpaque opaque, void *obj)
|
||||
{
|
||||
MPVPicture *pic = obj;
|
||||
int ret, init_progress = (uintptr_t)opaque.nc;
|
||||
|
||||
ret = ff_thread_progress_init(&pic->progress, init_progress);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
pic->f = av_frame_alloc();
|
||||
if (!pic->f)
|
||||
return AVERROR(ENOMEM);
|
||||
pic->tf.f = pic->f;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -80,12 +84,15 @@ static void av_cold mpv_pic_free(FFRefStructOpaque unused, void *obj)
|
||||
{
|
||||
MPVPicture *pic = obj;
|
||||
|
||||
ff_thread_progress_destroy(&pic->progress);
|
||||
av_frame_free(&pic->f);
|
||||
}
|
||||
|
||||
av_cold FFRefStructPool *ff_mpv_alloc_pic_pool(void)
|
||||
av_cold FFRefStructPool *ff_mpv_alloc_pic_pool(int init_progress)
|
||||
{
|
||||
return ff_refstruct_pool_alloc_ext(sizeof(MPVPicture), 0, NULL,
|
||||
return ff_refstruct_pool_alloc_ext(sizeof(MPVPicture),
|
||||
FF_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR,
|
||||
(void*)(uintptr_t)init_progress,
|
||||
mpv_pic_init, mpv_pic_reset, mpv_pic_free, NULL);
|
||||
}
|
||||
|
||||
|
@ -26,7 +26,7 @@
|
||||
|
||||
#include "avcodec.h"
|
||||
#include "motion_est.h"
|
||||
#include "threadframe.h"
|
||||
#include "threadprogress.h"
|
||||
|
||||
#define MPV_MAX_PLANES 3
|
||||
#define EDGE_WIDTH 16
|
||||
@ -55,7 +55,6 @@ typedef struct BufferPoolContext {
|
||||
*/
|
||||
typedef struct MPVPicture {
|
||||
struct AVFrame *f;
|
||||
ThreadFrame tf;
|
||||
|
||||
int8_t *qscale_table_base;
|
||||
int8_t *qscale_table;
|
||||
@ -87,6 +86,8 @@ typedef struct MPVPicture {
|
||||
|
||||
int display_picture_number;
|
||||
int coded_picture_number;
|
||||
|
||||
ThreadProgress progress;
|
||||
} MPVPicture;
|
||||
|
||||
typedef struct MPVWorkPicture {
|
||||
@ -111,7 +112,7 @@ typedef struct MPVWorkPicture {
|
||||
/**
|
||||
* Allocate a pool of MPVPictures.
|
||||
*/
|
||||
struct FFRefStructPool *ff_mpv_alloc_pic_pool(void);
|
||||
struct FFRefStructPool *ff_mpv_alloc_pic_pool(int init_progress);
|
||||
|
||||
/**
|
||||
* Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself)
|
||||
|
@ -40,11 +40,13 @@
|
||||
#include "mpeg4videodec.h"
|
||||
#include "refstruct.h"
|
||||
#include "thread.h"
|
||||
#include "threadframe.h"
|
||||
#include "threadprogress.h"
|
||||
#include "wmv2dec.h"
|
||||
|
||||
int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
|
||||
{
|
||||
enum ThreadingStatus thread_status;
|
||||
|
||||
ff_mpv_common_defaults(s);
|
||||
|
||||
s->avctx = avctx;
|
||||
@ -59,9 +61,12 @@ int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
|
||||
ff_mpv_idct_init(s);
|
||||
ff_h264chroma_init(&s->h264chroma, 8); //for lowres
|
||||
|
||||
if (!s->picture_pool && // VC-1 can call this multiple times
|
||||
ff_thread_sync_ref(avctx, offsetof(MpegEncContext, picture_pool))) {
|
||||
s->picture_pool = ff_mpv_alloc_pic_pool();
|
||||
if (s->picture_pool) // VC-1 can call this multiple times
|
||||
return 0;
|
||||
|
||||
thread_status = ff_thread_sync_ref(avctx, offsetof(MpegEncContext, picture_pool));
|
||||
if (thread_status != FF_THREAD_IS_COPY) {
|
||||
s->picture_pool = ff_mpv_alloc_pic_pool(thread_status != FF_THREAD_NO_FRAME_THREADING);
|
||||
if (!s->picture_pool)
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
@ -229,7 +234,6 @@ static int alloc_picture(MpegEncContext *s, MPVWorkPicture *dst, int reference)
|
||||
|
||||
dst->ptr = pic;
|
||||
|
||||
pic->tf.f = pic->f;
|
||||
pic->reference = reference;
|
||||
|
||||
/* WM Image / Screen codecs allocate internal buffers with different
|
||||
@ -237,8 +241,8 @@ static int alloc_picture(MpegEncContext *s, MPVWorkPicture *dst, int reference)
|
||||
if (avctx->codec_id != AV_CODEC_ID_WMV3IMAGE &&
|
||||
avctx->codec_id != AV_CODEC_ID_VC1IMAGE &&
|
||||
avctx->codec_id != AV_CODEC_ID_MSS2) {
|
||||
ret = ff_thread_get_ext_buffer(avctx, &pic->tf,
|
||||
reference ? AV_GET_BUFFER_FLAG_REF : 0);
|
||||
ret = ff_thread_get_buffer(avctx, pic->f,
|
||||
reference ? AV_GET_BUFFER_FLAG_REF : 0);
|
||||
} else {
|
||||
pic->f->width = avctx->width;
|
||||
pic->f->height = avctx->height;
|
||||
@ -281,8 +285,7 @@ static int av_cold alloc_dummy_frame(MpegEncContext *s, MPVWorkPicture *dst)
|
||||
pic = dst->ptr;
|
||||
pic->dummy = 1;
|
||||
|
||||
ff_thread_report_progress(&pic->tf, INT_MAX, 0);
|
||||
ff_thread_report_progress(&pic->tf, INT_MAX, 1);
|
||||
ff_thread_progress_report(&pic->progress, INT_MAX);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -418,7 +421,7 @@ void ff_mpv_frame_end(MpegEncContext *s)
|
||||
emms_c();
|
||||
|
||||
if (s->cur_pic.reference)
|
||||
ff_thread_report_progress(&s->cur_pic.ptr->tf, INT_MAX, 0);
|
||||
ff_thread_progress_report(&s->cur_pic.ptr->progress, INT_MAX);
|
||||
}
|
||||
|
||||
void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict)
|
||||
@ -484,7 +487,7 @@ void ff_mpeg_flush(AVCodecContext *avctx)
|
||||
void ff_mpv_report_decode_progress(MpegEncContext *s)
|
||||
{
|
||||
if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
|
||||
ff_thread_report_progress(&s->cur_pic.ptr->tf, s->mb_y, 0);
|
||||
ff_thread_progress_report(&s->cur_pic.ptr->progress, s->mb_y);
|
||||
}
|
||||
|
||||
|
||||
|
@ -823,7 +823,7 @@ av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
|
||||
!FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_B_FRAMES + 1) ||
|
||||
!FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_B_FRAMES + 1) ||
|
||||
!(s->new_pic = av_frame_alloc()) ||
|
||||
!(s->picture_pool = ff_mpv_alloc_pic_pool()))
|
||||
!(s->picture_pool = ff_mpv_alloc_pic_pool(0)))
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
/* Allocate MV tables; the MV and MB tables will be copied
|
||||
|
@ -124,12 +124,12 @@ void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
|
||||
if (HAVE_THREADS && is_mpeg12 != DEFINITELY_MPEG12 &&
|
||||
s->avctx->active_thread_type & FF_THREAD_FRAME) {
|
||||
if (s->mv_dir & MV_DIR_FORWARD) {
|
||||
ff_thread_await_progress(&s->last_pic.ptr->tf,
|
||||
lowest_referenced_row(s, 0), 0);
|
||||
ff_thread_progress_await(&s->last_pic.ptr->progress,
|
||||
lowest_referenced_row(s, 0));
|
||||
}
|
||||
if (s->mv_dir & MV_DIR_BACKWARD) {
|
||||
ff_thread_await_progress(&s->next_pic.ptr->tf,
|
||||
lowest_referenced_row(s, 1), 0);
|
||||
ff_thread_progress_await(&s->next_pic.ptr->progress,
|
||||
lowest_referenced_row(s, 1));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -996,11 +996,6 @@ int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
|
||||
int ret;
|
||||
|
||||
f->owner[0] = f->owner[1] = avctx;
|
||||
/* Hint: It is possible for this function to be called with codecs
|
||||
* that don't support frame threading at all, namely in case
|
||||
* a frame-threaded decoder shares code with codecs that are not.
|
||||
* This currently affects non-MPEG-4 mpegvideo codecs.
|
||||
* The following check will always be true for them. */
|
||||
if (!(avctx->active_thread_type & FF_THREAD_FRAME))
|
||||
return ff_get_buffer(avctx, f->f, flags);
|
||||
|
||||
|
@ -43,7 +43,7 @@
|
||||
#include "qpeldsp.h"
|
||||
#include "rectangle.h"
|
||||
#include "thread.h"
|
||||
#include "threadframe.h"
|
||||
#include "threadprogress.h"
|
||||
|
||||
#include "rv34vlc.h"
|
||||
#include "rv34data.h"
|
||||
@ -721,8 +721,8 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type,
|
||||
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
|
||||
/* wait for the referenced mb row to be finished */
|
||||
int mb_row = s->mb_y + ((yoff + my + 5 + 8 * height) >> 4);
|
||||
const ThreadFrame *f = dir ? &s->next_pic.ptr->tf : &s->last_pic.ptr->tf;
|
||||
ff_thread_await_progress(f, mb_row, 0);
|
||||
const ThreadProgress *p = dir ? &s->next_pic.ptr->progress : &s->last_pic.ptr->progress;
|
||||
ff_thread_progress_await(p, mb_row);
|
||||
}
|
||||
|
||||
dxy = ly*4 + lx;
|
||||
@ -901,7 +901,7 @@ static int rv34_decode_mv(RV34DecContext *r, int block_type)
|
||||
//surprisingly, it uses motion scheme from next reference frame
|
||||
/* wait for the current mb row to be finished */
|
||||
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
|
||||
ff_thread_await_progress(&s->next_pic.ptr->tf, FFMAX(0, s->mb_y-1), 0);
|
||||
ff_thread_progress_await(&s->next_pic.ptr->progress, FFMAX(0, s->mb_y-1));
|
||||
|
||||
next_bt = s->next_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride];
|
||||
if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
|
||||
@ -1485,8 +1485,8 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
|
||||
r->loop_filter(r, s->mb_y - 2);
|
||||
|
||||
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
|
||||
ff_thread_report_progress(&s->cur_pic.ptr->tf,
|
||||
s->mb_y - 2, 0);
|
||||
ff_thread_progress_report(&s->cur_pic.ptr->progress,
|
||||
s->mb_y - 2);
|
||||
|
||||
}
|
||||
if(s->mb_x == s->resync_mb_x)
|
||||
@ -1584,7 +1584,7 @@ static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
|
||||
s->mb_num_left = 0;
|
||||
|
||||
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
|
||||
ff_thread_report_progress(&s->cur_pic.ptr->tf, INT_MAX, 0);
|
||||
ff_thread_progress_report(&s->cur_pic.ptr->progress, INT_MAX);
|
||||
|
||||
if (s->pict_type == AV_PICTURE_TYPE_B) {
|
||||
if ((ret = av_frame_ref(pict, s->cur_pic.ptr->f)) < 0)
|
||||
@ -1812,7 +1812,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict,
|
||||
ff_er_frame_end(&s->er, NULL);
|
||||
ff_mpv_frame_end(s);
|
||||
s->mb_num_left = 0;
|
||||
ff_thread_report_progress(&s->cur_pic.ptr->tf, INT_MAX, 0);
|
||||
ff_thread_progress_report(&s->cur_pic.ptr->progress, INT_MAX);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user