avcodec/pthread_frame: Remove ff_thread_release_buffer()
It has been unnecessary since the removal of the non-thread-safe callbacks
in e0786a8eeb. Since then, the AVCodecContext has only been used as a log context.
Removing ff_thread_release_buffer() made it possible to remove the AVCodecContext*
parameters from several other functions as well (not only unref functions, but also
e.g. ff_h264_ref_picture(), which calls ff_h264_unref_picture() on error).
Reviewed-by: Anton Khirnov <anton@khirnov.net>
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
parent 86ed68420d
commit 315c956cbd
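To illustrate the pattern of the change, here is a minimal sketch (not code from the tree; MyFrame and my_frame_unref() are hypothetical names): before this commit a per-codec unref helper needed an AVCodecContext purely so it could call the wrapper; afterwards it calls av_frame_unref() directly and the context parameter disappears.

    /* Sketch of the calling-convention change; builds only inside libavcodec,
     * where "thread.h" used to declare ff_thread_release_buffer(). */
    #include "avcodec.h"
    #include "thread.h"
    #include "libavutil/frame.h"

    typedef struct MyFrame {
        AVFrame *f;
    } MyFrame;

    /* Before: the context was carried around only for the wrapper call. */
    static void my_frame_unref_old(AVCodecContext *avctx, MyFrame *frame)
    {
        ff_thread_release_buffer(avctx, frame->f); /* just wrapped av_frame_unref() */
    }

    /* After: no context needed, av_frame_unref() is called directly. */
    static void my_frame_unref(MyFrame *frame)
    {
        av_frame_unref(frame->f);
    }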
@ -55,8 +55,7 @@ speed gain at this point but it should work.
If there are inter-frame dependencies, so the codec calls
ff_thread_report/await_progress(), set FF_CODEC_CAP_ALLOCATE_PROGRESS in
AVCodec.caps_internal and use ff_thread_get_buffer() to allocate frames. The
frames must then be freed with ff_thread_release_buffer().
FFCodec.caps_internal and use ff_thread_get_buffer() to allocate frames.
Otherwise decode directly into the user-supplied frames.
Call ff_thread_report_progress() after some part of the current picture has decoded.
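As a concrete reading of the documentation change above, a minimal sketch of the buffer lifecycle in a frame-threaded decoder with inter-frame dependencies after this commit (MyContext and my_start_picture() are hypothetical; it mirrors the pattern visible in the decoders below and assumes libavcodec's internal threadframe.h):

    #include "avcodec.h"
    #include "threadframe.h"

    typedef struct MyContext {
        ThreadFrame cur;   /* current picture, progress-enabled */
    } MyContext;

    static int my_start_picture(AVCodecContext *avctx, MyContext *c)
    {
        int ret;

        /* Release the previous buffer: no AVCodecContext argument any more. */
        ff_thread_release_ext_buffer(&c->cur);

        ret = ff_thread_get_ext_buffer(avctx, &c->cur, AV_GET_BUFFER_FLAG_REF);
        if (ret < 0)
            return ret;

        /* ... decode; after the picture (or a part of it) is done: */
        ff_thread_report_progress(&c->cur, INT_MAX, 0);
        return 0;
    }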
@ -636,9 +636,9 @@ static int get_pixel_format(AVCodecContext *avctx)
return 0;
}
static void av1_frame_unref(AVCodecContext *avctx, AV1Frame *f)
static void av1_frame_unref(AV1Frame *f)
{
ff_thread_release_buffer(avctx, f->f);
av_frame_unref(f->f);
ff_refstruct_unref(&f->hwaccel_picture_private);
ff_refstruct_unref(&f->header_ref);
f->raw_frame_header = NULL;
@ -689,7 +689,7 @@ static int av1_frame_ref(AVCodecContext *avctx, AV1Frame *dst, const AV1Frame *s
return 0;
fail:
av1_frame_unref(avctx, dst);
av1_frame_unref(dst);
return AVERROR(ENOMEM);
}
@ -699,12 +699,15 @@ static av_cold int av1_decode_free(AVCodecContext *avctx)
AV1RawMetadataITUTT35 itut_t35;
for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
av1_frame_unref(avctx, &s->ref[i]);
if (s->ref[i].f) {
av1_frame_unref(&s->ref[i]);
av_frame_free(&s->ref[i].f);
}
av1_frame_unref(avctx, &s->cur_frame);
}
if (s->cur_frame.f) {
av1_frame_unref(&s->cur_frame);
av_frame_free(&s->cur_frame.f);
}
ff_refstruct_unref(&s->seq_ref);
ff_refstruct_unref(&s->header_ref);
ff_refstruct_unref(&s->cll_ref);
@ -916,7 +919,7 @@ static int av1_frame_alloc(AVCodecContext *avctx, AV1Frame *f)
return 0;
fail:
av1_frame_unref(avctx, f);
av1_frame_unref(f);
return ret;
}
@ -1134,7 +1137,7 @@ static int update_reference_list(AVCodecContext *avctx)
for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
if (header->refresh_frame_flags & (1 << i)) {
av1_frame_unref(avctx, &s->ref[i]);
av1_frame_unref(&s->ref[i]);
if ((ret = av1_frame_ref(avctx, &s->ref[i], &s->cur_frame)) < 0) {
av_log(avctx, AV_LOG_ERROR,
"Failed to update frame %d in reference list\n", i);
@ -1150,7 +1153,7 @@ static int get_current_frame(AVCodecContext *avctx)
AV1DecContext *s = avctx->priv_data;
int ret;
av1_frame_unref(avctx, &s->cur_frame);
av1_frame_unref(&s->cur_frame);
s->cur_frame.header_ref = ff_refstruct_ref(s->header_ref);
@ -1257,7 +1260,7 @@ static int av1_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
s->raw_frame_header = &obu->obu.frame_header;
if (s->raw_frame_header->show_existing_frame) {
av1_frame_unref(avctx, &s->cur_frame);
av1_frame_unref(&s->cur_frame);
ret = av1_frame_ref(avctx, &s->cur_frame,
&s->ref[s->raw_frame_header->frame_to_show_map_idx]);
@ -1452,9 +1455,9 @@ static void av1_decode_flush(AVCodecContext *avctx)
AV1RawMetadataITUTT35 itut_t35;
for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++)
av1_frame_unref(avctx, &s->ref[i]);
av1_frame_unref(&s->ref[i]);
av1_frame_unref(avctx, &s->cur_frame);
av1_frame_unref(&s->cur_frame);
s->operating_point_idc = 0;
s->nb_unit = 0;
s->raw_frame_header = NULL;
@ -885,7 +885,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
AVFrame *p;
if (f->last_picture.f)
ff_thread_release_ext_buffer(avctx, &f->last_picture);
ff_thread_release_ext_buffer(&f->last_picture);
FFSWAP(ThreadFrame, f->picture, f->last_picture);
f->cur = p = f->picture.f;
@ -1025,7 +1025,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
ff_thread_report_progress(&f->picture, INT_MAX, 0);
if (f->last_picture.f)
ff_thread_release_ext_buffer(avctx, &f->last_picture);
ff_thread_release_ext_buffer(&f->last_picture);
if ((ret = av_frame_ref(rframe, f->picture.f)) < 0)
return ret;
@ -1089,7 +1089,7 @@ static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
av_assert1(fdst->max_slice_count == fsrc->max_slice_count);
ff_thread_release_ext_buffer(dst, &fdst->picture);
ff_thread_release_ext_buffer(&fdst->picture);
if (fsrc->picture.f->data[0]) {
if ((ret = ff_thread_ref_frame(&fdst->picture, &fsrc->picture)) < 0)
return ret;
@ -1106,12 +1106,12 @@ static av_cold int ffv1_decode_close(AVCodecContext *avctx)
FFV1Context *const s = avctx->priv_data;
if (s->picture.f) {
ff_thread_release_ext_buffer(avctx, &s->picture);
ff_thread_release_ext_buffer(&s->picture);
av_frame_free(&s->picture.f);
}
if (s->last_picture.f) {
ff_thread_release_ext_buffer(avctx, &s->last_picture);
ff_thread_release_ext_buffer(&s->last_picture);
av_frame_free(&s->last_picture.f);
}
return ff_ffv1_close(avctx);
@ -36,7 +36,7 @@
#include "thread.h"
#include "threadframe.h"
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
void ff_h264_unref_picture(H264Picture *pic)
{
int off = offsetof(H264Picture, f_grain) + sizeof(pic->f_grain);
int i;
@ -44,8 +44,8 @@ void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
if (!pic->f || !pic->f->buf[0])
return;
ff_thread_release_ext_buffer(h->avctx, &pic->tf);
ff_thread_release_buffer(h->avctx, pic->f_grain);
ff_thread_release_ext_buffer(&pic->tf);
av_frame_unref(pic->f_grain);
ff_refstruct_unref(&pic->hwaccel_picture_private);
av_buffer_unref(&pic->qscale_table_buf);
@ -94,7 +94,7 @@ static void h264_copy_picture_params(H264Picture *dst, const H264Picture *src)
dst->needs_fg = src->needs_fg;
}
int ff_h264_ref_picture(H264Context *h, H264Picture *dst, const H264Picture *src)
int ff_h264_ref_picture(H264Picture *dst, const H264Picture *src)
{
int ret, i;
@ -140,28 +140,28 @@ int ff_h264_ref_picture(H264Context *h, H264Picture *dst, const H264Picture *src
return 0;
fail:
ff_h264_unref_picture(h, dst);
ff_h264_unref_picture(dst);
return ret;
}
int ff_h264_replace_picture(H264Context *h, H264Picture *dst, const H264Picture *src)
int ff_h264_replace_picture(H264Picture *dst, const H264Picture *src)
{
int ret, i;
if (!src->f || !src->f->buf[0]) {
ff_h264_unref_picture(h, dst);
ff_h264_unref_picture(dst);
return 0;
}
av_assert0(src->tf.f == src->f);
dst->tf.f = dst->f;
ret = ff_thread_replace_frame(h->avctx, &dst->tf, &src->tf);
ret = ff_thread_replace_frame(&dst->tf, &src->tf);
if (ret < 0)
goto fail;
if (src->needs_fg) {
ff_thread_release_buffer(h->avctx, dst->f_grain);
av_frame_unref(dst->f_grain);
ret = av_frame_ref(dst->f_grain, src->f_grain);
if (ret < 0)
goto fail;
@ -190,7 +190,7 @@ int ff_h264_replace_picture(H264Context *h, H264Picture *dst, const H264Picture
return 0;
fail:
ff_h264_unref_picture(h, dst);
ff_h264_unref_picture(dst);
return ret;
}
@ -572,8 +572,8 @@ void ff_h264_remove_all_refs(H264Context *h)
assert(h->long_ref_count == 0);
if (h->short_ref_count && !h->last_pic_for_ec.f->data[0]) {
ff_h264_unref_picture(h, &h->last_pic_for_ec);
ff_h264_ref_picture(h, &h->last_pic_for_ec, h->short_ref[0]);
ff_h264_unref_picture(&h->last_pic_for_ec);
ff_h264_ref_picture(&h->last_pic_for_ec, h->short_ref[0]);
}
for (i = 0; i < h->short_ref_count; i++) {
@ -121,7 +121,7 @@ static void release_unused_pictures(H264Context *h, int remove_current)
for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
if (h->DPB[i].f->buf[0] && !h->DPB[i].reference &&
(remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
ff_h264_unref_picture(h, &h->DPB[i]);
ff_h264_unref_picture(&h->DPB[i]);
}
}
}
@ -262,7 +262,7 @@ static int alloc_picture(H264Context *h, H264Picture *pic)
return 0;
fail:
ff_h264_unref_picture(h, pic);
ff_h264_unref_picture(pic);
return (ret < 0) ? ret : AVERROR(ENOMEM);
}
@ -396,13 +396,13 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
h->droppable = h1->droppable;
for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
ret = ff_h264_replace_picture(h, &h->DPB[i], &h1->DPB[i]);
ret = ff_h264_replace_picture(&h->DPB[i], &h1->DPB[i]);
if (ret < 0)
return ret;
}
h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
ret = ff_h264_replace_picture(h, &h->cur_pic, &h1->cur_pic);
ret = ff_h264_replace_picture(&h->cur_pic, &h1->cur_pic);
if (ret < 0)
return ret;
@ -529,12 +529,12 @@ FF_ENABLE_DEPRECATION_WARNINGS
return ret;
h->cur_pic_ptr = pic;
ff_h264_unref_picture(h, &h->cur_pic);
ff_h264_unref_picture(&h->cur_pic);
if (CONFIG_ERROR_RESILIENCE) {
ff_h264_set_erpic(&h->er.cur_pic, NULL);
}
if ((ret = ff_h264_ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
if ((ret = ff_h264_ref_picture(&h->cur_pic, h->cur_pic_ptr)) < 0)
return ret;
for (i = 0; i < h->nb_slice_ctx; i++) {
@ -1541,7 +1541,7 @@ static int h264_field_start(H264Context *h, const H264SliceContext *sl,
ff_thread_await_progress(&prev->tf, INT_MAX, 0);
if (prev->field_picture)
ff_thread_await_progress(&prev->tf, INT_MAX, 1);
ff_thread_release_ext_buffer(h->avctx, &h->short_ref[0]->tf);
ff_thread_release_ext_buffer(&h->short_ref[0]->tf);
h->short_ref[0]->tf.f = h->short_ref[0]->f;
ret = ff_thread_ref_frame(&h->short_ref[0]->tf, &prev->tf);
if (ret < 0)
@ -339,7 +339,7 @@ static int h264_init_context(AVCodecContext *avctx, H264Context *h)
static void h264_free_pic(H264Context *h, H264Picture *pic)
{
ff_h264_unref_picture(h, pic);
ff_h264_unref_picture(pic);
av_frame_free(&pic->f);
av_frame_free(&pic->f_grain);
}
@ -466,7 +466,7 @@ void ff_h264_flush_change(H264Context *h)
h->delayed_pic[j++] = h->delayed_pic[i];
h->delayed_pic[j] = NULL;
}
ff_h264_unref_picture(h, &h->last_pic_for_ec);
ff_h264_unref_picture(&h->last_pic_for_ec);
h->first_field = 0;
h->recovery_frame = -1;
@ -486,9 +486,9 @@ static void h264_decode_flush(AVCodecContext *avctx)
ff_h264_sei_uninit(&h->sei);
for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
ff_h264_unref_picture(h, &h->DPB[i]);
ff_h264_unref_picture(&h->DPB[i]);
h->cur_pic_ptr = NULL;
ff_h264_unref_picture(h, &h->cur_pic);
ff_h264_unref_picture(&h->cur_pic);
h->mb_y = 0;
@ -1024,7 +1024,7 @@ static int h264_decode_frame(AVCodecContext *avctx, AVFrame *pict,
h->setup_finished = 0;
h->nb_slice_ctx_queued = 0;
ff_h264_unref_picture(h, &h->last_pic_for_ec);
ff_h264_unref_picture(&h->last_pic_for_ec);
/* end of stream, output what is still in the buffers */
if (buf_size == 0)
@ -1076,7 +1076,7 @@ static int h264_decode_frame(AVCodecContext *avctx, AVFrame *pict,
av_assert0(pict->buf[0] || !*got_frame);
ff_h264_unref_picture(h, &h->last_pic_for_ec);
ff_h264_unref_picture(&h->last_pic_for_ec);
return get_consumed_bytes(buf_index, buf_size);
}
@ -653,9 +653,9 @@ static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup);
int ff_h264_ref_picture(H264Context *h, H264Picture *dst, const H264Picture *src);
int ff_h264_replace_picture(H264Context *h, H264Picture *dst, const H264Picture *src);
void ff_h264_unref_picture(H264Context *h, H264Picture *pic);
int ff_h264_ref_picture(H264Picture *dst, const H264Picture *src);
int ff_h264_replace_picture(H264Picture *dst, const H264Picture *src);
void ff_h264_unref_picture(H264Picture *pic);
void ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl);
@ -30,7 +30,7 @@
#include "refstruct.h"
#include "threadframe.h"
void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags)
void ff_hevc_unref_frame(HEVCFrame *frame, int flags)
{
/* frame->frame can be NULL if context init failed */
if (!frame->frame || !frame->frame->buf[0])
@ -38,8 +38,8 @@ void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags)
frame->flags &= ~flags;
if (!frame->flags) {
ff_thread_release_ext_buffer(s->avctx, &frame->tf);
ff_thread_release_buffer(s->avctx, frame->frame_grain);
ff_thread_release_ext_buffer(&frame->tf);
av_frame_unref(frame->frame_grain);
frame->needs_fg = 0;
av_buffer_unref(&frame->tab_mvf_buf);
@ -71,7 +71,7 @@ void ff_hevc_clear_refs(HEVCContext *s)
{
int i;
for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++)
ff_hevc_unref_frame(s, &s->DPB[i],
ff_hevc_unref_frame(&s->DPB[i],
HEVC_FRAME_FLAG_SHORT_REF |
HEVC_FRAME_FLAG_LONG_REF);
}
@ -80,7 +80,7 @@ void ff_hevc_flush_dpb(HEVCContext *s)
{
int i;
for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++)
ff_hevc_unref_frame(s, &s->DPB[i], ~0);
ff_hevc_unref_frame(&s->DPB[i], ~0);
}
static HEVCFrame *alloc_frame(HEVCContext *s)
@ -126,7 +126,7 @@ static HEVCFrame *alloc_frame(HEVCContext *s)
return frame;
fail:
ff_hevc_unref_frame(s, frame, ~0);
ff_hevc_unref_frame(frame, ~0);
return NULL;
}
av_log(s->avctx, AV_LOG_ERROR, "Error allocating frame, DPB full.\n");
@ -177,7 +177,7 @@ static void unref_missing_refs(HEVCContext *s)
for (int i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
HEVCFrame *frame = &s->DPB[i];
if (frame->sequence == HEVC_SEQUENCE_COUNTER_INVALID) {
ff_hevc_unref_frame(s, frame, ~0);
ff_hevc_unref_frame(frame, ~0);
}
}
}
@ -191,7 +191,7 @@ int ff_hevc_output_frame(HEVCContext *s, AVFrame *out, int flush)
if ((frame->flags & mask) == HEVC_FRAME_FLAG_OUTPUT &&
frame->sequence != s->seq_decode) {
if (s->sh.no_output_of_prior_pics_flag == 1)
ff_hevc_unref_frame(s, frame, HEVC_FRAME_FLAG_OUTPUT);
ff_hevc_unref_frame(frame, HEVC_FRAME_FLAG_OUTPUT);
else
frame->flags |= HEVC_FRAME_FLAG_BUMPING;
}
@ -224,9 +224,9 @@ int ff_hevc_output_frame(HEVCContext *s, AVFrame *out, int flush)
ret = av_frame_ref(out, frame->needs_fg ? frame->frame_grain : frame->frame);
if (frame->flags & HEVC_FRAME_FLAG_BUMPING)
ff_hevc_unref_frame(s, frame, HEVC_FRAME_FLAG_OUTPUT | HEVC_FRAME_FLAG_BUMPING);
ff_hevc_unref_frame(frame, HEVC_FRAME_FLAG_OUTPUT | HEVC_FRAME_FLAG_BUMPING);
else
ff_hevc_unref_frame(s, frame, HEVC_FRAME_FLAG_OUTPUT);
ff_hevc_unref_frame(frame, HEVC_FRAME_FLAG_OUTPUT);
if (ret < 0)
return ret;
@ -532,7 +532,7 @@ int ff_hevc_frame_rps(HEVCContext *s)
fail:
/* release any frames that are now unused */
for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++)
ff_hevc_unref_frame(s, &s->DPB[i], 0);
ff_hevc_unref_frame(&s->DPB[i], 0);
return ret;
}
@ -2919,7 +2919,7 @@ static int hevc_frame_start(HEVCContext *s)
fail:
if (s->ref)
ff_hevc_unref_frame(s, s->ref, ~0);
ff_hevc_unref_frame(s->ref, ~0);
s->ref = NULL;
return ret;
}
@ -3360,7 +3360,7 @@ static int hevc_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
if (s->ref && (ret = FF_HW_SIMPLE_CALL(avctx, end_frame)) < 0) {
av_log(avctx, AV_LOG_ERROR,
"hardware accelerator failed to decode picture\n");
ff_hevc_unref_frame(s, s->ref, ~0);
ff_hevc_unref_frame(s->ref, ~0);
return ret;
}
} else {
@ -3369,7 +3369,7 @@ static int hevc_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
s->sei.picture_hash.is_md5) {
ret = verify_md5(s, s->ref->frame);
if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
ff_hevc_unref_frame(s, s->ref, ~0);
ff_hevc_unref_frame(s->ref, ~0);
return ret;
}
}
@ -3389,7 +3389,7 @@ static int hevc_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
return avpkt->size;
}
static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
static int hevc_ref_frame(HEVCFrame *dst, HEVCFrame *src)
{
int ret;
@ -3427,7 +3427,7 @@ static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
return 0;
fail:
ff_hevc_unref_frame(s, dst, ~0);
ff_hevc_unref_frame(dst, ~0);
return AVERROR(ENOMEM);
}
@ -3450,7 +3450,7 @@ static av_cold int hevc_decode_free(AVCodecContext *avctx)
av_frame_free(&s->output_frame);
for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
ff_hevc_unref_frame(s, &s->DPB[i], ~0);
ff_hevc_unref_frame(&s->DPB[i], ~0);
av_frame_free(&s->DPB[i].frame);
av_frame_free(&s->DPB[i].frame_grain);
}
@ -3532,9 +3532,9 @@ static int hevc_update_thread_context(AVCodecContext *dst,
int i, ret;
for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
ff_hevc_unref_frame(s, &s->DPB[i], ~0);
ff_hevc_unref_frame(&s->DPB[i], ~0);
if (s0->DPB[i].frame->buf[0]) {
ret = hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]);
ret = hevc_ref_frame(&s->DPB[i], &s0->DPB[i]);
if (ret < 0)
return ret;
}
@ -690,7 +690,7 @@ int ff_hevc_output_frame(HEVCContext *s, AVFrame *frame, int flush);
void ff_hevc_bump_frame(HEVCContext *s);
void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags);
void ff_hevc_unref_frame(HEVCFrame *frame, int flags);
void ff_hevc_set_neighbour_available(HEVCLocalContext *lc, int x0, int y0,
int nPbW, int nPbH);
@ -111,7 +111,7 @@ static av_cold int mimic_decode_end(AVCodecContext *avctx)
for (i = 0; i < FF_ARRAY_ELEMS(ctx->frames); i++) {
if (ctx->frames[i].f)
ff_thread_release_ext_buffer(avctx, &ctx->frames[i]);
ff_thread_release_ext_buffer(&ctx->frames[i]);
av_frame_free(&ctx->frames[i].f);
}
@ -163,7 +163,7 @@ static int mimic_decode_update_thread_context(AVCodecContext *avctx, const AVCod
dst->prev_index = src->next_prev_index;
for (i = 0; i < FF_ARRAY_ELEMS(dst->frames); i++) {
ff_thread_release_ext_buffer(avctx, &dst->frames[i]);
ff_thread_release_ext_buffer(&dst->frames[i]);
if (i != src->next_cur_index && src->frames[i].f->data[0]) {
ret = ff_thread_ref_frame(&dst->frames[i], &src->frames[i]);
if (ret < 0)
@ -395,7 +395,7 @@ static int mimic_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
return AVERROR_INVALIDDATA;
}
ff_thread_release_ext_buffer(avctx, &ctx->frames[ctx->cur_index]);
ff_thread_release_ext_buffer(&ctx->frames[ctx->cur_index]);
ctx->frames[ctx->cur_index].f->pict_type = is_pframe ? AV_PICTURE_TYPE_P :
AV_PICTURE_TYPE_I;
if ((res = ff_thread_get_ext_buffer(avctx, &ctx->frames[ctx->cur_index],
@ -420,7 +420,7 @@ static int mimic_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
ff_thread_report_progress(&ctx->frames[ctx->cur_index], INT_MAX, 0);
if (res < 0) {
if (!(avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_release_ext_buffer(avctx, &ctx->frames[ctx->cur_index]);
ff_thread_release_ext_buffer(&ctx->frames[ctx->cur_index]);
return res;
}
@ -136,7 +136,7 @@ static int handle_pic_linesizes(AVCodecContext *avctx, Picture *pic,
"get_buffer() failed (stride changed: linesize=%d/%d uvlinesize=%d/%d)\n",
linesize, pic->f->linesize[0],
uvlinesize, pic->f->linesize[1]);
ff_mpeg_unref_picture(avctx, pic);
ff_mpeg_unref_picture(pic);
return -1;
}
@ -144,7 +144,7 @@ static int handle_pic_linesizes(AVCodecContext *avctx, Picture *pic,
pic->f->linesize[1] != pic->f->linesize[2]) {
av_log(avctx, AV_LOG_ERROR,
"get_buffer() failed (uv stride mismatch)\n");
ff_mpeg_unref_picture(avctx, pic);
ff_mpeg_unref_picture(pic);
return -1;
}
@ -153,7 +153,7 @@ static int handle_pic_linesizes(AVCodecContext *avctx, Picture *pic,
pic->f->linesize[0])) < 0) {
av_log(avctx, AV_LOG_ERROR,
"get_buffer() failed to allocate context scratch buffers.\n");
ff_mpeg_unref_picture(avctx, pic);
ff_mpeg_unref_picture(pic);
return ret;
}
@ -241,7 +241,7 @@ int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me,
return 0;
fail:
av_log(avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
ff_mpeg_unref_picture(avctx, pic);
ff_mpeg_unref_picture(pic);
free_picture_tables(pic);
return AVERROR(ENOMEM);
}
@ -250,15 +250,10 @@ fail:
* Deallocate a picture; frees the picture tables in case they
* need to be reallocated anyway.
*/
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
void ff_mpeg_unref_picture(Picture *pic)
{
pic->tf.f = pic->f;
if (avctx->codec_id != AV_CODEC_ID_WMV3IMAGE &&
avctx->codec_id != AV_CODEC_ID_VC1IMAGE &&
avctx->codec_id != AV_CODEC_ID_MSS2)
ff_thread_release_ext_buffer(avctx, &pic->tf);
else if (pic->f)
av_frame_unref(pic->f);
ff_thread_release_ext_buffer(&pic->tf);
ff_refstruct_unref(&pic->hwaccel_picture_private);
@ -306,7 +301,7 @@ int ff_update_picture_tables(Picture *dst, const Picture *src)
return 0;
}
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
int ff_mpeg_ref_picture(Picture *dst, Picture *src)
{
int ret;
@ -336,7 +331,7 @@ int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
return 0;
fail:
ff_mpeg_unref_picture(avctx, dst);
ff_mpeg_unref_picture(dst);
return ret;
}
@ -388,15 +383,15 @@ int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
if (picture[ret].needs_realloc) {
ff_mpeg_unref_picture(avctx, &picture[ret]);
ff_mpeg_unref_picture(&picture[ret]);
}
}
return ret;
}
void av_cold ff_mpv_picture_free(AVCodecContext *avctx, Picture *pic)
void av_cold ff_mpv_picture_free(Picture *pic)
{
free_picture_tables(pic);
ff_mpeg_unref_picture(avctx, pic);
ff_mpeg_unref_picture(pic);
av_frame_free(&pic->f);
}
@ -92,10 +92,10 @@ int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me,
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me,
ScratchpadContext *sc, int linesize);
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src);
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *picture);
int ff_mpeg_ref_picture(Picture *dst, Picture *src);
void ff_mpeg_unref_picture(Picture *picture);
void ff_mpv_picture_free(AVCodecContext *avctx, Picture *pic);
void ff_mpv_picture_free(Picture *pic);
int ff_update_picture_tables(Picture *dst, const Picture *src);
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared);
@ -793,12 +793,12 @@ void ff_mpv_common_end(MpegEncContext *s)
if (s->picture) {
for (int i = 0; i < MAX_PICTURE_COUNT; i++)
ff_mpv_picture_free(s->avctx, &s->picture[i]);
ff_mpv_picture_free(&s->picture[i]);
}
av_freep(&s->picture);
ff_mpv_picture_free(s->avctx, &s->last_picture);
ff_mpv_picture_free(s->avctx, &s->current_picture);
ff_mpv_picture_free(s->avctx, &s->next_picture);
ff_mpv_picture_free(&s->last_picture);
ff_mpv_picture_free(&s->current_picture);
ff_mpv_picture_free(&s->next_picture);
s->context_initialized = 0;
s->context_reinit = 0;
@ -106,17 +106,17 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
av_assert0(!s->picture || s->picture != s1->picture);
if (s->picture)
for (int i = 0; i < MAX_PICTURE_COUNT; i++) {
ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
ff_mpeg_unref_picture(&s->picture[i]);
if (s1->picture && s1->picture[i].f->buf[0] &&
(ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
(ret = ff_mpeg_ref_picture(&s->picture[i], &s1->picture[i])) < 0)
return ret;
}
#define UPDATE_PICTURE(pic)\
do {\
ff_mpeg_unref_picture(s->avctx, &s->pic);\
ff_mpeg_unref_picture(&s->pic);\
if (s1->pic.f && s1->pic.f->buf[0])\
ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
ret = ff_mpeg_ref_picture(&s->pic, &s1->pic);\
else\
ret = ff_update_picture_tables(&s->pic, &s1->pic);\
if (ret < 0)\
@ -266,7 +266,7 @@ static int alloc_picture(MpegEncContext *s, Picture *pic)
s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
&s->linesize, &s->uvlinesize);
fail:
ff_mpeg_unref_picture(avctx, pic);
ff_mpeg_unref_picture(pic);
return ret;
}
@ -308,7 +308,7 @@ int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
s->last_picture_ptr != s->next_picture_ptr &&
s->last_picture_ptr->f->buf[0]) {
ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
ff_mpeg_unref_picture(s->last_picture_ptr);
}
/* release non reference/forgotten frames */
@ -317,13 +317,13 @@ int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
(&s->picture[i] != s->last_picture_ptr &&
&s->picture[i] != s->next_picture_ptr &&
!s->picture[i].needs_realloc)) {
ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
ff_mpeg_unref_picture(&s->picture[i]);
}
}
ff_mpeg_unref_picture(s->avctx, &s->current_picture);
ff_mpeg_unref_picture(s->avctx, &s->last_picture);
ff_mpeg_unref_picture(s->avctx, &s->next_picture);
ff_mpeg_unref_picture(&s->current_picture);
ff_mpeg_unref_picture(&s->last_picture);
ff_mpeg_unref_picture(&s->next_picture);
if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
// we already have an unused image
@ -372,7 +372,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
else
s->current_picture_ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
if ((ret = ff_mpeg_ref_picture(&s->current_picture,
s->current_picture_ptr)) < 0)
return ret;
@ -446,13 +446,13 @@ FF_ENABLE_DEPRECATION_WARNINGS
if (s->last_picture_ptr) {
if (s->last_picture_ptr->f->buf[0] &&
(ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
(ret = ff_mpeg_ref_picture(&s->last_picture,
s->last_picture_ptr)) < 0)
return ret;
}
if (s->next_picture_ptr) {
if (s->next_picture_ptr->f->buf[0] &&
(ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
(ret = ff_mpeg_ref_picture(&s->next_picture,
s->next_picture_ptr)) < 0)
return ret;
}
@ -554,12 +554,12 @@ void ff_mpeg_flush(AVCodecContext *avctx)
return;
for (int i = 0; i < MAX_PICTURE_COUNT; i++)
ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
ff_mpeg_unref_picture(&s->picture[i]);
s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
ff_mpeg_unref_picture(s->avctx, &s->current_picture);
ff_mpeg_unref_picture(s->avctx, &s->last_picture);
ff_mpeg_unref_picture(s->avctx, &s->next_picture);
ff_mpeg_unref_picture(&s->current_picture);
ff_mpeg_unref_picture(&s->last_picture);
ff_mpeg_unref_picture(&s->next_picture);
s->mb_x = s->mb_y = 0;
@ -1189,7 +1189,7 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
return ret;
ret = av_frame_copy_props(pic->f, pic_arg);
if (ret < 0) {
ff_mpeg_unref_picture(s->avctx, pic);
ff_mpeg_unref_picture(pic);
return ret;
}
@ -1544,7 +1544,7 @@ static int select_input_picture(MpegEncContext *s)
} else if (s->b_frame_strategy == 2) {
b_frames = estimate_best_b_count(s);
if (b_frames < 0) {
ff_mpeg_unref_picture(s->avctx, s->input_picture[0]);
ff_mpeg_unref_picture(s->input_picture[0]);
return b_frames;
}
}
@ -1620,7 +1620,7 @@ no_output_pic:
ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
if (ret < 0) {
ff_mpeg_unref_picture(s->avctx, pic);
ff_mpeg_unref_picture(pic);
goto fail;
}
pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number;
@ -1644,7 +1644,7 @@ no_output_pic:
}
return 0;
fail:
ff_mpeg_unref_picture(s->avctx, s->reordered_input_picture[0]);
ff_mpeg_unref_picture(s->reordered_input_picture[0]);
return ret;
}
@ -1713,13 +1713,13 @@ static int frame_start(MpegEncContext *s)
if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
s->last_picture_ptr != s->next_picture_ptr &&
s->last_picture_ptr->f->buf[0]) {
ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
ff_mpeg_unref_picture(s->last_picture_ptr);
}
s->current_picture_ptr->f->pict_type = s->pict_type;
ff_mpeg_unref_picture(s->avctx, &s->current_picture);
if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
ff_mpeg_unref_picture(&s->current_picture);
if ((ret = ff_mpeg_ref_picture(&s->current_picture,
s->current_picture_ptr)) < 0)
return ret;
@ -1729,16 +1729,16 @@ static int frame_start(MpegEncContext *s)
}
if (s->last_picture_ptr) {
ff_mpeg_unref_picture(s->avctx, &s->last_picture);
ff_mpeg_unref_picture(&s->last_picture);
if (s->last_picture_ptr->f->buf[0] &&
(ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
(ret = ff_mpeg_ref_picture(&s->last_picture,
s->last_picture_ptr)) < 0)
return ret;
}
if (s->next_picture_ptr) {
ff_mpeg_unref_picture(s->avctx, &s->next_picture);
ff_mpeg_unref_picture(&s->next_picture);
if (s->next_picture_ptr->f->buf[0] &&
(ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
(ret = ff_mpeg_ref_picture(&s->next_picture,
s->next_picture_ptr)) < 0)
return ret;
}
@ -1990,7 +1990,7 @@ vbv_retry:
/* release non-reference frames */
for (i = 0; i < MAX_PICTURE_COUNT; i++) {
if (!s->picture[i].reference)
ff_mpeg_unref_picture(avctx, &s->picture[i]);
ff_mpeg_unref_picture(&s->picture[i]);
}
av_assert1((s->frame_bits & 7) == 0);
@ -815,7 +815,7 @@ static int decode_idat_chunk(AVCodecContext *avctx, PNGDecContext *s,
s->bpp += byte_depth;
}
ff_thread_release_ext_buffer(avctx, &s->picture);
ff_thread_release_ext_buffer(&s->picture);
if (s->dispose_op == APNG_DISPOSE_OP_PREVIOUS) {
/* We only need a buffer for the current picture. */
ret = ff_thread_get_buffer(avctx, p, 0);
@ -1703,7 +1703,7 @@ static int decode_frame_png(AVCodecContext *avctx, AVFrame *p,
goto the_end;
if (!(avctx->active_thread_type & FF_THREAD_FRAME)) {
ff_thread_release_ext_buffer(avctx, &s->last_picture);
ff_thread_release_ext_buffer(&s->last_picture);
FFSWAP(ThreadFrame, s->picture, s->last_picture);
}
@ -1756,9 +1756,9 @@ static int decode_frame_apng(AVCodecContext *avctx, AVFrame *p,
if (!(avctx->active_thread_type & FF_THREAD_FRAME)) {
if (s->dispose_op == APNG_DISPOSE_OP_PREVIOUS) {
ff_thread_release_ext_buffer(avctx, &s->picture);
ff_thread_release_ext_buffer(&s->picture);
} else {
ff_thread_release_ext_buffer(avctx, &s->last_picture);
ff_thread_release_ext_buffer(&s->last_picture);
FFSWAP(ThreadFrame, s->picture, s->last_picture);
}
}
@ -1799,7 +1799,7 @@ static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
src_frame = psrc->dispose_op == APNG_DISPOSE_OP_PREVIOUS ?
&psrc->last_picture : &psrc->picture;
ff_thread_release_ext_buffer(dst, &pdst->last_picture);
ff_thread_release_ext_buffer(&pdst->last_picture);
if (src_frame && src_frame->f->data[0]) {
ret = ff_thread_ref_frame(&pdst->last_picture, src_frame);
if (ret < 0)
@ -1831,9 +1831,9 @@ static av_cold int png_dec_end(AVCodecContext *avctx)
{
PNGDecContext *s = avctx->priv_data;
ff_thread_release_ext_buffer(avctx, &s->last_picture);
ff_thread_release_ext_buffer(&s->last_picture);
av_frame_free(&s->last_picture.f);
ff_thread_release_ext_buffer(avctx, &s->picture);
ff_thread_release_ext_buffer(&s->picture);
av_frame_free(&s->picture.f);
av_freep(&s->buffer);
s->buffer_size = 0;
@ -223,7 +223,7 @@ static attribute_align_arg void *frame_worker_thread(void *arg)
p->result = codec->cb.decode(avctx, p->frame, &p->got_frame, p->avpkt);
if ((p->result < 0 || !p->got_frame) && p->frame->buf[0])
ff_thread_release_buffer(avctx, p->frame);
av_frame_unref(p->frame);
if (atomic_load(&p->state) == STATE_SETTING_UP)
ff_thread_finish_setup(avctx);
@ -1009,20 +1009,10 @@ int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
return ret;
}
void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
{
if (!f)
return;
if (avctx->debug & FF_DEBUG_BUFFERS)
av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);
av_frame_unref(f);
}
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
void ff_thread_release_ext_buffer(ThreadFrame *f)
{
ff_refstruct_unref(&f->progress);
f->owner[0] = f->owner[1] = NULL;
ff_thread_release_buffer(avctx, f->f);
if (f->f)
av_frame_unref(f->f);
}
@ -74,14 +74,6 @@ void ff_thread_finish_setup(AVCodecContext *avctx);
*/
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags);
/**
* Wrapper around av_frame_unref() for frame-threaded codecs.
*
* @param avctx The current context.
* @param f The picture being released.
*/
void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f);
int ff_thread_init(AVCodecContext *s);
int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx,
int (*action_func2)(AVCodecContext *c, void *arg, int jobnr, int threadnr),
@ -78,11 +78,10 @@ int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags);
* @param avctx The current context.
* @param f The picture being released.
*/
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f);
void ff_thread_release_ext_buffer(ThreadFrame *f);
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src);
int ff_thread_replace_frame(AVCodecContext *avctx, ThreadFrame *dst,
const ThreadFrame *src);
int ff_thread_replace_frame(ThreadFrame *dst, const ThreadFrame *src);
#endif
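The two header hunks above leave the ThreadFrame helpers without any AVCodecContext argument. A hedged sketch of how an update_thread_context() callback uses the resulting API (MyDecContext and its fields are hypothetical; the pattern mirrors the update_thread_context() changes elsewhere in this commit and assumes libavcodec's internal threadframe.h):

    #include "avcodec.h"
    #include "threadframe.h"

    typedef struct MyDecContext {
        ThreadFrame cur_frame, last_frame;
    } MyDecContext;

    static int my_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
    {
        MyDecContext *d = dst->priv_data;
        const MyDecContext *s = src->priv_data;
        int ret;

        ff_thread_release_ext_buffer(&d->last_frame);        /* no avctx parameter */
        if (s->last_frame.f->buf[0] &&
            (ret = ff_thread_ref_frame(&d->last_frame, &s->last_frame)) < 0)
            return ret;

        /* ff_thread_replace_frame() likewise lost its context argument. */
        return ff_thread_replace_frame(&d->cur_frame, &s->cur_frame);
    }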
@ -887,8 +887,7 @@ int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
return 0;
}
int ff_thread_replace_frame(AVCodecContext *avctx, ThreadFrame *dst,
const ThreadFrame *src)
int ff_thread_replace_frame(ThreadFrame *dst, const ThreadFrame *src)
{
int ret;
@ -917,13 +916,7 @@ int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
return ff_get_buffer(avctx, f->f, flags);
}
void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
{
if (f)
av_frame_unref(f);
}
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
void ff_thread_release_ext_buffer(ThreadFrame *f)
{
f->owner[0] = f->owner[1] = NULL;
if (f->f)
@ -98,15 +98,10 @@ static int vaapi_av1_decode_uninit(AVCodecContext *avctx)
{
VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
if (ctx->tmp_frame->buf[0])
ff_thread_release_buffer(avctx, ctx->tmp_frame);
av_frame_free(&ctx->tmp_frame);
for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
if (ctx->ref_tab[i].frame->buf[0])
ff_thread_release_buffer(avctx, ctx->ref_tab[i].frame);
for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++)
av_frame_free(&ctx->ref_tab[i].frame);
}
return ff_vaapi_decode_uninit(avctx);
}
@ -137,7 +132,7 @@ static int vaapi_av1_start_frame(AVCodecContext *avctx,
if (apply_grain) {
if (ctx->tmp_frame->buf[0])
ff_thread_release_buffer(avctx, ctx->tmp_frame);
av_frame_unref(ctx->tmp_frame);
err = ff_thread_get_buffer(avctx, ctx->tmp_frame, AV_GET_BUFFER_FLAG_REF);
if (err < 0)
goto fail;
@ -382,7 +377,7 @@ static int vaapi_av1_end_frame(AVCodecContext *avctx)
for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
if (header->refresh_frame_flags & (1 << i)) {
if (ctx->ref_tab[i].frame->buf[0])
ff_thread_release_buffer(avctx, ctx->ref_tab[i].frame);
av_frame_unref(ctx->ref_tab[i].frame);
if (apply_grain) {
ret = av_frame_ref(ctx->ref_tab[i].frame, ctx->tmp_frame);
@ -337,11 +337,11 @@ static void vp3_decode_flush(AVCodecContext *avctx)
Vp3DecodeContext *s = avctx->priv_data;
if (s->golden_frame.f)
ff_thread_release_ext_buffer(avctx, &s->golden_frame);
ff_thread_release_ext_buffer(&s->golden_frame);
if (s->last_frame.f)
ff_thread_release_ext_buffer(avctx, &s->last_frame);
ff_thread_release_ext_buffer(&s->last_frame);
if (s->current_frame.f)
ff_thread_release_ext_buffer(avctx, &s->current_frame);
ff_thread_release_ext_buffer(&s->current_frame);
}
static av_cold int vp3_decode_end(AVCodecContext *avctx)
@ -2499,20 +2499,20 @@ static int update_frames(AVCodecContext *avctx)
int ret = 0;
if (s->keyframe) {
ff_thread_release_ext_buffer(avctx, &s->golden_frame);
ff_thread_release_ext_buffer(&s->golden_frame);
ret = ff_thread_ref_frame(&s->golden_frame, &s->current_frame);
}
/* shuffle frames */
ff_thread_release_ext_buffer(avctx, &s->last_frame);
ff_thread_release_ext_buffer(&s->last_frame);
FFSWAP(ThreadFrame, s->last_frame, s->current_frame);
return ret;
}
#if HAVE_THREADS
static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, const ThreadFrame *src)
static int ref_frame(ThreadFrame *dst, const ThreadFrame *src)
{
ff_thread_release_ext_buffer(s->avctx, dst);
ff_thread_release_ext_buffer(dst);
if (src->f->data[0])
return ff_thread_ref_frame(dst, src);
return 0;
@ -2521,9 +2521,9 @@ static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, const ThreadFrame *s
static int ref_frames(Vp3DecodeContext *dst, const Vp3DecodeContext *src)
{
int ret;
if ((ret = ref_frame(dst, &dst->current_frame, &src->current_frame)) < 0 ||
(ret = ref_frame(dst, &dst->golden_frame, &src->golden_frame)) < 0 ||
(ret = ref_frame(dst, &dst->last_frame, &src->last_frame)) < 0)
if ((ret = ref_frame(&dst->current_frame, &src->current_frame)) < 0 ||
(ret = ref_frame(&dst->golden_frame, &src->golden_frame)) < 0 ||
(ret = ref_frame(&dst->last_frame, &src->last_frame)) < 0)
return ret;
return 0;
}
@ -2732,7 +2732,7 @@ static int vp3_decode_frame(AVCodecContext *avctx, AVFrame *frame,
if ((ret = ff_thread_get_ext_buffer(avctx, &s->golden_frame,
AV_GET_BUFFER_FLAG_REF)) < 0)
goto error;
ff_thread_release_ext_buffer(avctx, &s->last_frame);
ff_thread_release_ext_buffer(&s->last_frame);
if ((ret = ff_thread_ref_frame(&s->last_frame,
&s->golden_frame)) < 0)
goto error;
@ -116,23 +116,23 @@ static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
fail:
ff_refstruct_unref(&f->seg_map);
ff_thread_release_ext_buffer(s->avctx, &f->tf);
ff_thread_release_ext_buffer(&f->tf);
return ret;
}
static void vp8_release_frame(VP8Context *s, VP8Frame *f)
static void vp8_release_frame(VP8Frame *f)
{
ff_refstruct_unref(&f->seg_map);
ff_refstruct_unref(&f->hwaccel_picture_private);
ff_thread_release_ext_buffer(s->avctx, &f->tf);
ff_thread_release_ext_buffer(&f->tf);
}
#if CONFIG_VP8_DECODER
static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, const VP8Frame *src)
static int vp8_ref_frame(VP8Frame *dst, const VP8Frame *src)
{
int ret;
vp8_release_frame(s, dst);
vp8_release_frame(dst);
if ((ret = ff_thread_ref_frame(&dst->tf, &src->tf)) < 0)
return ret;
@ -150,7 +150,7 @@ static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
int i;
for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
vp8_release_frame(s, &s->frames[i]);
vp8_release_frame(&s->frames[i]);
memset(s->framep, 0, sizeof(s->framep));
if (free_mem)
@ -184,7 +184,7 @@ static VP8Frame *vp8_find_free_buffer(VP8Context *s)
abort();
}
if (frame->tf.f->buf[0])
vp8_release_frame(s, frame);
vp8_release_frame(frame);
return frame;
}
@ -2699,7 +2699,7 @@ int vp78_decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame,
&s->frames[i] != s->framep[VP8_FRAME_PREVIOUS] &&
&s->frames[i] != s->framep[VP8_FRAME_GOLDEN] &&
&s->frames[i] != s->framep[VP8_FRAME_ALTREF])
vp8_release_frame(s, &s->frames[i]);
vp8_release_frame(&s->frames[i]);
curframe = s->framep[VP8_FRAME_CURRENT] = vp8_find_free_buffer(s);
@ -2950,7 +2950,7 @@ static int vp8_decode_update_thread_context(AVCodecContext *dst,
for (i = 0; i < FF_ARRAY_ELEMS(s_src->frames); i++) {
if (s_src->frames[i].tf.f->buf[0]) {
int ret = vp8_ref_frame(s, &s->frames[i], &s_src->frames[i]);
int ret = vp8_ref_frame(&s->frames[i], &s_src->frames[i]);
if (ret < 0)
return ret;
}
@ -97,9 +97,9 @@ static void vp9_tile_data_free(VP9TileData *td)
av_freep(&td->block_structure);
}
static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
static void vp9_frame_unref(VP9Frame *f)
{
ff_thread_release_ext_buffer(avctx, &f->tf);
ff_thread_release_ext_buffer(&f->tf);
av_buffer_unref(&f->extradata);
ff_refstruct_unref(&f->hwaccel_picture_private);
f->segmentation_map = NULL;
@ -142,11 +142,11 @@ static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
return 0;
fail:
vp9_frame_unref(avctx, f);
vp9_frame_unref(f);
return ret;
}
static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src)
static int vp9_frame_ref(VP9Frame *dst, VP9Frame *src)
{
int ret;
@ -168,7 +168,7 @@ static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src)
return 0;
fail:
vp9_frame_unref(avctx, dst);
vp9_frame_unref(dst);
return AVERROR(ENOMEM);
}
@ -1242,14 +1242,14 @@ static av_cold int vp9_decode_free(AVCodecContext *avctx)
int i;
for (i = 0; i < 3; i++) {
vp9_frame_unref(avctx, &s->s.frames[i]);
vp9_frame_unref(&s->s.frames[i]);
av_frame_free(&s->s.frames[i].tf.f);
}
av_buffer_pool_uninit(&s->frame_extradata_pool);
for (i = 0; i < 8; i++) {
ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
ff_thread_release_ext_buffer(&s->s.refs[i]);
av_frame_free(&s->s.refs[i].f);
ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
ff_thread_release_ext_buffer(&s->next_refs[i]);
av_frame_free(&s->next_refs[i].f);
}
@ -1577,7 +1577,7 @@ static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame,
frame->pkt_dts = pkt->dts;
for (i = 0; i < 8; i++) {
if (s->next_refs[i].f->buf[0])
ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
ff_thread_release_ext_buffer(&s->next_refs[i]);
if (s->s.refs[i].f->buf[0] &&
(ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
return ret;
@ -1590,18 +1590,18 @@ static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame,
if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) {
if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0])
vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
vp9_frame_unref(&s->s.frames[REF_FRAME_SEGMAP]);
if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
(ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
(ret = vp9_frame_ref(&s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
return ret;
}
if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0])
vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_MVPAIR]);
vp9_frame_unref(&s->s.frames[REF_FRAME_MVPAIR]);
if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
(ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
(ret = vp9_frame_ref(&s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
return ret;
if (s->s.frames[CUR_FRAME].tf.f->buf[0])
vp9_frame_unref(avctx, &s->s.frames[CUR_FRAME]);
vp9_frame_unref(&s->s.frames[CUR_FRAME]);
if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
return ret;
f = s->s.frames[CUR_FRAME].tf.f;
@ -1614,13 +1614,13 @@ static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame,
if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] &&
(s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
vp9_frame_unref(&s->s.frames[REF_FRAME_SEGMAP]);
}
// ref frame setup
for (i = 0; i < 8; i++) {
if (s->next_refs[i].f->buf[0])
ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
ff_thread_release_ext_buffer(&s->next_refs[i]);
if (s->s.h.refreshrefmask & (1 << i)) {
ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
} else if (s->s.refs[i].f->buf[0]) {
@ -1770,7 +1770,7 @@ finish:
// ref frame setup
for (i = 0; i < 8; i++) {
if (s->s.refs[i].f->buf[0])
ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
ff_thread_release_ext_buffer(&s->s.refs[i]);
if (s->next_refs[i].f->buf[0] &&
(ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
return ret;
@ -1791,9 +1791,9 @@ static void vp9_decode_flush(AVCodecContext *avctx)
int i;
for (i = 0; i < 3; i++)
vp9_frame_unref(avctx, &s->s.frames[i]);
vp9_frame_unref(&s->s.frames[i]);
for (i = 0; i < 8; i++)
ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
ff_thread_release_ext_buffer(&s->s.refs[i]);
if (FF_HW_HAS_CB(avctx, flush))
FF_HW_SIMPLE_CALL(avctx, flush);
@ -1837,15 +1837,15 @@ static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecCo
for (i = 0; i < 3; i++) {
if (s->s.frames[i].tf.f->buf[0])
vp9_frame_unref(dst, &s->s.frames[i]);
vp9_frame_unref(&s->s.frames[i]);
if (ssrc->s.frames[i].tf.f->buf[0]) {
if ((ret = vp9_frame_ref(dst, &s->s.frames[i], &ssrc->s.frames[i])) < 0)
if ((ret = vp9_frame_ref(&s->s.frames[i], &ssrc->s.frames[i])) < 0)
return ret;
}
}
for (i = 0; i < 8; i++) {
if (s->s.refs[i].f->buf[0])
ff_thread_release_ext_buffer(dst, &s->s.refs[i]);
ff_thread_release_ext_buffer(&s->s.refs[i]);
if (ssrc->next_refs[i].f->buf[0]) {
if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
return ret;
@ -1020,7 +1020,7 @@ static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
if (dst == src)
return 0;
ff_thread_release_ext_buffer(dst, &fdst->curr_frame);
ff_thread_release_ext_buffer(&fdst->curr_frame);
if (fsrc->curr_frame.f->data[0]) {
if ((ret = ff_thread_ref_frame(&fdst->curr_frame, &fsrc->curr_frame)) < 0)
return ret;
@ -1061,10 +1061,10 @@ static av_cold int wavpack_decode_end(AVCodecContext *avctx)
av_freep(&s->fdec);
s->fdec_num = 0;
ff_thread_release_ext_buffer(avctx, &s->curr_frame);
ff_thread_release_ext_buffer(&s->curr_frame);
av_frame_free(&s->curr_frame.f);
ff_thread_release_ext_buffer(avctx, &s->prev_frame);
ff_thread_release_ext_buffer(&s->prev_frame);
av_frame_free(&s->prev_frame.f);
ff_refstruct_unref(&s->dsdctx);
@ -1526,14 +1526,14 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
av_log(avctx, AV_LOG_ERROR, "Error reinitializing the DSD context\n");
return ret;
}
ff_thread_release_ext_buffer(avctx, &wc->curr_frame);
ff_thread_release_ext_buffer(&wc->curr_frame);
}
av_channel_layout_copy(&avctx->ch_layout, &new_ch_layout);
avctx->sample_rate = new_samplerate;
avctx->sample_fmt = sample_fmt;
avctx->bits_per_raw_sample = orig_bpp;
ff_thread_release_ext_buffer(avctx, &wc->prev_frame);
ff_thread_release_ext_buffer(&wc->prev_frame);
FFSWAP(ThreadFrame, wc->curr_frame, wc->prev_frame);
/* get output buffer */
@ -1664,7 +1664,7 @@ static int wavpack_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
}
ff_thread_await_progress(&s->prev_frame, INT_MAX, 0);
ff_thread_release_ext_buffer(avctx, &s->prev_frame);
ff_thread_release_ext_buffer(&s->prev_frame);
if (s->modulation == MODULATION_DSD)
avctx->execute2(avctx, dsd_channel, s->frame, NULL, avctx->ch_layout.nb_channels);
@ -1681,7 +1681,7 @@ static int wavpack_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
error:
if (s->frame) {
ff_thread_await_progress(&s->prev_frame, INT_MAX, 0);
ff_thread_release_ext_buffer(avctx, &s->prev_frame);
ff_thread_release_ext_buffer(&s->prev_frame);
ff_thread_report_progress(&s->curr_frame, INT_MAX, 0);
}