avcodec/thread: Don't use ThreadFrame when unnecessary
The majority of frame-threaded decoders (mainly the intra-only ones) need exactly one part of ThreadFrame: the AVFrame. They need neither the owners nor the progress, yet they had to use ThreadFrame because ff_thread_(get|release)_buffer() required it. This commit changes these functions to work with ordinary AVFrames; the decoders that need the extra fields for progress use ff_thread_(get|release)_ext_buffer(), which work exactly as ff_thread_(get|release)_buffer() used to.

This also avoids some unnecessary allocations of progress AVBuffers, namely for H.264 and HEVC film-grain frames: these frames are not used for synchronization and therefore don't need a ThreadFrame.

Also move the ThreadFrame structure as well as ff_thread_ref_frame() to threadframe.h, the header for frame-threaded decoders with inter-frame dependencies.

Reviewed-by: Anton Khirnov <anton@khirnov.net>
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
commit 02220b88fc
parent f025b8e110
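To make the new calling convention concrete, here is a minimal sketch for a decoder without inter-frame dependencies, next to the progress-tracking variant. The decoder function and context names are hypothetical; only the ff_thread_* calls and AV_GET_BUFFER_FLAG_REF follow this commit:

    /* Sketch only: foo_decode_frame() is a hypothetical intra-only decoder.
     * Before this commit it had to wrap its output frame in a ThreadFrame:
     *
     *     ThreadFrame frame = { .f = data };
     *     ret = ff_thread_get_buffer(avctx, &frame, 0);
     *
     * After this commit the AVFrame is passed directly: */
    static int foo_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame, AVPacket *avpkt)
    {
        AVFrame *const frame = data;
        int ret;

        if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
            return ret;
        /* ... decode avpkt into frame ... */
        *got_frame = 1;
        return avpkt->size;
    }

    /* Decoders with inter-frame dependencies keep a ThreadFrame and use the
     * _ext_ variants, which behave as ff_thread_(get|release)_buffer() used to:
     *
     *     ret = ff_thread_get_ext_buffer(avctx, &pic->tf, AV_GET_BUFFER_FLAG_REF);
     *     ...
     *     ff_thread_release_ext_buffer(avctx, &pic->tf);
     */

The per-file hunks follow (the extraction dropped the file headers; blank lines separate the affected files).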
@ -391,7 +391,6 @@ static int aic_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
|||||||
uint32_t off;
|
uint32_t off;
|
||||||
int x, y, ret;
|
int x, y, ret;
|
||||||
int slice_size;
|
int slice_size;
|
||||||
ThreadFrame frame = { .f = data };
|
|
||||||
|
|
||||||
ctx->frame = data;
|
ctx->frame = data;
|
||||||
ctx->frame->pict_type = AV_PICTURE_TYPE_I;
|
ctx->frame->pict_type = AV_PICTURE_TYPE_I;
|
||||||
@ -410,7 +409,7 @@ static int aic_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, ctx->frame, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
bytestream2_init(&gb, buf + AIC_HDR_SIZE,
|
bytestream2_init(&gb, buf + AIC_HDR_SIZE,
|
||||||
|
@ -270,10 +270,9 @@ static int decode_element(AVCodecContext *avctx, AVFrame *frame, int ch_index,
|
|||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
if (!alac->nb_samples) {
|
if (!alac->nb_samples) {
|
||||||
ThreadFrame tframe = { .f = frame };
|
|
||||||
/* get output buffer */
|
/* get output buffer */
|
||||||
frame->nb_samples = output_samples;
|
frame->nb_samples = output_samples;
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
} else if (output_samples != alac->nb_samples) {
|
} else if (output_samples != alac->nb_samples) {
|
||||||
av_log(avctx, AV_LOG_ERROR, "sample count mismatch: %"PRIu32" != %d\n",
|
av_log(avctx, AV_LOG_ERROR, "sample count mismatch: %"PRIu32" != %d\n",
|
||||||
|
@ -27,6 +27,7 @@
|
|||||||
#include "hwconfig.h"
|
#include "hwconfig.h"
|
||||||
#include "internal.h"
|
#include "internal.h"
|
||||||
#include "profiles.h"
|
#include "profiles.h"
|
||||||
|
#include "thread.h"
|
||||||
|
|
||||||
/**< same with Div_Lut defined in spec 7.11.3.7 */
|
/**< same with Div_Lut defined in spec 7.11.3.7 */
|
||||||
static const uint16_t div_lut[AV1_DIV_LUT_NUM] = {
|
static const uint16_t div_lut[AV1_DIV_LUT_NUM] = {
|
||||||
@ -569,7 +570,7 @@ static int get_pixel_format(AVCodecContext *avctx)
|
|||||||
|
|
||||||
static void av1_frame_unref(AVCodecContext *avctx, AV1Frame *f)
|
static void av1_frame_unref(AVCodecContext *avctx, AV1Frame *f)
|
||||||
{
|
{
|
||||||
ff_thread_release_buffer(avctx, &f->tf);
|
ff_thread_release_buffer(avctx, f->f);
|
||||||
av_buffer_unref(&f->hwaccel_priv_buf);
|
av_buffer_unref(&f->hwaccel_priv_buf);
|
||||||
f->hwaccel_picture_private = NULL;
|
f->hwaccel_picture_private = NULL;
|
||||||
av_buffer_unref(&f->header_ref);
|
av_buffer_unref(&f->header_ref);
|
||||||
@ -591,10 +592,10 @@ static int av1_frame_ref(AVCodecContext *avctx, AV1Frame *dst, const AV1Frame *s
|
|||||||
|
|
||||||
dst->raw_frame_header = src->raw_frame_header;
|
dst->raw_frame_header = src->raw_frame_header;
|
||||||
|
|
||||||
if (!src->tf.f->buf[0])
|
if (!src->f->buf[0])
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
ret = ff_thread_ref_frame(&dst->tf, &src->tf);
|
ret = av_frame_ref(dst->f, src->f);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
@ -637,10 +638,10 @@ static av_cold int av1_decode_free(AVCodecContext *avctx)
|
|||||||
|
|
||||||
for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
|
for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
|
||||||
av1_frame_unref(avctx, &s->ref[i]);
|
av1_frame_unref(avctx, &s->ref[i]);
|
||||||
av_frame_free(&s->ref[i].tf.f);
|
av_frame_free(&s->ref[i].f);
|
||||||
}
|
}
|
||||||
av1_frame_unref(avctx, &s->cur_frame);
|
av1_frame_unref(avctx, &s->cur_frame);
|
||||||
av_frame_free(&s->cur_frame.tf.f);
|
av_frame_free(&s->cur_frame.f);
|
||||||
|
|
||||||
av_buffer_unref(&s->seq_ref);
|
av_buffer_unref(&s->seq_ref);
|
||||||
av_buffer_unref(&s->header_ref);
|
av_buffer_unref(&s->header_ref);
|
||||||
@ -741,16 +742,16 @@ static av_cold int av1_decode_init(AVCodecContext *avctx)
|
|||||||
s->pix_fmt = AV_PIX_FMT_NONE;
|
s->pix_fmt = AV_PIX_FMT_NONE;
|
||||||
|
|
||||||
for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
|
for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
|
||||||
s->ref[i].tf.f = av_frame_alloc();
|
s->ref[i].f = av_frame_alloc();
|
||||||
if (!s->ref[i].tf.f) {
|
if (!s->ref[i].f) {
|
||||||
av_log(avctx, AV_LOG_ERROR,
|
av_log(avctx, AV_LOG_ERROR,
|
||||||
"Failed to allocate reference frame buffer %d.\n", i);
|
"Failed to allocate reference frame buffer %d.\n", i);
|
||||||
return AVERROR(ENOMEM);
|
return AVERROR(ENOMEM);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
s->cur_frame.tf.f = av_frame_alloc();
|
s->cur_frame.f = av_frame_alloc();
|
||||||
if (!s->cur_frame.tf.f) {
|
if (!s->cur_frame.f) {
|
||||||
av_log(avctx, AV_LOG_ERROR,
|
av_log(avctx, AV_LOG_ERROR,
|
||||||
"Failed to allocate current frame buffer.\n");
|
"Failed to allocate current frame buffer.\n");
|
||||||
return AVERROR(ENOMEM);
|
return AVERROR(ENOMEM);
|
||||||
@ -803,10 +804,10 @@ static int av1_frame_alloc(AVCodecContext *avctx, AV1Frame *f)
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, f->f, AV_GET_BUFFER_FLAG_REF)) < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
frame = f->tf.f;
|
frame = f->f;
|
||||||
frame->key_frame = header->frame_type == AV1_FRAME_KEY;
|
frame->key_frame = header->frame_type == AV1_FRAME_KEY;
|
||||||
|
|
||||||
switch (header->frame_type) {
|
switch (header->frame_type) {
|
||||||
@ -905,7 +906,7 @@ static int set_output_frame(AVCodecContext *avctx, AVFrame *frame,
|
|||||||
const AVPacket *pkt, int *got_frame)
|
const AVPacket *pkt, int *got_frame)
|
||||||
{
|
{
|
||||||
AV1DecContext *s = avctx->priv_data;
|
AV1DecContext *s = avctx->priv_data;
|
||||||
const AVFrame *srcframe = s->cur_frame.tf.f;
|
const AVFrame *srcframe = s->cur_frame.f;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
// TODO: all layers
|
// TODO: all layers
|
||||||
@ -1101,7 +1102,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
|
|||||||
goto end;
|
goto end;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (s->cur_frame.tf.f->buf[0]) {
|
if (s->cur_frame.f->buf[0]) {
|
||||||
ret = set_output_frame(avctx, frame, pkt, got_frame);
|
ret = set_output_frame(avctx, frame, pkt, got_frame);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
av_log(avctx, AV_LOG_ERROR, "Set output frame error.\n");
|
av_log(avctx, AV_LOG_ERROR, "Set output frame error.\n");
|
||||||
@ -1121,7 +1122,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
|
|||||||
s->cur_frame.spatial_id = header->spatial_id;
|
s->cur_frame.spatial_id = header->spatial_id;
|
||||||
s->cur_frame.temporal_id = header->temporal_id;
|
s->cur_frame.temporal_id = header->temporal_id;
|
||||||
|
|
||||||
if (avctx->hwaccel && s->cur_frame.tf.f->buf[0]) {
|
if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
|
||||||
ret = avctx->hwaccel->start_frame(avctx, unit->data,
|
ret = avctx->hwaccel->start_frame(avctx, unit->data,
|
||||||
unit->data_size);
|
unit->data_size);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
@ -1148,7 +1149,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
|
|||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto end;
|
goto end;
|
||||||
|
|
||||||
if (avctx->hwaccel && s->cur_frame.tf.f->buf[0]) {
|
if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
|
||||||
ret = avctx->hwaccel->decode_slice(avctx,
|
ret = avctx->hwaccel->decode_slice(avctx,
|
||||||
raw_tile_group->tile_data.data,
|
raw_tile_group->tile_data.data,
|
||||||
raw_tile_group->tile_data.data_size);
|
raw_tile_group->tile_data.data_size);
|
||||||
@ -1171,7 +1172,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (raw_tile_group && (s->tile_num == raw_tile_group->tg_end + 1)) {
|
if (raw_tile_group && (s->tile_num == raw_tile_group->tg_end + 1)) {
|
||||||
if (avctx->hwaccel && s->cur_frame.tf.f->buf[0]) {
|
if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
|
||||||
ret = avctx->hwaccel->end_frame(avctx);
|
ret = avctx->hwaccel->end_frame(avctx);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
av_log(avctx, AV_LOG_ERROR, "HW accel end frame fail.\n");
|
av_log(avctx, AV_LOG_ERROR, "HW accel end frame fail.\n");
|
||||||
@ -1185,7 +1186,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
|
|||||||
goto end;
|
goto end;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (s->raw_frame_header->show_frame && s->cur_frame.tf.f->buf[0]) {
|
if (s->raw_frame_header->show_frame && s->cur_frame.f->buf[0]) {
|
||||||
ret = set_output_frame(avctx, frame, pkt, got_frame);
|
ret = set_output_frame(avctx, frame, pkt, got_frame);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
av_log(avctx, AV_LOG_ERROR, "Set output frame error\n");
|
av_log(avctx, AV_LOG_ERROR, "Set output frame error\n");
|
||||||
|
@ -24,14 +24,14 @@
|
|||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
|
||||||
#include "libavutil/buffer.h"
|
#include "libavutil/buffer.h"
|
||||||
|
#include "libavutil/frame.h"
|
||||||
#include "libavutil/pixfmt.h"
|
#include "libavutil/pixfmt.h"
|
||||||
#include "avcodec.h"
|
#include "avcodec.h"
|
||||||
#include "cbs.h"
|
#include "cbs.h"
|
||||||
#include "cbs_av1.h"
|
#include "cbs_av1.h"
|
||||||
#include "thread.h"
|
|
||||||
|
|
||||||
typedef struct AV1Frame {
|
typedef struct AV1Frame {
|
||||||
ThreadFrame tf;
|
AVFrame *f;
|
||||||
|
|
||||||
AVBufferRef *hwaccel_priv_buf;
|
AVBufferRef *hwaccel_priv_buf;
|
||||||
void *hwaccel_picture_private;
|
void *hwaccel_picture_private;
|
||||||
|
@ -65,12 +65,11 @@ static int bitpacked_decode_yuv422p10(AVCodecContext *avctx, AVFrame *frame,
|
|||||||
{
|
{
|
||||||
uint64_t frame_size = (uint64_t)avctx->width * (uint64_t)avctx->height * 20;
|
uint64_t frame_size = (uint64_t)avctx->width * (uint64_t)avctx->height * 20;
|
||||||
uint64_t packet_size = (uint64_t)avpkt->size * 8;
|
uint64_t packet_size = (uint64_t)avpkt->size * 8;
|
||||||
ThreadFrame tframe = { .f = frame };
|
|
||||||
GetBitContext bc;
|
GetBitContext bc;
|
||||||
uint16_t *y, *u, *v;
|
uint16_t *y, *u, *v;
|
||||||
int ret, i, j;
|
int ret, i, j;
|
||||||
|
|
||||||
ret = ff_thread_get_buffer(avctx, &tframe, 0);
|
ret = ff_thread_get_buffer(avctx, frame, 0);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
|
@ -378,8 +378,7 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
|
|||||||
CFHDContext *s = avctx->priv_data;
|
CFHDContext *s = avctx->priv_data;
|
||||||
CFHDDSPContext *dsp = &s->dsp;
|
CFHDDSPContext *dsp = &s->dsp;
|
||||||
GetByteContext gb;
|
GetByteContext gb;
|
||||||
ThreadFrame frame = { .f = data };
|
AVFrame *const pic = data;
|
||||||
AVFrame *pic = data;
|
|
||||||
int ret = 0, i, j, plane, got_buffer = 0;
|
int ret = 0, i, j, plane, got_buffer = 0;
|
||||||
int16_t *coeff_data;
|
int16_t *coeff_data;
|
||||||
|
|
||||||
@ -681,10 +680,9 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
|
|||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
avctx->height = height;
|
avctx->height = height;
|
||||||
}
|
}
|
||||||
frame.f->width =
|
pic->width = pic->height = 0;
|
||||||
frame.f->height = 0;
|
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
s->coded_width = 0;
|
s->coded_width = 0;
|
||||||
@ -692,10 +690,9 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
|
|||||||
s->coded_format = AV_PIX_FMT_NONE;
|
s->coded_format = AV_PIX_FMT_NONE;
|
||||||
got_buffer = 1;
|
got_buffer = 1;
|
||||||
} else if (tag == FrameIndex && data == 1 && s->sample_type == 1 && s->frame_type == 2) {
|
} else if (tag == FrameIndex && data == 1 && s->sample_type == 1 && s->frame_type == 2) {
|
||||||
frame.f->width =
|
pic->width = pic->height = 0;
|
||||||
frame.f->height = 0;
|
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
s->coded_width = 0;
|
s->coded_width = 0;
|
||||||
s->coded_height = 0;
|
s->coded_height = 0;
|
||||||
|
@ -360,7 +360,6 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
{
|
{
|
||||||
CLLCContext *ctx = avctx->priv_data;
|
CLLCContext *ctx = avctx->priv_data;
|
||||||
AVFrame *pic = data;
|
AVFrame *pic = data;
|
||||||
ThreadFrame frame = { .f = data };
|
|
||||||
uint8_t *src = avpkt->data;
|
uint8_t *src = avpkt->data;
|
||||||
uint32_t info_tag, info_offset;
|
uint32_t info_tag, info_offset;
|
||||||
int data_size;
|
int data_size;
|
||||||
@ -424,7 +423,7 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
avctx->pix_fmt = AV_PIX_FMT_YUV422P;
|
avctx->pix_fmt = AV_PIX_FMT_YUV422P;
|
||||||
avctx->bits_per_raw_sample = 8;
|
avctx->bits_per_raw_sample = 8;
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
ret = decode_yuv_frame(ctx, &gb, pic);
|
ret = decode_yuv_frame(ctx, &gb, pic);
|
||||||
@ -437,7 +436,7 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
avctx->pix_fmt = AV_PIX_FMT_RGB24;
|
avctx->pix_fmt = AV_PIX_FMT_RGB24;
|
||||||
avctx->bits_per_raw_sample = 8;
|
avctx->bits_per_raw_sample = 8;
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
ret = decode_rgb24_frame(ctx, &gb, pic);
|
ret = decode_rgb24_frame(ctx, &gb, pic);
|
||||||
@ -449,7 +448,7 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
avctx->pix_fmt = AV_PIX_FMT_ARGB;
|
avctx->pix_fmt = AV_PIX_FMT_ARGB;
|
||||||
avctx->bits_per_raw_sample = 8;
|
avctx->bits_per_raw_sample = 8;
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
ret = decode_argb_frame(ctx, &gb, pic);
|
ret = decode_argb_frame(ctx, &gb, pic);
|
||||||
|
@ -174,7 +174,6 @@ static int cri_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
{
|
{
|
||||||
CRIContext *s = avctx->priv_data;
|
CRIContext *s = avctx->priv_data;
|
||||||
GetByteContext *gb = &s->gb;
|
GetByteContext *gb = &s->gb;
|
||||||
ThreadFrame frame = { .f = data };
|
|
||||||
int ret, bps, hflip = 0, vflip = 0;
|
int ret, bps, hflip = 0, vflip = 0;
|
||||||
AVFrameSideData *rotation;
|
AVFrameSideData *rotation;
|
||||||
int compressed = 0;
|
int compressed = 0;
|
||||||
@ -318,7 +317,7 @@ skip:
|
|||||||
if (!s->data || !s->data_size)
|
if (!s->data || !s->data_size)
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
avctx->bits_per_raw_sample = bps;
|
avctx->bits_per_raw_sample = bps;
|
||||||
|
@ -618,7 +618,6 @@ static int dnxhd_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
DNXHDContext *ctx = avctx->priv_data;
|
DNXHDContext *ctx = avctx->priv_data;
|
||||||
ThreadFrame frame = { .f = data };
|
|
||||||
AVFrame *picture = data;
|
AVFrame *picture = data;
|
||||||
int first_field = 1;
|
int first_field = 1;
|
||||||
int ret, i;
|
int ret, i;
|
||||||
@ -650,7 +649,7 @@ decode_coding_unit:
|
|||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
if (first_field) {
|
if (first_field) {
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, picture, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
picture->pict_type = AV_PICTURE_TYPE_I;
|
picture->pict_type = AV_PICTURE_TYPE_I;
|
||||||
picture->key_frame = 1;
|
picture->key_frame = 1;
|
||||||
|
@ -612,7 +612,7 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
uint8_t *buf = avpkt->data;
|
uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
DVVideoContext *s = avctx->priv_data;
|
DVVideoContext *s = avctx->priv_data;
|
||||||
ThreadFrame frame = { .f = data };
|
AVFrame *const frame = data;
|
||||||
const uint8_t *vsc_pack;
|
const uint8_t *vsc_pack;
|
||||||
int apt, is16_9, ret;
|
int apt, is16_9, ret;
|
||||||
const AVDVProfile *sys;
|
const AVDVProfile *sys;
|
||||||
@ -633,9 +633,9 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
s->sys = sys;
|
s->sys = sys;
|
||||||
}
|
}
|
||||||
|
|
||||||
s->frame = frame.f;
|
s->frame = frame;
|
||||||
frame.f->key_frame = 1;
|
frame->key_frame = 1;
|
||||||
frame.f->pict_type = AV_PICTURE_TYPE_I;
|
frame->pict_type = AV_PICTURE_TYPE_I;
|
||||||
avctx->pix_fmt = s->sys->pix_fmt;
|
avctx->pix_fmt = s->sys->pix_fmt;
|
||||||
avctx->framerate = av_inv_q(s->sys->time_base);
|
avctx->framerate = av_inv_q(s->sys->time_base);
|
||||||
|
|
||||||
@ -652,20 +652,20 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
ff_set_sar(avctx, s->sys->sar[is16_9]);
|
ff_set_sar(avctx, s->sys->sar[is16_9]);
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
/* Determine the codec's field order from the packet */
|
/* Determine the codec's field order from the packet */
|
||||||
if ( *vsc_pack == dv_video_control ) {
|
if ( *vsc_pack == dv_video_control ) {
|
||||||
if (avctx->height == 720) {
|
if (avctx->height == 720) {
|
||||||
frame.f->interlaced_frame = 0;
|
frame->interlaced_frame = 0;
|
||||||
frame.f->top_field_first = 0;
|
frame->top_field_first = 0;
|
||||||
} else if (avctx->height == 1080) {
|
} else if (avctx->height == 1080) {
|
||||||
frame.f->interlaced_frame = 1;
|
frame->interlaced_frame = 1;
|
||||||
frame.f->top_field_first = (vsc_pack[3] & 0x40) == 0x40;
|
frame->top_field_first = (vsc_pack[3] & 0x40) == 0x40;
|
||||||
} else {
|
} else {
|
||||||
frame.f->interlaced_frame = (vsc_pack[3] & 0x10) == 0x10;
|
frame->interlaced_frame = (vsc_pack[3] & 0x10) == 0x10;
|
||||||
frame.f->top_field_first = !(vsc_pack[3] & 0x40);
|
frame->top_field_first = !(vsc_pack[3] & 0x40);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -92,7 +92,6 @@ static int dxtory_decode_v1_rgb(AVCodecContext *avctx, AVFrame *pic,
|
|||||||
const uint8_t *src, int src_size,
|
const uint8_t *src, int src_size,
|
||||||
int id, int bpp, uint32_t vflipped)
|
int id, int bpp, uint32_t vflipped)
|
||||||
{
|
{
|
||||||
ThreadFrame frame = { .f = pic };
|
|
||||||
int h;
|
int h;
|
||||||
uint8_t *dst;
|
uint8_t *dst;
|
||||||
int ret;
|
int ret;
|
||||||
@ -103,7 +102,7 @@ static int dxtory_decode_v1_rgb(AVCodecContext *avctx, AVFrame *pic,
|
|||||||
}
|
}
|
||||||
|
|
||||||
avctx->pix_fmt = id;
|
avctx->pix_fmt = id;
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
do_vflip(avctx, pic, vflipped);
|
do_vflip(avctx, pic, vflipped);
|
||||||
@ -124,7 +123,6 @@ static int dxtory_decode_v1_410(AVCodecContext *avctx, AVFrame *pic,
|
|||||||
const uint8_t *src, int src_size,
|
const uint8_t *src, int src_size,
|
||||||
uint32_t vflipped)
|
uint32_t vflipped)
|
||||||
{
|
{
|
||||||
ThreadFrame frame = { .f = pic };
|
|
||||||
int h, w;
|
int h, w;
|
||||||
uint8_t *Y1, *Y2, *Y3, *Y4, *U, *V;
|
uint8_t *Y1, *Y2, *Y3, *Y4, *U, *V;
|
||||||
int height, width, hmargin, vmargin;
|
int height, width, hmargin, vmargin;
|
||||||
@ -137,7 +135,7 @@ static int dxtory_decode_v1_410(AVCodecContext *avctx, AVFrame *pic,
|
|||||||
}
|
}
|
||||||
|
|
||||||
avctx->pix_fmt = AV_PIX_FMT_YUV410P;
|
avctx->pix_fmt = AV_PIX_FMT_YUV410P;
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
do_vflip(avctx, pic, vflipped);
|
do_vflip(avctx, pic, vflipped);
|
||||||
@ -220,7 +218,6 @@ static int dxtory_decode_v1_420(AVCodecContext *avctx, AVFrame *pic,
|
|||||||
const uint8_t *src, int src_size,
|
const uint8_t *src, int src_size,
|
||||||
uint32_t vflipped)
|
uint32_t vflipped)
|
||||||
{
|
{
|
||||||
ThreadFrame frame = { .f = pic };
|
|
||||||
int h, w;
|
int h, w;
|
||||||
uint8_t *Y1, *Y2, *U, *V;
|
uint8_t *Y1, *Y2, *U, *V;
|
||||||
int height, width, hmargin, vmargin;
|
int height, width, hmargin, vmargin;
|
||||||
@ -233,7 +230,7 @@ static int dxtory_decode_v1_420(AVCodecContext *avctx, AVFrame *pic,
|
|||||||
}
|
}
|
||||||
|
|
||||||
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
|
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
do_vflip(avctx, pic, vflipped);
|
do_vflip(avctx, pic, vflipped);
|
||||||
@ -293,7 +290,6 @@ static int dxtory_decode_v1_444(AVCodecContext *avctx, AVFrame *pic,
|
|||||||
const uint8_t *src, int src_size,
|
const uint8_t *src, int src_size,
|
||||||
uint32_t vflipped)
|
uint32_t vflipped)
|
||||||
{
|
{
|
||||||
ThreadFrame frame = { .f = pic };
|
|
||||||
int h, w;
|
int h, w;
|
||||||
uint8_t *Y, *U, *V;
|
uint8_t *Y, *U, *V;
|
||||||
int ret;
|
int ret;
|
||||||
@ -304,7 +300,7 @@ static int dxtory_decode_v1_444(AVCodecContext *avctx, AVFrame *pic,
|
|||||||
}
|
}
|
||||||
|
|
||||||
avctx->pix_fmt = AV_PIX_FMT_YUV444P;
|
avctx->pix_fmt = AV_PIX_FMT_YUV444P;
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
do_vflip(avctx, pic, vflipped);
|
do_vflip(avctx, pic, vflipped);
|
||||||
@ -429,7 +425,6 @@ static int dxtory_decode_v2(AVCodecContext *avctx, AVFrame *pic,
|
|||||||
enum AVPixelFormat fmt,
|
enum AVPixelFormat fmt,
|
||||||
uint32_t vflipped)
|
uint32_t vflipped)
|
||||||
{
|
{
|
||||||
ThreadFrame frame = { .f = pic };
|
|
||||||
GetByteContext gb, gb_check;
|
GetByteContext gb, gb_check;
|
||||||
GetBitContext gb2;
|
GetBitContext gb2;
|
||||||
int nslices, slice, line = 0;
|
int nslices, slice, line = 0;
|
||||||
@ -456,7 +451,7 @@ static int dxtory_decode_v2(AVCodecContext *avctx, AVFrame *pic,
|
|||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
||||||
avctx->pix_fmt = fmt;
|
avctx->pix_fmt = fmt;
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
do_vflip(avctx, pic, vflipped);
|
do_vflip(avctx, pic, vflipped);
|
||||||
|
@ -1042,7 +1042,7 @@ static int dxv_decode(AVCodecContext *avctx, void *data,
|
|||||||
int *got_frame, AVPacket *avpkt)
|
int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
DXVContext *ctx = avctx->priv_data;
|
DXVContext *ctx = avctx->priv_data;
|
||||||
ThreadFrame tframe;
|
AVFrame *const frame = data;
|
||||||
GetByteContext *gbc = &ctx->gbc;
|
GetByteContext *gbc = &ctx->gbc;
|
||||||
int (*decompress_tex)(AVCodecContext *avctx);
|
int (*decompress_tex)(AVCodecContext *avctx);
|
||||||
const char *msgcomp, *msgtext;
|
const char *msgcomp, *msgtext;
|
||||||
@ -1211,18 +1211,17 @@ static int dxv_decode(AVCodecContext *avctx, void *data,
|
|||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
|
|
||||||
tframe.f = data;
|
ret = ff_thread_get_buffer(avctx, frame, 0);
|
||||||
ret = ff_thread_get_buffer(avctx, &tframe, 0);
|
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
/* Now decompress the texture with the standard functions. */
|
/* Now decompress the texture with the standard functions. */
|
||||||
avctx->execute2(avctx, decompress_texture_thread,
|
avctx->execute2(avctx, decompress_texture_thread,
|
||||||
tframe.f, NULL, ctx->slice_count);
|
frame, NULL, ctx->slice_count);
|
||||||
|
|
||||||
/* Frame is ready to be output. */
|
/* Frame is ready to be output. */
|
||||||
tframe.f->pict_type = AV_PICTURE_TYPE_I;
|
frame->pict_type = AV_PICTURE_TYPE_I;
|
||||||
tframe.f->key_frame = 1;
|
frame->key_frame = 1;
|
||||||
*got_frame = 1;
|
*got_frame = 1;
|
||||||
|
|
||||||
return avpkt->size;
|
return avpkt->size;
|
||||||
|
@ -72,7 +72,7 @@ static int fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *c
|
|||||||
pp->max_width = seq->max_frame_width_minus_1 + 1;
|
pp->max_width = seq->max_frame_width_minus_1 + 1;
|
||||||
pp->max_height = seq->max_frame_height_minus_1 + 1;
|
pp->max_height = seq->max_frame_height_minus_1 + 1;
|
||||||
|
|
||||||
pp->CurrPicTextureIndex = ff_dxva2_get_surface_index(avctx, ctx, h->cur_frame.tf.f);
|
pp->CurrPicTextureIndex = ff_dxva2_get_surface_index(avctx, ctx, h->cur_frame.f);
|
||||||
pp->superres_denom = frame_header->use_superres ? frame_header->coded_denom + AV1_SUPERRES_DENOM_MIN : AV1_SUPERRES_NUM;
|
pp->superres_denom = frame_header->use_superres ? frame_header->coded_denom + AV1_SUPERRES_DENOM_MIN : AV1_SUPERRES_NUM;
|
||||||
pp->bitdepth = get_bit_depth_from_seq(seq);
|
pp->bitdepth = get_bit_depth_from_seq(seq);
|
||||||
pp->seq_profile = seq->seq_profile;
|
pp->seq_profile = seq->seq_profile;
|
||||||
@ -132,7 +132,7 @@ static int fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *c
|
|||||||
memset(pp->RefFrameMapTextureIndex, 0xFF, sizeof(pp->RefFrameMapTextureIndex));
|
memset(pp->RefFrameMapTextureIndex, 0xFF, sizeof(pp->RefFrameMapTextureIndex));
|
||||||
for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
|
for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
|
||||||
int8_t ref_idx = frame_header->ref_frame_idx[i];
|
int8_t ref_idx = frame_header->ref_frame_idx[i];
|
||||||
AVFrame *ref_frame = h->ref[ref_idx].tf.f;
|
AVFrame *ref_frame = h->ref[ref_idx].f;
|
||||||
|
|
||||||
pp->frame_refs[i].width = ref_frame->width;
|
pp->frame_refs[i].width = ref_frame->width;
|
||||||
pp->frame_refs[i].height = ref_frame->height;
|
pp->frame_refs[i].height = ref_frame->height;
|
||||||
@ -146,7 +146,7 @@ static int fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *c
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
|
for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
|
||||||
AVFrame *ref_frame = h->ref[i].tf.f;
|
AVFrame *ref_frame = h->ref[i].f;
|
||||||
if (ref_frame->buf[0])
|
if (ref_frame->buf[0])
|
||||||
pp->RefFrameMapTextureIndex[i] = ff_dxva2_get_surface_index(avctx, ctx, ref_frame);
|
pp->RefFrameMapTextureIndex[i] = ff_dxva2_get_surface_index(avctx, ctx, ref_frame);
|
||||||
}
|
}
|
||||||
@ -436,7 +436,7 @@ static int dxva2_av1_end_frame(AVCodecContext *avctx)
|
|||||||
if (ctx_pic->bitstream_size <= 0)
|
if (ctx_pic->bitstream_size <= 0)
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
ret = ff_dxva2_common_end_frame(avctx, h->cur_frame.tf.f,
|
ret = ff_dxva2_common_end_frame(avctx, h->cur_frame.f,
|
||||||
&ctx_pic->pp, sizeof(ctx_pic->pp),
|
&ctx_pic->pp, sizeof(ctx_pic->pp),
|
||||||
NULL, 0,
|
NULL, 0,
|
||||||
commit_bitstream_and_slice_buffer);
|
commit_bitstream_and_slice_buffer);
|
||||||
|
@ -24,7 +24,7 @@
|
|||||||
|
|
||||||
#include "avcodec.h"
|
#include "avcodec.h"
|
||||||
#include "me_cmp.h"
|
#include "me_cmp.h"
|
||||||
#include "thread.h"
|
#include "threadframe.h"
|
||||||
|
|
||||||
///< current MB is the first after a resync marker
|
///< current MB is the first after a resync marker
|
||||||
#define VP_START 1
|
#define VP_START 1
|
||||||
|
@ -2027,7 +2027,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
{
|
{
|
||||||
EXRContext *s = avctx->priv_data;
|
EXRContext *s = avctx->priv_data;
|
||||||
GetByteContext *gb = &s->gb;
|
GetByteContext *gb = &s->gb;
|
||||||
ThreadFrame frame = { .f = data };
|
|
||||||
AVFrame *picture = data;
|
AVFrame *picture = data;
|
||||||
uint8_t *ptr;
|
uint8_t *ptr;
|
||||||
|
|
||||||
@ -2149,7 +2148,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
s->scan_lines_per_block;
|
s->scan_lines_per_block;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, picture, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
if (bytestream2_get_bytes_left(gb)/8 < nb_blocks)
|
if (bytestream2_get_bytes_left(gb)/8 < nb_blocks)
|
||||||
|
@ -34,7 +34,7 @@
|
|||||||
#include "mathops.h"
|
#include "mathops.h"
|
||||||
#include "put_bits.h"
|
#include "put_bits.h"
|
||||||
#include "rangecoder.h"
|
#include "rangecoder.h"
|
||||||
#include "thread.h"
|
#include "threadframe.h"
|
||||||
|
|
||||||
#ifdef __INTEL_COMPILER
|
#ifdef __INTEL_COMPILER
|
||||||
#undef av_flatten
|
#undef av_flatten
|
||||||
|
@ -37,6 +37,7 @@
|
|||||||
#include "golomb.h"
|
#include "golomb.h"
|
||||||
#include "mathops.h"
|
#include "mathops.h"
|
||||||
#include "ffv1.h"
|
#include "ffv1.h"
|
||||||
|
#include "thread.h"
|
||||||
#include "threadframe.h"
|
#include "threadframe.h"
|
||||||
|
|
||||||
static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state,
|
static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state,
|
||||||
|
@ -559,7 +559,6 @@ static int flac_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
int *got_frame_ptr, AVPacket *avpkt)
|
int *got_frame_ptr, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
AVFrame *frame = data;
|
AVFrame *frame = data;
|
||||||
ThreadFrame tframe = { .f = data };
|
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
FLACContext *s = avctx->priv_data;
|
FLACContext *s = avctx->priv_data;
|
||||||
@ -618,7 +617,7 @@ static int flac_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
|
|
||||||
/* get output buffer */
|
/* get output buffer */
|
||||||
frame->nb_samples = s->blocksize;
|
frame->nb_samples = s->blocksize;
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
s->dsp.decorrelate[s->ch_mode](frame->data, s->decoded,
|
s->dsp.decorrelate[s->ch_mode](frame->data, s->decoded,
|
||||||
|
@ -140,7 +140,6 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
FrapsContext * const s = avctx->priv_data;
|
FrapsContext * const s = avctx->priv_data;
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
ThreadFrame frame = { .f = data };
|
|
||||||
AVFrame * const f = data;
|
AVFrame * const f = data;
|
||||||
uint32_t header;
|
uint32_t header;
|
||||||
unsigned int version,header_size;
|
unsigned int version,header_size;
|
||||||
@ -227,7 +226,7 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
: AVCOL_RANGE_JPEG;
|
: AVCOL_RANGE_JPEG;
|
||||||
avctx->colorspace = version & 1 ? AVCOL_SPC_UNSPECIFIED : AVCOL_SPC_BT709;
|
avctx->colorspace = version & 1 ? AVCOL_SPC_UNSPECIFIED : AVCOL_SPC_BT709;
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, f, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
switch (version) {
|
switch (version) {
|
||||||
|
@ -30,18 +30,19 @@
|
|||||||
#include "avcodec.h"
|
#include "avcodec.h"
|
||||||
#include "h264dec.h"
|
#include "h264dec.h"
|
||||||
#include "mpegutils.h"
|
#include "mpegutils.h"
|
||||||
|
#include "thread.h"
|
||||||
#include "threadframe.h"
|
#include "threadframe.h"
|
||||||
|
|
||||||
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
|
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
|
||||||
{
|
{
|
||||||
int off = offsetof(H264Picture, tf_grain) + sizeof(pic->tf_grain);
|
int off = offsetof(H264Picture, f_grain) + sizeof(pic->f_grain);
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
if (!pic->f || !pic->f->buf[0])
|
if (!pic->f || !pic->f->buf[0])
|
||||||
return;
|
return;
|
||||||
|
|
||||||
ff_thread_release_ext_buffer(h->avctx, &pic->tf);
|
ff_thread_release_ext_buffer(h->avctx, &pic->tf);
|
||||||
ff_thread_release_buffer(h->avctx, &pic->tf_grain);
|
ff_thread_release_buffer(h->avctx, pic->f_grain);
|
||||||
av_buffer_unref(&pic->hwaccel_priv_buf);
|
av_buffer_unref(&pic->hwaccel_priv_buf);
|
||||||
|
|
||||||
av_buffer_unref(&pic->qscale_table_buf);
|
av_buffer_unref(&pic->qscale_table_buf);
|
||||||
@ -102,9 +103,7 @@ int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src)
|
|||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
if (src->needs_fg) {
|
if (src->needs_fg) {
|
||||||
av_assert0(src->tf_grain.f == src->f_grain);
|
ret = av_frame_ref(dst->f_grain, src->f_grain);
|
||||||
dst->tf_grain.f = dst->f_grain;
|
|
||||||
ret = ff_thread_ref_frame(&dst->tf_grain, &src->tf_grain);
|
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
@ -161,10 +160,8 @@ int ff_h264_replace_picture(H264Context *h, H264Picture *dst, const H264Picture
|
|||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
if (src->needs_fg) {
|
if (src->needs_fg) {
|
||||||
av_assert0(src->tf_grain.f == src->f_grain);
|
ff_thread_release_buffer(h->avctx, dst->f_grain);
|
||||||
dst->tf_grain.f = dst->f_grain;
|
ret = av_frame_ref(dst->f_grain, src->f_grain);
|
||||||
ff_thread_release_buffer(h->avctx, &dst->tf_grain);
|
|
||||||
ret = ff_thread_ref_frame(&dst->tf_grain, &src->tf_grain);
|
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
@ -45,6 +45,7 @@
|
|||||||
#include "mathops.h"
|
#include "mathops.h"
|
||||||
#include "mpegutils.h"
|
#include "mpegutils.h"
|
||||||
#include "rectangle.h"
|
#include "rectangle.h"
|
||||||
|
#include "thread.h"
|
||||||
#include "threadframe.h"
|
#include "threadframe.h"
|
||||||
|
|
||||||
static const uint8_t field_scan[16+1] = {
|
static const uint8_t field_scan[16+1] = {
|
||||||
@ -197,11 +198,10 @@ static int alloc_picture(H264Context *h, H264Picture *pic)
|
|||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
if (pic->needs_fg) {
|
if (pic->needs_fg) {
|
||||||
pic->tf_grain.f = pic->f_grain;
|
|
||||||
pic->f_grain->format = pic->f->format;
|
pic->f_grain->format = pic->f->format;
|
||||||
pic->f_grain->width = pic->f->width;
|
pic->f_grain->width = pic->f->width;
|
||||||
pic->f_grain->height = pic->f->height;
|
pic->f_grain->height = pic->f->height;
|
||||||
ret = ff_thread_get_buffer(h->avctx, &pic->tf_grain, 0);
|
ret = ff_thread_get_buffer(h->avctx, pic->f_grain, 0);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
@ -46,6 +46,7 @@
|
|||||||
#include "mpegutils.h"
|
#include "mpegutils.h"
|
||||||
#include "profiles.h"
|
#include "profiles.h"
|
||||||
#include "rectangle.h"
|
#include "rectangle.h"
|
||||||
|
#include "thread.h"
|
||||||
#include "threadframe.h"
|
#include "threadframe.h"
|
||||||
|
|
||||||
const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
|
const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
|
||||||
|
@ -109,7 +109,6 @@ typedef struct H264Picture {
|
|||||||
ThreadFrame tf;
|
ThreadFrame tf;
|
||||||
|
|
||||||
AVFrame *f_grain;
|
AVFrame *f_grain;
|
||||||
ThreadFrame tf_grain;
|
|
||||||
|
|
||||||
AVBufferRef *qscale_table_buf;
|
AVBufferRef *qscale_table_buf;
|
||||||
int8_t *qscale_table;
|
int8_t *qscale_table;
|
||||||
|
@ -305,7 +305,7 @@ static int hap_decode(AVCodecContext *avctx, void *data,
|
|||||||
int *got_frame, AVPacket *avpkt)
|
int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
HapContext *ctx = avctx->priv_data;
|
HapContext *ctx = avctx->priv_data;
|
||||||
ThreadFrame tframe;
|
AVFrame *const frame = data;
|
||||||
int ret, i, t;
|
int ret, i, t;
|
||||||
int section_size;
|
int section_size;
|
||||||
enum HapSectionType section_type;
|
enum HapSectionType section_type;
|
||||||
@ -330,8 +330,7 @@ static int hap_decode(AVCodecContext *avctx, void *data,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Get the output frame ready to receive data */
|
/* Get the output frame ready to receive data */
|
||||||
tframe.f = data;
|
ret = ff_thread_get_buffer(avctx, frame, 0);
|
||||||
ret = ff_thread_get_buffer(avctx, &tframe, 0);
|
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
@ -383,16 +382,15 @@ static int hap_decode(AVCodecContext *avctx, void *data,
|
|||||||
|
|
||||||
/* Use the decompress function on the texture, one block per thread */
|
/* Use the decompress function on the texture, one block per thread */
|
||||||
if (t == 0){
|
if (t == 0){
|
||||||
avctx->execute2(avctx, decompress_texture_thread, tframe.f, NULL, ctx->slice_count);
|
avctx->execute2(avctx, decompress_texture_thread, frame, NULL, ctx->slice_count);
|
||||||
} else{
|
} else{
|
||||||
tframe.f = data;
|
avctx->execute2(avctx, decompress_texture2_thread, frame, NULL, ctx->slice_count);
|
||||||
avctx->execute2(avctx, decompress_texture2_thread, tframe.f, NULL, ctx->slice_count);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Frame is ready to be output */
|
/* Frame is ready to be output */
|
||||||
tframe.f->pict_type = AV_PICTURE_TYPE_I;
|
frame->pict_type = AV_PICTURE_TYPE_I;
|
||||||
tframe.f->key_frame = 1;
|
frame->key_frame = 1;
|
||||||
*got_frame = 1;
|
*got_frame = 1;
|
||||||
|
|
||||||
return avpkt->size;
|
return avpkt->size;
|
||||||
|
@ -37,7 +37,7 @@ void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags)
|
|||||||
frame->flags &= ~flags;
|
frame->flags &= ~flags;
|
||||||
if (!frame->flags) {
|
if (!frame->flags) {
|
||||||
ff_thread_release_ext_buffer(s->avctx, &frame->tf);
|
ff_thread_release_ext_buffer(s->avctx, &frame->tf);
|
||||||
ff_thread_release_buffer(s->avctx, &frame->tf_grain);
|
ff_thread_release_buffer(s->avctx, frame->frame_grain);
|
||||||
frame->needs_fg = 0;
|
frame->needs_fg = 0;
|
||||||
|
|
||||||
av_buffer_unref(&frame->tab_mvf_buf);
|
av_buffer_unref(&frame->tab_mvf_buf);
|
||||||
|
@ -46,6 +46,7 @@
|
|||||||
#include "hwconfig.h"
|
#include "hwconfig.h"
|
||||||
#include "internal.h"
|
#include "internal.h"
|
||||||
#include "profiles.h"
|
#include "profiles.h"
|
||||||
|
#include "thread.h"
|
||||||
#include "threadframe.h"
|
#include "threadframe.h"
|
||||||
|
|
||||||
const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
|
const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
|
||||||
@ -3027,7 +3028,7 @@ static int hevc_frame_start(HEVCContext *s)
|
|||||||
s->ref->frame_grain->format = s->ref->frame->format;
|
s->ref->frame_grain->format = s->ref->frame->format;
|
||||||
s->ref->frame_grain->width = s->ref->frame->width;
|
s->ref->frame_grain->width = s->ref->frame->width;
|
||||||
s->ref->frame_grain->height = s->ref->frame->height;
|
s->ref->frame_grain->height = s->ref->frame->height;
|
||||||
if ((ret = ff_thread_get_buffer(s->avctx, &s->ref->tf_grain, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(s->avctx, s->ref->frame_grain, 0)) < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3534,7 +3535,7 @@ static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
|
|||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
if (src->needs_fg) {
|
if (src->needs_fg) {
|
||||||
ret = ff_thread_ref_frame(&dst->tf_grain, &src->tf_grain);
|
ret = av_frame_ref(dst->frame_grain, src->frame_grain);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
dst->needs_fg = 1;
|
dst->needs_fg = 1;
|
||||||
@ -3653,7 +3654,6 @@ static av_cold int hevc_init_context(AVCodecContext *avctx)
|
|||||||
s->DPB[i].frame_grain = av_frame_alloc();
|
s->DPB[i].frame_grain = av_frame_alloc();
|
||||||
if (!s->DPB[i].frame_grain)
|
if (!s->DPB[i].frame_grain)
|
||||||
goto fail;
|
goto fail;
|
||||||
s->DPB[i].tf_grain.f = s->DPB[i].frame_grain;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
s->max_ra = INT_MAX;
|
s->max_ra = INT_MAX;
|
||||||
|
@ -40,7 +40,7 @@
|
|||||||
#include "hevc_sei.h"
|
#include "hevc_sei.h"
|
||||||
#include "hevcdsp.h"
|
#include "hevcdsp.h"
|
||||||
#include "h274.h"
|
#include "h274.h"
|
||||||
#include "thread.h"
|
#include "threadframe.h"
|
||||||
#include "videodsp.h"
|
#include "videodsp.h"
|
||||||
|
|
||||||
#define SHIFT_CTB_WPP 2
|
#define SHIFT_CTB_WPP 2
|
||||||
@ -394,7 +394,6 @@ typedef struct HEVCFrame {
|
|||||||
AVFrame *frame;
|
AVFrame *frame;
|
||||||
AVFrame *frame_grain;
|
AVFrame *frame_grain;
|
||||||
ThreadFrame tf;
|
ThreadFrame tf;
|
||||||
ThreadFrame tf_grain;
|
|
||||||
int needs_fg; /* 1 if grain needs to be applied by the decoder */
|
int needs_fg; /* 1 if grain needs to be applied by the decoder */
|
||||||
MvField *tab_mvf;
|
MvField *tab_mvf;
|
||||||
RefPicList *refPicList;
|
RefPicList *refPicList;
|
||||||
|
@ -404,7 +404,7 @@ static int hqx_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
int *got_picture_ptr, AVPacket *avpkt)
|
int *got_picture_ptr, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
HQXContext *ctx = avctx->priv_data;
|
HQXContext *ctx = avctx->priv_data;
|
||||||
ThreadFrame frame = { .f = data };
|
AVFrame *const frame = data;
|
||||||
uint8_t *src = avpkt->data;
|
uint8_t *src = avpkt->data;
|
||||||
uint32_t info_tag;
|
uint32_t info_tag;
|
||||||
int data_start;
|
int data_start;
|
||||||
@ -499,7 +499,7 @@ static int hqx_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = ff_thread_get_buffer(avctx, &frame, 0);
|
ret = ff_thread_get_buffer(avctx, frame, 0);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
|
@ -1185,7 +1185,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
|||||||
HYuvContext *s = avctx->priv_data;
|
HYuvContext *s = avctx->priv_data;
|
||||||
const int width = s->width;
|
const int width = s->width;
|
||||||
const int height = s->height;
|
const int height = s->height;
|
||||||
ThreadFrame frame = { .f = data };
|
|
||||||
AVFrame *const p = data;
|
AVFrame *const p = data;
|
||||||
int slice, table_size = 0, ret, nb_slices;
|
int slice, table_size = 0, ret, nb_slices;
|
||||||
unsigned slices_info_offset;
|
unsigned slices_info_offset;
|
||||||
@ -1203,7 +1202,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
|||||||
s->bdsp.bswap_buf((uint32_t *) s->bitstream_buffer,
|
s->bdsp.bswap_buf((uint32_t *) s->bitstream_buffer,
|
||||||
(const uint32_t *) buf, buf_size / 4);
|
(const uint32_t *) buf, buf_size / 4);
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
if (s->context) {
|
if (s->context) {
|
||||||
|
@ -2476,7 +2476,6 @@ static int jpeg2000_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
int *got_frame, AVPacket *avpkt)
|
int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
Jpeg2000DecoderContext *s = avctx->priv_data;
|
Jpeg2000DecoderContext *s = avctx->priv_data;
|
||||||
ThreadFrame frame = { .f = data };
|
|
||||||
AVFrame *picture = data;
|
AVFrame *picture = data;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
@ -2517,7 +2516,7 @@ static int jpeg2000_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
goto end;
|
goto end;
|
||||||
|
|
||||||
/* get picture buffer */
|
/* get picture buffer */
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, picture, 0)) < 0)
|
||||||
goto end;
|
goto end;
|
||||||
picture->pict_type = AV_PICTURE_TYPE_I;
|
picture->pict_type = AV_PICTURE_TYPE_I;
|
||||||
picture->key_frame = 1;
|
picture->key_frame = 1;
|
||||||
|
@ -540,7 +540,6 @@ static int lag_decode_frame(AVCodecContext *avctx,
|
|||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
unsigned int buf_size = avpkt->size;
|
unsigned int buf_size = avpkt->size;
|
||||||
LagarithContext *l = avctx->priv_data;
|
LagarithContext *l = avctx->priv_data;
|
||||||
ThreadFrame frame = { .f = data };
|
|
||||||
AVFrame *const p = data;
|
AVFrame *const p = data;
|
||||||
uint8_t frametype;
|
uint8_t frametype;
|
||||||
uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9;
|
uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9;
|
||||||
@ -569,7 +568,7 @@ static int lag_decode_frame(AVCodecContext *avctx,
|
|||||||
planes = 4;
|
planes = 4;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
if (frametype == FRAME_SOLID_RGBA) {
|
if (frametype == FRAME_SOLID_RGBA) {
|
||||||
@ -593,7 +592,7 @@ static int lag_decode_frame(AVCodecContext *avctx,
|
|||||||
avctx->pix_fmt = AV_PIX_FMT_GBRAP;
|
avctx->pix_fmt = AV_PIX_FMT_GBRAP;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame,0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, p,0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
for (i = 0; i < avctx->height; i++) {
|
for (i = 0; i < avctx->height; i++) {
|
||||||
@ -614,7 +613,7 @@ static int lag_decode_frame(AVCodecContext *avctx,
|
|||||||
if (frametype == FRAME_ARITH_RGB24 || frametype == FRAME_U_RGB24)
|
if (frametype == FRAME_ARITH_RGB24 || frametype == FRAME_U_RGB24)
|
||||||
avctx->pix_fmt = AV_PIX_FMT_GBRP;
|
avctx->pix_fmt = AV_PIX_FMT_GBRP;
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
offs[0] = offset_bv;
|
offs[0] = offset_bv;
|
||||||
@ -650,7 +649,7 @@ static int lag_decode_frame(AVCodecContext *avctx,
|
|||||||
case FRAME_ARITH_YUY2:
|
case FRAME_ARITH_YUY2:
|
||||||
avctx->pix_fmt = AV_PIX_FMT_YUV422P;
|
avctx->pix_fmt = AV_PIX_FMT_YUV422P;
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
if (offset_ry >= buf_size ||
|
if (offset_ry >= buf_size ||
|
||||||
@ -678,7 +677,7 @@ static int lag_decode_frame(AVCodecContext *avctx,
|
|||||||
case FRAME_ARITH_YV12:
|
case FRAME_ARITH_YV12:
|
||||||
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
|
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
if (offset_ry >= buf_size ||
|
if (offset_ry >= buf_size ||
|
||||||
|
@ -158,7 +158,6 @@ static int zlib_decomp(AVCodecContext *avctx, const uint8_t *src, int src_len, i
|
|||||||
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
|
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
AVFrame *frame = data;
|
AVFrame *frame = data;
|
||||||
ThreadFrame tframe = { .f = data };
|
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
LclDecContext * const c = avctx->priv_data;
|
LclDecContext * const c = avctx->priv_data;
|
||||||
@ -175,7 +174,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
|
|||||||
unsigned int len = buf_size;
|
unsigned int len = buf_size;
|
||||||
int linesize, offset;
|
int linesize, offset;
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
outptr = frame->data[0]; // Output image pointer
|
outptr = frame->data[0]; // Output image pointer
|
||||||
|
@ -324,7 +324,6 @@ static int libopenjpeg_decode_frame(AVCodecContext *avctx,
|
|||||||
uint8_t *buf = avpkt->data;
|
uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
LibOpenJPEGContext *ctx = avctx->priv_data;
|
LibOpenJPEGContext *ctx = avctx->priv_data;
|
||||||
ThreadFrame frame = { .f = data };
|
|
||||||
AVFrame *picture = data;
|
AVFrame *picture = data;
|
||||||
const AVPixFmtDescriptor *desc;
|
const AVPixFmtDescriptor *desc;
|
||||||
int width, height, ret;
|
int width, height, ret;
|
||||||
@ -417,7 +416,7 @@ static int libopenjpeg_decode_frame(AVCodecContext *avctx,
|
|||||||
if (image->comps[i].prec > avctx->bits_per_raw_sample)
|
if (image->comps[i].prec > avctx->bits_per_raw_sample)
|
||||||
avctx->bits_per_raw_sample = image->comps[i].prec;
|
avctx->bits_per_raw_sample = image->comps[i].prec;
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, picture, 0)) < 0)
|
||||||
goto done;
|
goto done;
|
||||||
|
|
||||||
ret = !opj_decode(dec, stream, image);
|
ret = !opj_decode(dec, stream, image);
|
||||||
|
@ -431,7 +431,6 @@ static int magy_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
int *got_frame, AVPacket *avpkt)
|
int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
MagicYUVContext *s = avctx->priv_data;
|
MagicYUVContext *s = avctx->priv_data;
|
||||||
ThreadFrame frame = { .f = data };
|
|
||||||
AVFrame *p = data;
|
AVFrame *p = data;
|
||||||
GetByteContext gb;
|
GetByteContext gb;
|
||||||
uint32_t first_offset, offset, next_offset, header_size, slice_width;
|
uint32_t first_offset, offset, next_offset, header_size, slice_width;
|
||||||
@ -641,7 +640,7 @@ static int magy_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
p->pict_type = AV_PICTURE_TYPE_I;
|
p->pict_type = AV_PICTURE_TYPE_I;
|
||||||
p->key_frame = 1;
|
p->key_frame = 1;
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
s->buf = avpkt->data;
|
s->buf = avpkt->data;
|
||||||
|
@ -42,7 +42,6 @@ typedef struct MDECContext {
|
|||||||
BlockDSPContext bdsp;
|
BlockDSPContext bdsp;
|
||||||
BswapDSPContext bbdsp;
|
BswapDSPContext bbdsp;
|
||||||
IDCTDSPContext idsp;
|
IDCTDSPContext idsp;
|
||||||
ThreadFrame frame;
|
|
||||||
GetBitContext gb;
|
GetBitContext gb;
|
||||||
ScanTable scantable;
|
ScanTable scantable;
|
||||||
int version;
|
int version;
|
||||||
@ -174,13 +173,13 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
MDECContext * const a = avctx->priv_data;
|
MDECContext * const a = avctx->priv_data;
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
ThreadFrame frame = { .f = data };
|
AVFrame *const frame = data;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
frame.f->pict_type = AV_PICTURE_TYPE_I;
|
frame->pict_type = AV_PICTURE_TYPE_I;
|
||||||
frame.f->key_frame = 1;
|
frame->key_frame = 1;
|
||||||
|
|
||||||
av_fast_padded_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size, buf_size);
|
av_fast_padded_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size, buf_size);
|
||||||
if (!a->bitstream_buffer)
|
if (!a->bitstream_buffer)
|
||||||
@ -202,7 +201,7 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
if ((ret = decode_mb(a, a->block)) < 0)
|
if ((ret = decode_mb(a, a->block)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
idct_put(a, frame.f, a->mb_x, a->mb_y);
|
idct_put(a, frame, a->mb_x, a->mb_y);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -27,7 +27,7 @@
|
|||||||
|
|
||||||
#include "avcodec.h"
|
#include "avcodec.h"
|
||||||
#include "motion_est.h"
|
#include "motion_est.h"
|
||||||
#include "thread.h"
|
#include "threadframe.h"
|
||||||
|
|
||||||
#define MPEGVIDEO_MAX_PLANES 4
|
#define MPEGVIDEO_MAX_PLANES 4
|
||||||
#define MAX_PICTURE_COUNT 36
|
#define MAX_PICTURE_COUNT 36
|
||||||
|
@ -146,7 +146,7 @@ static int lz4_decompress(AVCodecContext *avctx,
|
|||||||
return bytestream2_tell_p(pb);
|
return bytestream2_tell_p(pb);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int decode_blocks(AVCodecContext *avctx, AVFrame *p, ThreadFrame *frame,
|
static int decode_blocks(AVCodecContext *avctx, AVFrame *p,
|
||||||
unsigned uncompressed_size)
|
unsigned uncompressed_size)
|
||||||
{
|
{
|
||||||
NotchLCContext *s = avctx->priv_data;
|
NotchLCContext *s = avctx->priv_data;
|
||||||
@ -221,7 +221,7 @@ static int decode_blocks(AVCodecContext *avctx, AVFrame *p, ThreadFrame *frame,
|
|||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
s->uv_count_offset = s->y_data_offset - s->a_data_offset;
|
s->uv_count_offset = s->y_data_offset - s->a_data_offset;
|
||||||
|
|
||||||
if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
|
if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
rgb = *gb;
|
rgb = *gb;
|
||||||
@ -464,7 +464,6 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
AVPacket *avpkt)
|
AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
NotchLCContext *s = avctx->priv_data;
|
NotchLCContext *s = avctx->priv_data;
|
||||||
ThreadFrame frame = { .f = data };
|
|
||||||
GetByteContext *gb = &s->gb;
|
GetByteContext *gb = &s->gb;
|
||||||
PutByteContext *pb = &s->pb;
|
PutByteContext *pb = &s->pb;
|
||||||
unsigned uncompressed_size;
|
unsigned uncompressed_size;
|
||||||
@ -513,7 +512,7 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
bytestream2_init(gb, s->uncompressed_buffer, uncompressed_size);
|
bytestream2_init(gb, s->uncompressed_buffer, uncompressed_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = decode_blocks(avctx, p, &frame, uncompressed_size);
|
ret = decode_blocks(avctx, p, uncompressed_size);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
|
libavcodec/nvdec_av1.c

@@ -49,7 +49,7 @@ static int nvdec_av1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, u
     CUVIDAV1PICPARAMS *ppc = &pp->CodecSpecific.av1;
     FrameDecodeData *fdd;
     NVDECFrame *cf;
-    AVFrame *cur_frame = s->cur_frame.tf.f;
+    AVFrame *cur_frame = s->cur_frame.f;
 
     unsigned char remap_lr_type[4] = { AV1_RESTORE_NONE, AV1_RESTORE_SWITCHABLE, AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ };
 
@@ -233,7 +233,7 @@ static int nvdec_av1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, u
             ppc->loop_filter_ref_deltas[i] = frame_header->loop_filter_ref_deltas[i];
 
         /* Reference Frames */
-        ppc->ref_frame_map[i] = ff_nvdec_get_ref_idx(s->ref[i].tf.f);
+        ppc->ref_frame_map[i] = ff_nvdec_get_ref_idx(s->ref[i].f);
     }
 
     if (frame_header->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
@@ -246,7 +246,7 @@ static int nvdec_av1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, u
     for (i = 0; i < AV1_REFS_PER_FRAME; ++i) {
         /* Ref Frame List */
         int8_t ref_idx = frame_header->ref_frame_idx[i];
-        AVFrame *ref_frame = s->ref[ref_idx].tf.f;
+        AVFrame *ref_frame = s->ref[ref_idx].f;
 
         ppc->ref_frame[i].index = ppc->ref_frame_map[ref_idx];
         ppc->ref_frame[i].width = ref_frame->width;
libavcodec/photocd.c

@@ -293,7 +293,6 @@ static int photocd_decode_frame(AVCodecContext *avctx, void *data,
                                 int *got_frame, AVPacket *avpkt)
 {
     PhotoCDContext *s = avctx->priv_data;
-    ThreadFrame frame = { .f = data };
     const uint8_t *buf = avpkt->data;
     GetByteContext *gb = &s->gb;
     AVFrame *p = data;
@@ -326,7 +325,7 @@ static int photocd_decode_frame(AVCodecContext *avctx, void *data,
     if (ret < 0)
         return ret;
 
-    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+    if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
         return ret;
 
     p->pict_type = AV_PICTURE_TYPE_I;
libavcodec/pixlet.c

@@ -606,7 +606,6 @@ static int pixlet_decode_frame(AVCodecContext *avctx, void *data,
     PixletContext *ctx = avctx->priv_data;
     int i, w, h, width, height, ret, version;
     AVFrame *p = data;
-    ThreadFrame frame = { .f = data };
     uint32_t pktsize, depth;
 
     bytestream2_init(&ctx->gb, avpkt->data, avpkt->size);
@@ -673,20 +672,20 @@ static int pixlet_decode_frame(AVCodecContext *avctx, void *data,
     p->key_frame = 1;
     p->color_range = AVCOL_RANGE_JPEG;
 
-    ret = ff_thread_get_buffer(avctx, &frame, 0);
+    ret = ff_thread_get_buffer(avctx, p, 0);
     if (ret < 0)
         return ret;
 
     for (i = 0; i < 3; i++) {
-        ret = decode_plane(avctx, i, avpkt, frame.f);
+        ret = decode_plane(avctx, i, avpkt, p);
         if (ret < 0)
             return ret;
         if (avctx->flags & AV_CODEC_FLAG_GRAY)
             break;
     }
 
-    postprocess_luma(avctx, frame.f, ctx->w, ctx->h, ctx->depth);
-    postprocess_chroma(frame.f, ctx->w >> 1, ctx->h >> 1, ctx->depth);
+    postprocess_luma(avctx, p, ctx->w, ctx->h, ctx->depth);
+    postprocess_chroma(p, ctx->w >> 1, ctx->h >> 1, ctx->depth);
 
     *got_frame = 1;
 
libavcodec/proresdec2.c

@@ -779,7 +779,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                         AVPacket *avpkt)
 {
     ProresContext *ctx = avctx->priv_data;
-    ThreadFrame tframe = { .f = data };
     AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
@@ -805,7 +804,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     buf += frame_hdr_size;
     buf_size -= frame_hdr_size;
 
-    if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
+    if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
         return ret;
     ff_thread_finish_setup(avctx);
 
libavcodec/pthread_frame.c

@@ -948,15 +948,13 @@ FF_ENABLE_DEPRECATION_WARNINGS
     return 1;
 }
 
-static int thread_get_buffer_internal(AVCodecContext *avctx, ThreadFrame *f, int flags)
+static int thread_get_buffer_internal(AVCodecContext *avctx, AVFrame *f, int flags)
 {
     PerThreadContext *p;
     int err;
 
-    f->owner[0] = f->owner[1] = avctx;
-
     if (!(avctx->active_thread_type & FF_THREAD_FRAME))
-        return ff_get_buffer(avctx, f->f, flags);
+        return ff_get_buffer(avctx, f, flags);
 
     p = avctx->internal->thread_ctx;
 FF_DISABLE_DEPRECATION_WARNINGS
@@ -971,28 +969,16 @@ FF_ENABLE_DEPRECATION_WARNINGS
         return -1;
     }
 
-    if (avctx->codec->caps_internal & FF_CODEC_CAP_ALLOCATE_PROGRESS) {
-        atomic_int *progress;
-        f->progress = av_buffer_alloc(2 * sizeof(*progress));
-        if (!f->progress) {
-            return AVERROR(ENOMEM);
-        }
-        progress = (atomic_int*)f->progress->data;
-
-        atomic_init(&progress[0], -1);
-        atomic_init(&progress[1], -1);
-    }
-
     pthread_mutex_lock(&p->parent->buffer_mutex);
 #if !FF_API_THREAD_SAFE_CALLBACKS
-    err = ff_get_buffer(avctx, f->f, flags);
+    err = ff_get_buffer(avctx, f, flags);
 #else
 FF_DISABLE_DEPRECATION_WARNINGS
     if (THREAD_SAFE_CALLBACKS(avctx)) {
-        err = ff_get_buffer(avctx, f->f, flags);
+        err = ff_get_buffer(avctx, f, flags);
     } else {
         pthread_mutex_lock(&p->progress_mutex);
-        p->requested_frame = f->f;
+        p->requested_frame = f;
         p->requested_flags = flags;
         atomic_store_explicit(&p->state, STATE_GET_BUFFER, memory_order_release);
         pthread_cond_broadcast(&p->progress_cond);
@@ -1009,8 +995,6 @@ FF_DISABLE_DEPRECATION_WARNINGS
     ff_thread_finish_setup(avctx);
 FF_ENABLE_DEPRECATION_WARNINGS
 #endif
-    if (err)
-        av_buffer_unref(&f->progress);
 
     pthread_mutex_unlock(&p->parent->buffer_mutex);
 
@@ -1049,7 +1033,7 @@ enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixe
 FF_ENABLE_DEPRECATION_WARNINGS
 #endif
 
-int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
+int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
 {
     int ret = thread_get_buffer_internal(avctx, f, flags);
     if (ret < 0)
@@ -1059,10 +1043,36 @@ int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
 
 int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
 {
-    return ff_thread_get_buffer(avctx, f, flags);
+    int ret;
+
+    f->owner[0] = f->owner[1] = avctx;
+    /* Hint: It is possible for this function to be called with codecs
+     * that don't support frame threading at all, namely in case
+     * a frame-threaded decoder shares code with codecs that are not.
+     * This currently affects non-MPEG-4 mpegvideo codecs and VP7.
+     * The following check will always be true for them. */
+    if (!(avctx->active_thread_type & FF_THREAD_FRAME))
+        return ff_get_buffer(avctx, f->f, flags);
+
+    if (avctx->codec->caps_internal & FF_CODEC_CAP_ALLOCATE_PROGRESS) {
+        atomic_int *progress;
+        f->progress = av_buffer_alloc(2 * sizeof(*progress));
+        if (!f->progress) {
+            return AVERROR(ENOMEM);
+        }
+        progress = (atomic_int*)f->progress->data;
+
+        atomic_init(&progress[0], -1);
+        atomic_init(&progress[1], -1);
+    }
+
+    ret = ff_thread_get_buffer(avctx, f->f, flags);
+    if (ret)
+        av_buffer_unref(&f->progress);
+    return ret;
 }
 
-void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
+void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
 {
 #if FF_API_THREAD_SAFE_CALLBACKS
 FF_DISABLE_DEPRECATION_WARNINGS
@@ -1075,21 +1085,18 @@ FF_DISABLE_DEPRECATION_WARNINGS
 FF_ENABLE_DEPRECATION_WARNINGS
 #endif
 
-    if (!f->f)
+    if (!f)
         return;
 
     if (avctx->debug & FF_DEBUG_BUFFERS)
         av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);
 
-    av_buffer_unref(&f->progress);
-    f->owner[0] = f->owner[1] = NULL;
-
 #if !FF_API_THREAD_SAFE_CALLBACKS
-    av_frame_unref(f->f);
+    av_frame_unref(f);
 #else
     // when the frame buffers are not allocated, just reset it to clean state
-    if (can_direct_free || !f->f->buf[0]) {
-        av_frame_unref(f->f);
+    if (can_direct_free || !f->buf[0]) {
+        av_frame_unref(f);
         return;
     }
 
@@ -1113,7 +1120,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
     }
 
     dst = p->released_buffers[p->num_released_buffers];
-    av_frame_move_ref(dst, f->f);
+    av_frame_move_ref(dst, f);
 
     p->num_released_buffers++;
 
@@ -1124,15 +1131,17 @@ fail:
     // this leaks, but it is better than crashing
     if (ret < 0) {
         av_log(avctx, AV_LOG_ERROR, "Could not queue a frame for freeing, this will leak\n");
-        memset(f->f->buf, 0, sizeof(f->f->buf));
-        if (f->f->extended_buf)
-            memset(f->f->extended_buf, 0, f->f->nb_extended_buf * sizeof(*f->f->extended_buf));
-        av_frame_unref(f->f);
+        memset(f->buf, 0, sizeof(f->buf));
+        if (f->extended_buf)
+            memset(f->extended_buf, 0, f->nb_extended_buf * sizeof(*f->extended_buf));
+        av_frame_unref(f);
     }
 #endif
 }
 
 void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
 {
-    ff_thread_release_buffer(avctx, f);
+    av_buffer_unref(&f->progress);
+    f->owner[0] = f->owner[1] = NULL;
+    ff_thread_release_buffer(avctx, f->f);
 }
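Only decoders with inter-frame dependencies keep passing a ThreadFrame, because the progress array allocated above is what the synchronization helpers operate on. A hedged sketch of that producer/consumer pattern (function and variable names are illustrative, not from this diff):

    /* Producer: a frame-threaded decoder filling a reference frame. */
    static int produce_reference(AVCodecContext *avctx, ThreadFrame *tf, int rows)
    {
        int ret = ff_thread_get_ext_buffer(avctx, tf, AV_GET_BUFFER_FLAG_REF);
        if (ret < 0)
            return ret;
        for (int row = 0; row < rows; row++) {
            /* ... decode one row into tf->f ... */
            ff_thread_report_progress(tf, row, 0); /* wakes waiting threads */
        }
        return 0;
    }

    /* Consumer: a later frame blocking on its reference. */
    static void use_reference(ThreadFrame *ref, int row_needed)
    {
        ff_thread_await_progress(ref, row_needed, 0); /* returns once reported */
        /* ... motion compensation may now read rows <= row_needed of ref->f ... */
    }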
libavcodec/rv34.c

@@ -41,6 +41,7 @@
 #include "mpeg_er.h"
 #include "qpeldsp.h"
 #include "rectangle.h"
+#include "thread.h"
 #include "threadframe.h"
 
 #include "rv34vlc.h"
libavcodec/sheervideo.c

@@ -1805,7 +1805,6 @@ static int decode_frame(AVCodecContext *avctx,
                         AVPacket *avpkt)
 {
     SheerVideoContext *s = avctx->priv_data;
-    ThreadFrame frame = { .f = data };
     const SheerTable *table;
     AVFrame *p = data;
     GetBitContext gb;
@@ -1977,7 +1976,7 @@ static int decode_frame(AVCodecContext *avctx,
     p->pict_type = AV_PICTURE_TYPE_I;
     p->key_frame = 1;
 
-    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+    if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
         return ret;
 
     if ((ret = init_get_bits8(&gb, avpkt->data + 20, avpkt->size - 20)) < 0)
libavcodec/takdec.c

@@ -679,7 +679,6 @@ static int tak_decode_frame(AVCodecContext *avctx, void *data,
 {
     TAKDecContext *s = avctx->priv_data;
     AVFrame *frame = data;
-    ThreadFrame tframe = { .f = data };
     GetBitContext *gb = &s->gb;
     int chan, i, ret, hsize;
 
@@ -742,7 +741,7 @@ static int tak_decode_frame(AVCodecContext *avctx, void *data,
                           : s->ti.frame_samples;
 
     frame->nb_samples = s->nb_samples;
-    if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
+    if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
         return ret;
     ff_thread_finish_setup(avctx);
 
libavcodec/thread.h

@@ -31,14 +31,6 @@
 
 #include "avcodec.h"
 
-typedef struct ThreadFrame {
-    AVFrame *f;
-    AVCodecContext *owner[2];
-    // progress->data is an array of 2 ints holding progress for top/bottom
-    // fields
-    AVBufferRef *progress;
-} ThreadFrame;
-
 /**
  * Wait for decoding threads to finish and reset internal state.
  * Called by avcodec_flush_buffers().
@@ -92,7 +84,7 @@ enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixe
  * @param avctx The current context.
  * @param f The frame to write into.
  */
-int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags);
+int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags);
 
 /**
  * Wrapper around release_buffer() for frame-multithreaded codecs.
@@ -105,9 +97,7 @@ int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags);
  * @param avctx The current context.
  * @param f The picture being released.
  */
-void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f);
+void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f);
 
-int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src);
-
 int ff_thread_init(AVCodecContext *s);
 int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx,
libavcodec/threadframe.h

@@ -21,8 +21,16 @@
 #ifndef AVCODEC_THREADFRAME_H
 #define AVCODEC_THREADFRAME_H
 
+#include "libavutil/frame.h"
 #include "avcodec.h"
-#include "thread.h"
+
+typedef struct ThreadFrame {
+    AVFrame *f;
+    AVCodecContext *owner[2];
+    // progress->data is an array of 2 ints holding progress for top/bottom
+    // fields
+    AVBufferRef *progress;
+} ThreadFrame;
 
 /**
  * Notify later decoding threads when part of their reference picture is ready.
@@ -74,4 +82,6 @@ int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags);
  */
 void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f);
 
+int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src);
+
 #endif
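ff_thread_ref_frame() moves to threadframe.h along with the struct. Conceptually it has to duplicate both references the struct carries, so that every holder observes the same progress counters; a condensed sketch under that assumption, not the verbatim implementation:

    static int thread_ref_frame_sketch(ThreadFrame *dst, const ThreadFrame *src)
    {
        int ret = av_frame_ref(dst->f, src->f);   /* share the frame buffers */
        if (ret < 0)
            return ret;

        dst->owner[0] = src->owner[0];
        dst->owner[1] = src->owner[1];

        if (src->progress) {
            dst->progress = av_buffer_ref(src->progress); /* shared atomics */
            if (!dst->progress) {
                av_frame_unref(dst->f);
                return AVERROR(ENOMEM);
            }
        }
        return 0;
    }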
libavcodec/tiff.c

@@ -1016,7 +1016,7 @@ static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame,
     return avpkt->size;
 }
 
-static int init_image(TiffContext *s, ThreadFrame *frame)
+static int init_image(TiffContext *s, AVFrame *frame)
 {
     int ret;
     int create_gray_palette = 0;
@@ -1177,11 +1177,11 @@ static int init_image(TiffContext *s, ThreadFrame *frame)
         return ret;
     if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
         if (!create_gray_palette)
-            memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
+            memcpy(frame->data[1], s->palette, sizeof(s->palette));
         else {
             /* make default grayscale pal */
             int i;
-            uint32_t *pal = (uint32_t *)frame->f->data[1];
+            uint32_t *pal = (uint32_t *)frame->data[1];
             for (i = 0; i < 1<<s->bpp; i++)
                 pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
         }
@@ -1743,7 +1743,6 @@ static int decode_frame(AVCodecContext *avctx,
 {
     TiffContext *const s = avctx->priv_data;
     AVFrame *const p = data;
-    ThreadFrame frame = { .f = data };
     unsigned off, last_off;
     int le, ret, plane, planes;
     int i, j, entries, stride;
@@ -1894,7 +1893,7 @@ again:
     }
 
     /* now we have the data and may start decoding */
-    if ((ret = init_image(s, &frame)) < 0)
+    if ((ret = init_image(s, p)) < 0)
         return ret;
 
     if (!s->is_tiled || has_strip_bits) {
libavcodec/tta.c

@@ -222,7 +222,6 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
 {
     AVFrame *frame = data;
-    ThreadFrame tframe = { .f = data };
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     TTAContext *s = avctx->priv_data;
@@ -242,7 +241,7 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = framelen;
-    if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
+    if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
         return ret;
 
     // decode directly to output buffer for 24-bit sample format
libavcodec/utils.c

@@ -889,10 +889,9 @@ enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixe
     return ff_get_format(avctx, fmt);
 }
 
-int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
+int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
 {
-    f->owner[0] = f->owner[1] = avctx;
-    return ff_get_buffer(avctx, f->f, flags);
+    return ff_get_buffer(avctx, f, flags);
 }
 
 int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
@@ -901,10 +900,10 @@ int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
     return ff_get_buffer(avctx, f->f, flags);
 }
 
-void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
+void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
 {
-    if (f->f)
-        av_frame_unref(f->f);
+    if (f)
+        av_frame_unref(f);
 }
 
 void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
libavcodec/utvideodec.c

@@ -563,14 +563,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     UtvideoContext *c = avctx->priv_data;
+    AVFrame *const frame = data;
     int i, j;
     const uint8_t *plane_start[5];
     int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
     int ret;
     GetByteContext gb;
-    ThreadFrame frame = { .f = data };
 
-    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+    if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
         return ret;
 
     /* parse plane structure to get frame flags and validate slice offsets */
@@ -709,80 +709,80 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     case AV_PIX_FMT_GBRP:
     case AV_PIX_FMT_GBRAP:
         for (i = 0; i < c->planes; i++) {
-            ret = decode_plane(c, i, frame.f->data[i],
-                               frame.f->linesize[i], avctx->width,
+            ret = decode_plane(c, i, frame->data[i],
+                               frame->linesize[i], avctx->width,
                                avctx->height, plane_start[i],
                                c->frame_pred == PRED_LEFT);
             if (ret)
                 return ret;
             if (c->frame_pred == PRED_MEDIAN) {
                 if (!c->interlaced) {
-                    restore_median_planar(c, frame.f->data[i],
-                                          frame.f->linesize[i], avctx->width,
+                    restore_median_planar(c, frame->data[i],
+                                          frame->linesize[i], avctx->width,
                                           avctx->height, c->slices, 0);
                 } else {
-                    restore_median_planar_il(c, frame.f->data[i],
-                                             frame.f->linesize[i],
+                    restore_median_planar_il(c, frame->data[i],
+                                             frame->linesize[i],
                                              avctx->width, avctx->height, c->slices,
                                              0);
                 }
             } else if (c->frame_pred == PRED_GRADIENT) {
                 if (!c->interlaced) {
-                    restore_gradient_planar(c, frame.f->data[i],
-                                            frame.f->linesize[i], avctx->width,
+                    restore_gradient_planar(c, frame->data[i],
+                                            frame->linesize[i], avctx->width,
                                             avctx->height, c->slices, 0);
                 } else {
-                    restore_gradient_planar_il(c, frame.f->data[i],
-                                               frame.f->linesize[i],
+                    restore_gradient_planar_il(c, frame->data[i],
+                                               frame->linesize[i],
                                                avctx->width, avctx->height, c->slices,
                                                0);
                 }
            }
        }
-        c->utdsp.restore_rgb_planes(frame.f->data[2], frame.f->data[0], frame.f->data[1],
-                                    frame.f->linesize[2], frame.f->linesize[0], frame.f->linesize[1],
+        c->utdsp.restore_rgb_planes(frame->data[2], frame->data[0], frame->data[1],
+                                    frame->linesize[2], frame->linesize[0], frame->linesize[1],
                                     avctx->width, avctx->height);
         break;
     case AV_PIX_FMT_GBRAP10:
     case AV_PIX_FMT_GBRP10:
         for (i = 0; i < c->planes; i++) {
-            ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i],
-                                 frame.f->linesize[i] / 2, avctx->width,
+            ret = decode_plane10(c, i, (uint16_t *)frame->data[i],
+                                 frame->linesize[i] / 2, avctx->width,
                                  avctx->height, plane_start[i],
                                  plane_start[i + 1] - 1024,
                                  c->frame_pred == PRED_LEFT);
             if (ret)
                 return ret;
         }
-        c->utdsp.restore_rgb_planes10((uint16_t *)frame.f->data[2], (uint16_t *)frame.f->data[0], (uint16_t *)frame.f->data[1],
-                                      frame.f->linesize[2] / 2, frame.f->linesize[0] / 2, frame.f->linesize[1] / 2,
+        c->utdsp.restore_rgb_planes10((uint16_t *)frame->data[2], (uint16_t *)frame->data[0], (uint16_t *)frame->data[1],
+                                      frame->linesize[2] / 2, frame->linesize[0] / 2, frame->linesize[1] / 2,
                                       avctx->width, avctx->height);
         break;
     case AV_PIX_FMT_YUV420P:
         for (i = 0; i < 3; i++) {
-            ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
+            ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
                                avctx->width >> !!i, avctx->height >> !!i,
                                plane_start[i], c->frame_pred == PRED_LEFT);
             if (ret)
                 return ret;
             if (c->frame_pred == PRED_MEDIAN) {
                 if (!c->interlaced) {
-                    restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
+                    restore_median_planar(c, frame->data[i], frame->linesize[i],
                                           avctx->width >> !!i, avctx->height >> !!i,
                                           c->slices, !i);
                 } else {
-                    restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+                    restore_median_planar_il(c, frame->data[i], frame->linesize[i],
                                              avctx->width >> !!i,
                                              avctx->height >> !!i,
                                              c->slices, !i);
                 }
             } else if (c->frame_pred == PRED_GRADIENT) {
                 if (!c->interlaced) {
-                    restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
+                    restore_gradient_planar(c, frame->data[i], frame->linesize[i],
                                             avctx->width >> !!i, avctx->height >> !!i,
                                             c->slices, !i);
                 } else {
-                    restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+                    restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
                                                avctx->width >> !!i,
                                                avctx->height >> !!i,
                                                c->slices, !i);
@@ -792,28 +792,28 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         break;
     case AV_PIX_FMT_YUV422P:
         for (i = 0; i < 3; i++) {
-            ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
+            ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
                                avctx->width >> !!i, avctx->height,
                                plane_start[i], c->frame_pred == PRED_LEFT);
             if (ret)
                 return ret;
             if (c->frame_pred == PRED_MEDIAN) {
                 if (!c->interlaced) {
-                    restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
+                    restore_median_planar(c, frame->data[i], frame->linesize[i],
                                           avctx->width >> !!i, avctx->height,
                                           c->slices, 0);
                 } else {
-                    restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+                    restore_median_planar_il(c, frame->data[i], frame->linesize[i],
                                              avctx->width >> !!i, avctx->height,
                                              c->slices, 0);
                 }
             } else if (c->frame_pred == PRED_GRADIENT) {
                 if (!c->interlaced) {
-                    restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
+                    restore_gradient_planar(c, frame->data[i], frame->linesize[i],
                                             avctx->width >> !!i, avctx->height,
                                             c->slices, 0);
                 } else {
-                    restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+                    restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
                                                avctx->width >> !!i, avctx->height,
                                                c->slices, 0);
                 }
@@ -822,28 +822,28 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         break;
     case AV_PIX_FMT_YUV444P:
         for (i = 0; i < 3; i++) {
-            ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
+            ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
                                avctx->width, avctx->height,
                                plane_start[i], c->frame_pred == PRED_LEFT);
             if (ret)
                 return ret;
             if (c->frame_pred == PRED_MEDIAN) {
                 if (!c->interlaced) {
-                    restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
+                    restore_median_planar(c, frame->data[i], frame->linesize[i],
                                           avctx->width, avctx->height,
                                           c->slices, 0);
                 } else {
-                    restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+                    restore_median_planar_il(c, frame->data[i], frame->linesize[i],
                                              avctx->width, avctx->height,
                                              c->slices, 0);
                 }
             } else if (c->frame_pred == PRED_GRADIENT) {
                 if (!c->interlaced) {
-                    restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
+                    restore_gradient_planar(c, frame->data[i], frame->linesize[i],
                                             avctx->width, avctx->height,
                                             c->slices, 0);
                 } else {
-                    restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+                    restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
                                                avctx->width, avctx->height,
                                                c->slices, 0);
                 }
@@ -852,7 +852,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         break;
     case AV_PIX_FMT_YUV420P10:
         for (i = 0; i < 3; i++) {
-            ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], frame.f->linesize[i] / 2,
+            ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2,
                                  avctx->width >> !!i, avctx->height >> !!i,
                                  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
             if (ret)
@@ -861,7 +861,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         break;
     case AV_PIX_FMT_YUV422P10:
         for (i = 0; i < 3; i++) {
-            ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], frame.f->linesize[i] / 2,
+            ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2,
                                  avctx->width >> !!i, avctx->height,
                                  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
             if (ret)
@@ -870,9 +870,9 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         break;
     }
 
-    frame.f->key_frame = 1;
-    frame.f->pict_type = AV_PICTURE_TYPE_I;
-    frame.f->interlaced_frame = !!c->interlaced;
+    frame->key_frame = 1;
+    frame->pict_type = AV_PICTURE_TYPE_I;
+    frame->interlaced_frame = !!c->interlaced;
 
     *got_frame = 1;
 
libavcodec/v210dec.c

@@ -142,7 +142,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     V210DecContext *s = avctx->priv_data;
     ThreadData td;
     int ret, stride, aligned_input;
-    ThreadFrame frame = { .f = data };
     AVFrame *pic = data;
     const uint8_t *psrc = avpkt->data;
 
@@ -177,7 +176,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         ff_v210dec_init(s);
     }
 
-    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+    if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
         return ret;
 
     pic->pict_type = AV_PICTURE_TYPE_I;
libavcodec/v410dec.c

@@ -89,7 +89,6 @@ static int v410_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame, AVPacket *avpkt)
 {
     ThreadData td;
-    ThreadFrame frame = { .f = data };
     AVFrame *pic = data;
     uint8_t *src = avpkt->data;
     int ret;
@@ -101,7 +100,7 @@ static int v410_decode_frame(AVCodecContext *avctx, void *data,
         return AVERROR(EINVAL);
     }
 
-    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+    if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
         return ret;
 
     pic->key_frame = 1;
libavcodec/vaapi_av1.c

@@ -18,14 +18,16 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+#include "libavutil/frame.h"
 #include "libavutil/pixdesc.h"
 #include "hwconfig.h"
 #include "vaapi_decode.h"
 #include "internal.h"
 #include "av1dec.h"
+#include "thread.h"
 
 typedef struct VAAPIAV1FrameRef {
-    ThreadFrame frame;
+    AVFrame *frame;
     int valid;
 } VAAPIAV1FrameRef;
 
@@ -40,13 +42,13 @@ typedef struct VAAPIAV1DecContext {
      * used to apply film grain and push to downstream.
      */
    VAAPIAV1FrameRef ref_tab[AV1_NUM_REF_FRAMES];
-    ThreadFrame tmp_frame;
+    AVFrame *tmp_frame;
 } VAAPIAV1DecContext;
 
 static VASurfaceID vaapi_av1_surface_id(AV1Frame *vf)
 {
     if (vf)
-        return ff_vaapi_get_surface_id(vf->tf.f);
+        return ff_vaapi_get_surface_id(vf->f);
     else
         return VA_INVALID_SURFACE;
 }
@@ -73,16 +75,16 @@ static int vaapi_av1_decode_init(AVCodecContext *avctx)
 {
     VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
 
-    ctx->tmp_frame.f = av_frame_alloc();
-    if (!ctx->tmp_frame.f) {
+    ctx->tmp_frame = av_frame_alloc();
+    if (!ctx->tmp_frame) {
         av_log(avctx, AV_LOG_ERROR,
                "Failed to allocate frame.\n");
         return AVERROR(ENOMEM);
     }
 
     for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
-        ctx->ref_tab[i].frame.f = av_frame_alloc();
-        if (!ctx->ref_tab[i].frame.f) {
+        ctx->ref_tab[i].frame = av_frame_alloc();
+        if (!ctx->ref_tab[i].frame) {
             av_log(avctx, AV_LOG_ERROR,
                    "Failed to allocate reference table frame %d.\n", i);
             return AVERROR(ENOMEM);
@@ -97,14 +99,14 @@ static int vaapi_av1_decode_uninit(AVCodecContext *avctx)
 {
     VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
 
-    if (ctx->tmp_frame.f->buf[0])
-        ff_thread_release_buffer(avctx, &ctx->tmp_frame);
-    av_frame_free(&ctx->tmp_frame.f);
+    if (ctx->tmp_frame->buf[0])
+        ff_thread_release_buffer(avctx, ctx->tmp_frame);
+    av_frame_free(&ctx->tmp_frame);
 
     for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
-        if (ctx->ref_tab[i].frame.f->buf[0])
-            ff_thread_release_buffer(avctx, &ctx->ref_tab[i].frame);
-        av_frame_free(&ctx->ref_tab[i].frame.f);
+        if (ctx->ref_tab[i].frame->buf[0])
+            ff_thread_release_buffer(avctx, ctx->ref_tab[i].frame);
+        av_frame_free(&ctx->ref_tab[i].frame);
     }
 
     return ff_vaapi_decode_uninit(avctx);
@@ -135,12 +137,12 @@ static int vaapi_av1_start_frame(AVCodecContext *avctx,
             goto fail;
 
     if (apply_grain) {
-        if (ctx->tmp_frame.f->buf[0])
-            ff_thread_release_buffer(avctx, &ctx->tmp_frame);
-        err = ff_thread_get_buffer(avctx, &ctx->tmp_frame, AV_GET_BUFFER_FLAG_REF);
+        if (ctx->tmp_frame->buf[0])
+            ff_thread_release_buffer(avctx, ctx->tmp_frame);
+        err = ff_thread_get_buffer(avctx, ctx->tmp_frame, AV_GET_BUFFER_FLAG_REF);
         if (err < 0)
             goto fail;
-        pic->output_surface = ff_vaapi_get_surface_id(ctx->tmp_frame.f);
+        pic->output_surface = ff_vaapi_get_surface_id(ctx->tmp_frame);
     } else {
         pic->output_surface = vaapi_av1_surface_id(&s->cur_frame);
     }
@@ -276,7 +278,7 @@ static int vaapi_av1_start_frame(AVCodecContext *avctx,
             pic_param.ref_frame_map[i] = VA_INVALID_ID;
         else
             pic_param.ref_frame_map[i] = ctx->ref_tab[i].valid ?
-                                         ff_vaapi_get_surface_id(ctx->ref_tab[i].frame.f) :
+                                         ff_vaapi_get_surface_id(ctx->ref_tab[i].frame) :
                                          vaapi_av1_surface_id(&s->ref[i]);
     }
     for (int i = 0; i < AV1_REFS_PER_FRAME; i++) {
@@ -380,11 +382,11 @@ static int vaapi_av1_end_frame(AVCodecContext *avctx)
 
     for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
         if (header->refresh_frame_flags & (1 << i)) {
-            if (ctx->ref_tab[i].frame.f->buf[0])
-                ff_thread_release_buffer(avctx, &ctx->ref_tab[i].frame);
+            if (ctx->ref_tab[i].frame->buf[0])
+                ff_thread_release_buffer(avctx, ctx->ref_tab[i].frame);
 
             if (apply_grain) {
-                ret = ff_thread_ref_frame(&ctx->ref_tab[i].frame, &ctx->tmp_frame);
+                ret = av_frame_ref(ctx->ref_tab[i].frame, ctx->tmp_frame);
                 if (ret < 0)
                     return ret;
                 ctx->ref_tab[i].valid = 1;
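Note av_frame_ref() replacing ff_thread_ref_frame() in vaapi_av1_end_frame(): the film-grain reference table is private to this hwaccel and never waited on by other threads, so a plain AVFrame reference suffices. A minimal sketch of the pattern (hypothetical helper name):

    /* Keep a private reference without progress tracking: av_frame_ref()
     * copies buffer references only; no owner or progress state exists. */
    static int keep_private_ref(AVFrame *dst, const AVFrame *src)
    {
        av_frame_unref(dst);           /* drop any previous reference */
        return av_frame_ref(dst, src); /* new reference to the same buffers */
    }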
libavcodec/vble.c

@@ -125,7 +125,6 @@ static int vble_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     int offset = 0;
     int width_uv = avctx->width / 2, height_uv = avctx->height / 2;
     int ret;
-    ThreadFrame frame = { .f = data };
 
     if (avpkt->size < 4 || avpkt->size - 4 > INT_MAX/8) {
         av_log(avctx, AV_LOG_ERROR, "Invalid packet size\n");
@@ -133,7 +132,7 @@ static int vble_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     }
 
     /* Allocate buffer */
-    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+    if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
         return ret;
 
     /* Set flags */
libavcodec/vp8.h

@@ -33,7 +33,7 @@
 #include "libavutil/thread.h"
 
 #include "h264pred.h"
-#include "thread.h"
+#include "threadframe.h"
 #include "vp56.h"
 #include "vp8dsp.h"
 
libavcodec/vp9dec.h

@@ -28,7 +28,7 @@
 #include <stdint.h>
 
 #include "vp9.h"
-#include "thread.h"
+#include "threadframe.h"
 #include "vp56.h"
 
 enum BlockPartition {
libavcodec/webp.c

@@ -568,8 +568,7 @@ static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
     img->frame->height = h;
 
     if (role == IMAGE_ROLE_ARGB && !img->is_alpha_primary) {
-        ThreadFrame pt = { .f = img->frame };
-        ret = ff_thread_get_buffer(s->avctx, &pt, 0);
+        ret = ff_thread_get_buffer(s->avctx, img->frame, 0);
     } else
         ret = av_frame_get_buffer(img->frame, 1);
     if (ret < 0)
libavcodec/ylc.c

@@ -285,7 +285,6 @@ static int decode_frame(AVCodecContext *avctx,
     int TL[4] = { 128, 128, 128, 128 };
     int L[4] = { 128, 128, 128, 128 };
     YLCContext *s = avctx->priv_data;
-    ThreadFrame frame = { .f = data };
     const uint8_t *buf = avpkt->data;
     int ret, x, y, toffset, boffset;
     AVFrame * const p = data;
@@ -307,7 +306,7 @@ static int decode_frame(AVCodecContext *avctx,
     if (toffset >= boffset || boffset >= avpkt->size)
         return AVERROR_INVALIDDATA;
 
-    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+    if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
         return ret;
 
     av_fast_malloc(&s->buffer, &s->buffer_size,