vp8: Add hwaccel hooks

Also adds some extra fields to the main context structure that may
be needed by a hwaccel decoder.
Mark Thompson 2016-09-04 13:26:37 +01:00
parent 131a85a1fe
commit 4e528206bc
2 changed files with 157 additions and 60 deletions
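To put the diff below in context: a hwaccel backend hooks into the decoder through the AVHWAccel start_frame/decode_slice/end_frame callbacks and gets per-frame private data when frame_priv_data_size is non-zero, while the parsed header is exposed through the new VP8Context fields (quant, coder_state_at_header_end, header_partition_size, coeff_partition_size). The sketch below is illustrative only and not part of this commit; MyVP8Picture, the my_vp8_* functions and the "vp8_myhw" name are invented, and a real backend would forward these values to its hardware API.

#include "avcodec.h"
#include "vp8.h"

/* Hypothetical per-frame data: the decoder allocates frame_priv_data_size
 * bytes and exposes them through VP8Frame.hwaccel_picture_private. */
typedef struct MyVP8Picture {
    int            yac_qi;          /* raw quantiser from s->quant            */
    uint32_t       bool_range;      /* coder state at the end of the header   */
    uint32_t       bool_value;
    int            bool_bit_count;
    const uint8_t *bitstream;       /* whole packet, as passed to the hooks   */
    uint32_t       bitstream_size;
} MyVP8Picture;

static int my_vp8_start_frame(AVCodecContext *avctx,
                              const uint8_t *buffer, uint32_t size)
{
    const VP8Context *s  = avctx->priv_data;
    MyVP8Picture *pic    = s->framep[VP56_FRAME_CURRENT]->hwaccel_picture_private;

    /* Copy the parsed header state the hardware needs in order to resume
     * decoding at the macroblock layer. */
    pic->yac_qi         = s->quant.yac_qi;
    pic->bool_range     = s->coder_state_at_header_end.range;
    pic->bool_value     = s->coder_state_at_header_end.value;
    pic->bool_bit_count = s->coder_state_at_header_end.bit_count;
    pic->bitstream      = buffer;
    pic->bitstream_size = size;
    return 0;
}

static int my_vp8_decode_slice(AVCodecContext *avctx,
                               const uint8_t *buffer, uint32_t size)
{
    /* The decoder passes the complete packet; partition boundaries come
     * from s->header_partition_size and s->coeff_partition_size[]. */
    return 0;
}

static int my_vp8_end_frame(AVCodecContext *avctx)
{
    /* A real implementation submits the accumulated parameters and
     * bitstream to the hardware here. */
    return 0;
}

AVHWAccel ff_vp8_myhw_hwaccel = {
    .name                 = "vp8_myhw",          /* hypothetical */
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_VP8,
    .pix_fmt              = AV_PIX_FMT_YUV420P,  /* placeholder; a real hwaccel
                                                  * advertises its hardware format */
    .start_frame          = my_vp8_start_frame,
    .decode_slice         = my_vp8_decode_slice,
    .end_frame            = my_vp8_end_frame,
    .frame_priv_data_size = sizeof(MyVP8Picture),
};

Note that the pix_fmts list added in vp78_decode_frame() only contains AV_PIX_FMT_YUV420P here; a backend like this would also need its hardware pixel format added to that list before it could be selected.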

libavcodec/vp8.c

@@ -64,16 +64,30 @@ static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
if ((ret = ff_thread_get_buffer(s->avctx, &f->tf,
ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
return ret;
if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height))) {
ff_thread_release_buffer(s->avctx, &f->tf);
return AVERROR(ENOMEM);
if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height)))
goto fail;
if (s->avctx->hwaccel) {
const AVHWAccel *hwaccel = s->avctx->hwaccel;
if (hwaccel->frame_priv_data_size) {
f->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
if (!f->hwaccel_priv_buf)
goto fail;
f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
}
}
return 0;
fail:
av_buffer_unref(&f->seg_map);
ff_thread_release_buffer(s->avctx, &f->tf);
return AVERROR(ENOMEM);
}
static void vp8_release_frame(VP8Context *s, VP8Frame *f)
{
av_buffer_unref(&f->seg_map);
av_buffer_unref(&f->hwaccel_priv_buf);
f->hwaccel_picture_private = NULL;
ff_thread_release_buffer(s->avctx, &f->tf);
}
@@ -91,6 +105,12 @@ static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, VP8Frame *src)
vp8_release_frame(s, dst);
return AVERROR(ENOMEM);
}
if (src->hwaccel_picture_private) {
dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
if (!dst->hwaccel_priv_buf)
return AVERROR(ENOMEM);
dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
}
return 0;
}
@@ -132,7 +152,7 @@ static VP8Frame *vp8_find_free_buffer(VP8Context *s)
av_log(s->avctx, AV_LOG_FATAL, "Ran out of free frames!\n");
abort();
}
if (frame->tf.f->data[0])
if (frame->tf.f->buf[0])
vp8_release_frame(s, frame);
return frame;
@@ -209,8 +229,9 @@ static void parse_segment_info(VP8Context *s)
int i;
s->segmentation.update_map = vp8_rac_get(c);
s->segmentation.update_feature_data = vp8_rac_get(c);
if (vp8_rac_get(c)) { // update segment feature data
if (s->segmentation.update_feature_data) {
s->segmentation.absolute_vals = vp8_rac_get(c);
for (i = 0; i < 4; i++)
@@ -264,11 +285,14 @@ static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
int size = AV_RL24(sizes + 3 * i);
if (buf_size - size < 0)
return -1;
s->coeff_partition_size[i] = size;
ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
buf += size;
buf_size -= size;
}
s->coeff_partition_size[i] = buf_size;
ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);
return 0;
@@ -298,28 +322,28 @@ static void get_quants(VP8Context *s)
VP56RangeCoder *c = &s->c;
int i, base_qi;
int yac_qi = vp8_rac_get_uint(c, 7);
int ydc_delta = vp8_rac_get_sint(c, 4);
int y2dc_delta = vp8_rac_get_sint(c, 4);
int y2ac_delta = vp8_rac_get_sint(c, 4);
int uvdc_delta = vp8_rac_get_sint(c, 4);
int uvac_delta = vp8_rac_get_sint(c, 4);
s->quant.yac_qi = vp8_rac_get_uint(c, 7);
s->quant.ydc_delta = vp8_rac_get_sint(c, 4);
s->quant.y2dc_delta = vp8_rac_get_sint(c, 4);
s->quant.y2ac_delta = vp8_rac_get_sint(c, 4);
s->quant.uvdc_delta = vp8_rac_get_sint(c, 4);
s->quant.uvac_delta = vp8_rac_get_sint(c, 4);
for (i = 0; i < 4; i++) {
if (s->segmentation.enabled) {
base_qi = s->segmentation.base_quant[i];
if (!s->segmentation.absolute_vals)
base_qi += yac_qi;
base_qi += s->quant.yac_qi;
} else
base_qi = yac_qi;
base_qi = s->quant.yac_qi;
s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + ydc_delta, 7)];
s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + s->quant.ydc_delta, 7)];
s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi, 7)];
s->qmat[i].luma_dc_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + y2dc_delta, 7)] * 2;
s->qmat[i].luma_dc_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + s->quant.y2dc_delta, 7)] * 2;
/* 101581>>16 is equivalent to 155/100 */
s->qmat[i].luma_dc_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + y2ac_delta, 7)] * 101581 >> 16;
s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + uvdc_delta, 7)];
s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + uvac_delta, 7)];
s->qmat[i].luma_dc_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + s->quant.y2ac_delta, 7)] * 101581 >> 16;
s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + s->quant.uvdc_delta, 7)];
s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + s->quant.uvac_delta, 7)];
s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
s->qmat[i].chroma_qmul[0] = FFMIN(s->qmat[i].chroma_qmul[0], 132);
@@ -637,6 +661,8 @@ static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
buf += 3;
buf_size -= 3;
s->header_partition_size = header_size;
if (s->profile > 3)
av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);
@@ -700,9 +726,11 @@ static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
s->filter.level = vp8_rac_get_uint(c, 6);
s->filter.sharpness = vp8_rac_get_uint(c, 3);
if ((s->lf_delta.enabled = vp8_rac_get(c)))
if (vp8_rac_get(c))
if ((s->lf_delta.enabled = vp8_rac_get(c))) {
s->lf_delta.update = vp8_rac_get(c);
if (s->lf_delta.update)
update_lf_deltas(s);
}
if (setup_partitions(s, buf, buf_size)) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
@@ -741,6 +769,13 @@ static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
vp78_update_pred16x16_pred8x8_mvc_probabilities(s, VP8_MVC_SIZE);
}
// Record the entropy coder state here so that hwaccels can use it.
s->c.code_word = vp56_rac_renorm(&s->c);
s->coder_state_at_header_end.input = s->c.buffer - (-s->c.bits / 8);
s->coder_state_at_header_end.range = s->c.high;
s->coder_state_at_header_end.value = s->c.code_word >> 16;
s->coder_state_at_header_end.bit_count = -s->c.bits % 8;
return 0;
}
@@ -2462,7 +2497,6 @@ static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP8);
}
static av_always_inline
int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
AVPacket *avpkt, int is_vp7)
@@ -2480,6 +2514,20 @@ int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
if (ret < 0)
goto err;
if (!is_vp7 && s->pix_fmt == AV_PIX_FMT_NONE) {
enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE,
};
s->pix_fmt = ff_get_format(s->avctx, pix_fmts);
if (s->pix_fmt < 0) {
ret = AVERROR(EINVAL);
goto err;
}
avctx->pix_fmt = s->pix_fmt;
}
prev_frame = s->framep[VP56_FRAME_CURRENT];
referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT ||
@@ -2555,51 +2603,67 @@ int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
ff_thread_finish_setup(avctx);
s->linesize = curframe->tf.f->linesize[0];
s->uvlinesize = curframe->tf.f->linesize[1];
if (avctx->hwaccel) {
ret = avctx->hwaccel->start_frame(avctx, avpkt->data, avpkt->size);
if (ret < 0)
goto err;
memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz));
/* Zero macroblock structures for top/top-left prediction
* from outside the frame. */
if (!s->mb_layout)
memset(s->macroblocks + s->mb_height * 2 - 1, 0,
(s->mb_width + 1) * sizeof(*s->macroblocks));
if (!s->mb_layout && s->keyframe)
memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4);
ret = avctx->hwaccel->decode_slice(avctx, avpkt->data, avpkt->size);
if (ret < 0)
goto err;
memset(s->ref_count, 0, sizeof(s->ref_count));
ret = avctx->hwaccel->end_frame(avctx);
if (ret < 0)
goto err;
if (s->mb_layout == 1) {
// Make sure the previous frame has read its segmentation map,
// if we re-use the same map.
if (prev_frame && s->segmentation.enabled &&
!s->segmentation.update_map)
ff_thread_await_progress(&prev_frame->tf, 1, 0);
if (is_vp7)
vp7_decode_mv_mb_modes(avctx, curframe, prev_frame);
} else {
s->linesize = curframe->tf.f->linesize[0];
s->uvlinesize = curframe->tf.f->linesize[1];
memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz));
/* Zero macroblock structures for top/top-left prediction
* from outside the frame. */
if (!s->mb_layout)
memset(s->macroblocks + s->mb_height * 2 - 1, 0,
(s->mb_width + 1) * sizeof(*s->macroblocks));
if (!s->mb_layout && s->keyframe)
memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4);
memset(s->ref_count, 0, sizeof(s->ref_count));
if (s->mb_layout == 1) {
// Make sure the previous frame has read its segmentation map,
// if we re-use the same map.
if (prev_frame && s->segmentation.enabled &&
!s->segmentation.update_map)
ff_thread_await_progress(&prev_frame->tf, 1, 0);
if (is_vp7)
vp7_decode_mv_mb_modes(avctx, curframe, prev_frame);
else
vp8_decode_mv_mb_modes(avctx, curframe, prev_frame);
}
if (avctx->active_thread_type == FF_THREAD_FRAME)
num_jobs = 1;
else
vp8_decode_mv_mb_modes(avctx, curframe, prev_frame);
}
num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count);
s->num_jobs = num_jobs;
s->curframe = curframe;
s->prev_frame = prev_frame;
s->mv_min.y = -MARGIN;
s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
for (i = 0; i < MAX_THREADS; i++) {
s->thread_data[i].thread_mb_pos = 0;
s->thread_data[i].wait_mb_pos = INT_MAX;
}
if (avctx->active_thread_type == FF_THREAD_FRAME)
num_jobs = 1;
else
num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count);
s->num_jobs = num_jobs;
s->curframe = curframe;
s->prev_frame = prev_frame;
s->mv_min.y = -MARGIN;
s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
for (i = 0; i < MAX_THREADS; i++) {
s->thread_data[i].thread_mb_pos = 0;
s->thread_data[i].wait_mb_pos = INT_MAX;
if (is_vp7)
avctx->execute2(avctx, vp7_decode_mb_row_sliced, s->thread_data, NULL,
num_jobs);
else
avctx->execute2(avctx, vp8_decode_mb_row_sliced, s->thread_data, NULL,
num_jobs);
}
if (is_vp7)
avctx->execute2(avctx, vp7_decode_mb_row_sliced, s->thread_data, NULL,
num_jobs);
else
avctx->execute2(avctx, vp8_decode_mb_row_sliced, s->thread_data, NULL,
num_jobs);
ff_thread_report_progress(&curframe->tf, INT_MAX, 0);
memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);
@@ -2666,6 +2730,7 @@ int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
int ret;
s->avctx = avctx;
s->pix_fmt = AV_PIX_FMT_NONE;
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
avctx->internal->allocate_progress = 1;

libavcodec/vp8.h

@@ -130,12 +130,17 @@ typedef struct VP8ThreadData {
typedef struct VP8Frame {
ThreadFrame tf;
AVBufferRef *seg_map;
AVBufferRef *hwaccel_priv_buf;
void *hwaccel_picture_private;
} VP8Frame;
#define MAX_THREADS 8
typedef struct VP8Context {
VP8ThreadData *thread_data;
AVCodecContext *avctx;
enum AVPixelFormat pix_fmt;
VP8Frame *framep[4];
VP8Frame *next_framep[4];
VP8Frame *curframe;
@@ -165,6 +170,7 @@ typedef struct VP8Context {
uint8_t enabled;
uint8_t absolute_vals;
uint8_t update_map;
uint8_t update_feature_data;
int8_t base_quant[4];
int8_t filter_level[4]; ///< base loop filter level
} segmentation;
@@ -192,8 +198,19 @@ typedef struct VP8Context {
int16_t chroma_qmul[2];
} qmat[4];
// Raw quantisation values, which may be needed by hwaccel decode.
struct {
int yac_qi;
int ydc_delta;
int y2dc_delta;
int y2ac_delta;
int uvdc_delta;
int uvac_delta;
} quant;
struct {
uint8_t enabled; ///< whether each mb can have a different strength based on mode/ref
uint8_t update;
/**
* filter strength adjustment for the following macroblock modes:
@@ -221,6 +238,20 @@ typedef struct VP8Context {
VP56RangeCoder c; ///< header context, includes mb modes and motion vectors
/* This contains the entropy coder state at the end of the header
* block, in the form specified by the standard. For use by
* hwaccels, so that a hardware decoder has the information to
* start decoding at the macroblock layer.
*/
struct {
const uint8_t *input;
uint32_t range;
uint32_t value;
int bit_count;
} coder_state_at_header_end;
int header_partition_size;
/**
* These are all of the updatable probabilities for binary decisions.
* They are only implicitly reset on keyframes, making it quite likely
@@ -258,6 +289,7 @@ typedef struct VP8Context {
*/
int num_coeff_partitions;
VP56RangeCoder coeff_partition[8];
int coeff_partition_size[8];
VideoDSPContext vdsp;
VP8DSPContext vp8dsp;
H264PredContext hpc;
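The new header_partition_size and coeff_partition_size[] fields describe how the packet splits into the boolean-coded header partition and the DCT coefficient partitions. As a rough illustration (not part of this commit; the helper name and array layout are invented), a hwaccel could derive per-partition offsets within the packet like this, assuming the VP8 layout of frame tag, header partition, partition size table, then coefficient partitions:

#include <stdint.h>
#include "vp8.h"

/* first_partition_offset is 3 (the frame tag) plus 7 more on keyframes
 * (start code and dimensions). */
static void my_vp8_fill_partitions(const VP8Context *s,
                                   uint32_t offsets[1 + 8],
                                   uint32_t sizes[1 + 8],
                                   uint32_t first_partition_offset)
{
    uint32_t off = first_partition_offset;
    int i;

    /* Partition 0: the boolean-coded header (modes/MVs) partition. */
    offsets[0] = off;
    sizes[0]   = s->header_partition_size;
    off       += s->header_partition_size;

    /* Skip the partition size table: 3 bytes per coefficient partition
     * except the last one, whose size is implicit. */
    off += 3 * (s->num_coeff_partitions - 1);

    /* The DCT coefficient partitions follow back to back. */
    for (i = 0; i < s->num_coeff_partitions; i++) {
        offsets[1 + i] = off;
        sizes[1 + i]   = s->coeff_partition_size[i];
        off           += s->coeff_partition_size[i];
    }
}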