
vc1dec: redesign the intensity compensation

The existing implementation had little to do with VC1.
This could be implemented by adjusting the reference frames
themselves, but that would make frame multi-threading difficult.

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
commit 782ebd6118
parent 73050df240
Michael Niedermayer, 2013-04-24 20:40:12 +02:00
3 changed files with 112 additions and 37 deletions
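For orientation before the diff: the reworked INIT_LUT macro (first hunks of vc1.c below) fills a 256-entry luma table and a 256-entry chroma table from a precomputed scale/shift, and its new `chain` argument composes the mapping with an already-built table. A minimal standalone sketch of that loop follows; it is illustrative only (the helper names are invented here, FFmpeg's av_clip_uint8() is replaced by a local clip, and the LUMSCALE/LUMSHIFT-to-scale/shift derivation is left to the caller).

/* Illustrative sketch only -- not part of this commit. */
#include <stdint.h>

static uint8_t clip_uint8(int v)          /* stand-in for av_clip_uint8() */
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* Build (or, with chain set, compose) one pair of intensity-compensation
 * lookup tables from a precomputed scale/shift, mirroring the loop inside
 * the INIT_LUT macro in the diff below. */
static void build_ic_lut(int scale, int shift, int chain,
                         uint8_t luty[256], uint8_t lutuv[256])
{
    for (int i = 0; i < 256; i++) {
        int iy = chain ? luty[i]  : i;    /* chain remaps an already-mapped table */
        int iu = chain ? lutuv[i] : i;
        luty[i]  = clip_uint8((scale * iy + shift + 32) >> 6);
        lutuv[i] = clip_uint8((scale * (iu - 128) + 128 * 64 + 32) >> 6);
    }
}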

libavcodec/vc1.c

@@ -576,8 +576,27 @@ int ff_vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContex
return 0;
}
static void rotate_luts(VC1Context *v)
{
#define ROTATE(DEF, L, N, C, A) do {\
if (v->s.pict_type == AV_PICTURE_TYPE_BI || v->s.pict_type == AV_PICTURE_TYPE_B) {\
C = A;\
} else {\
DEF;\
memcpy(&tmp, &L , sizeof(tmp));\
memcpy(&L , &N , sizeof(tmp));\
memcpy(&N , &tmp, sizeof(tmp));\
C = N;\
}\
}while(0)
ROTATE(int tmp , v->last_use_ic, v->next_use_ic, v->curr_use_ic, v->aux_use_ic);
ROTATE(uint8_t tmp[2][256], v->last_luty , v->next_luty , v->curr_luty , v->aux_luty);
ROTATE(uint8_t tmp[2][256], v->last_lutuv, v->next_lutuv, v->curr_lutuv, v->aux_lutuv);
}
/* fill lookup tables for intensity compensation */
#define INIT_LUT(lumscale, lumshift, luty, lutuv) do {\
#define INIT_LUT(lumscale, lumshift, luty, lutuv, chain) do {\
int scale, shift, i; \
if (!lumscale) { \
scale = -64; \
@@ -592,11 +611,14 @@ int ff_vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContex
shift = lumshift << 6; \
} \
for (i = 0; i < 256; i++) { \
luty[i] = av_clip_uint8((scale * i + shift + 32) >> 6); \
lutuv[i] = av_clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6); \
int iy = chain ? luty[i] : i; \
int iu = chain ? lutuv[i] : i; \
luty[i] = av_clip_uint8((scale * iy + shift + 32) >> 6); \
lutuv[i] = av_clip_uint8((scale * (iu - 128) + 128*64 + 32) >> 6); \
} \
}while(0)
int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
{
int pqindex, lowquant, status;
@@ -685,8 +707,12 @@ int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
(v->s.pict_type == AV_PICTURE_TYPE_P) ? 'P' : ((v->s.pict_type == AV_PICTURE_TYPE_I) ? 'I' : 'B'),
pqindex, v->pq, v->halfpq, v->rangeredfrm);
if (v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_P)
v->use_ic = 0;
if(v->first_pic_header_flag) {
rotate_luts(v);
INIT_LUT(32, 0 , v->curr_luty[0] , v->curr_lutuv[0] , 0);
INIT_LUT(32, 0 , v->curr_luty[1] , v->curr_lutuv[1] , 0);
v->curr_use_ic = 0;
}
switch (v->s.pict_type) {
case AV_PICTURE_TYPE_P:
@@ -700,9 +726,10 @@ int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
v->mv_mode2 = ff_vc1_mv_pmode_table2[lowquant][get_unary(gb, 1, 3)];
v->lumscale = get_bits(gb, 6);
v->lumshift = get_bits(gb, 6);
v->use_ic = 1;
v->last_use_ic = 1;
/* fill lookup tables for intensity compensation */
INIT_LUT(v->lumscale, v->lumshift, v->luty, v->lutuv);
INIT_LUT(v->lumscale, v->lumshift , v->last_luty[0] , v->last_lutuv[0] , 1);
INIT_LUT(v->lumscale, v->lumshift , v->last_luty[1] , v->last_lutuv[1] , 1);
}
v->qs_last = v->s.quarter_sample;
if (v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
@@ -957,12 +984,16 @@ int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
if (v->postprocflag)
v->postproc = get_bits(gb, 2);
if (v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_P)
v->use_ic = 0;
if (v->parse_only)
return 0;
if(v->first_pic_header_flag) {
rotate_luts(v);
INIT_LUT(32, 0 , v->curr_luty[0] , v->curr_lutuv[0] , 0);
INIT_LUT(32, 0 , v->curr_luty[1] , v->curr_lutuv[1] , 0);
v->curr_use_ic = 0;
}
switch (v->s.pict_type) {
case AV_PICTURE_TYPE_I:
case AV_PICTURE_TYPE_BI:
@@ -1013,7 +1044,9 @@ int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
if (v->intcomp) {
v->lumscale = get_bits(gb, 6);
v->lumshift = get_bits(gb, 6);
INIT_LUT(v->lumscale, v->lumshift, v->luty, v->lutuv);
INIT_LUT(v->lumscale, v->lumshift, v->last_luty[0], v->last_lutuv[0], 1);
INIT_LUT(v->lumscale, v->lumshift, v->last_luty[1], v->last_lutuv[1], 1);
v->last_use_ic = 1;
}
status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
av_log(v->s.avctx, AV_LOG_DEBUG, "SKIPMB plane encoding: "
@@ -1056,17 +1089,38 @@ int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
int mvmode2;
mvmode2 = get_unary(gb, 1, 3);
v->mv_mode2 = ff_vc1_mv_pmode_table2[lowquant][mvmode2];
if (v->field_mode)
v->intcompfield = decode210(gb);
if (v->field_mode) {
v->intcompfield = decode210(gb)^3;
} else
v->intcompfield = 3;
v->lumscale2 = v->lumscale = 32;
v->lumshift2 = v->lumshift = 0;
if (v->intcompfield & 1) {
v->lumscale = get_bits(gb, 6);
v->lumshift = get_bits(gb, 6);
INIT_LUT(v->lumscale, v->lumshift, v->luty, v->lutuv);
if ((v->field_mode) && !v->intcompfield) {
}
if ((v->intcompfield & 2) && v->field_mode) {
v->lumscale2 = get_bits(gb, 6);
v->lumshift2 = get_bits(gb, 6);
INIT_LUT(v->lumscale2, v->lumshift2, v->luty2, v->lutuv2);
} else if(!v->field_mode) {
v->lumscale2 = v->lumscale;
v->lumshift2 = v->lumshift;
}
v->use_ic = 1;
if (v->field_mode && v->second_field) {
if (v->cur_field_type) {
INIT_LUT(v->lumscale , v->lumshift , v->curr_luty[v->cur_field_type^1], v->curr_lutuv[v->cur_field_type^1], 0);
INIT_LUT(v->lumscale2, v->lumshift2, v->last_luty[v->cur_field_type ], v->last_lutuv[v->cur_field_type ], 1);
} else {
INIT_LUT(v->lumscale2, v->lumshift2, v->curr_luty[v->cur_field_type^1], v->curr_lutuv[v->cur_field_type^1], 0);
INIT_LUT(v->lumscale , v->lumshift , v->last_luty[v->cur_field_type ], v->last_lutuv[v->cur_field_type ], 1);
}
v->curr_use_ic = 1;
} else {
INIT_LUT(v->lumscale , v->lumshift , v->last_luty[0], v->last_lutuv[0], 1);
INIT_LUT(v->lumscale2, v->lumshift2, v->last_luty[1], v->last_lutuv[1], 1);
}
v->last_use_ic = 1;
}
v->qs_last = v->s.quarter_sample;
if (v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)

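The rotate_luts() function added above is easier to follow without the macro plumbing. Below is a simplified sketch, not the committed code: it models one last/next/aux set of per-field tables plus a `curr` pointer, mirroring what the ROTATE() macro does with the VC1Context fields; the motion-compensation code in vc1dec.c then picks last/curr/next and indexes the pair by reference field.

/* Illustrative sketch only -- not part of this commit. */
#include <stdint.h>
#include <string.h>

typedef uint8_t ic_lut[2][256];   /* one 256-entry table per field */

struct ic_state {
    ic_lut last, next, aux;       /* storage, as in VC1Context */
    ic_lut *curr;                 /* where the current frame's LUTs are built */
};

/* Sketch of the ROTATE() logic: B/BI pictures build into the auxiliary
 * tables, other pictures swap last/next and build into the slot that is
 * now "next". */
static void rotate_ic_luts(struct ic_state *s, int is_b_or_bi)
{
    if (is_b_or_bi) {
        s->curr = &s->aux;
    } else {
        ic_lut tmp;
        memcpy(&tmp,     &s->last, sizeof(tmp));
        memcpy(&s->last, &s->next, sizeof(tmp));
        memcpy(&s->next, &tmp,     sizeof(tmp));
        s->curr = &s->next;
    }
}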
libavcodec/vc1.h

@@ -296,8 +296,11 @@ typedef struct VC1Context{
int dmb_is_raw; ///< direct mb plane is raw
int fmb_is_raw; ///< forward mb plane is raw
int skip_is_raw; ///< skip mb plane is not coded
uint8_t luty[256], lutuv[256]; ///< lookup tables used for intensity compensation
int use_ic; ///< use intensity compensation in B-frames
uint8_t last_luty[2][256], last_lutuv[2][256]; ///< lookup tables used for intensity compensation
uint8_t aux_luty[2][256], aux_lutuv[2][256]; ///< lookup tables used for intensity compensation
uint8_t next_luty[2][256], next_lutuv[2][256]; ///< lookup tables used for intensity compensation
uint8_t (*curr_luty)[256] ,(*curr_lutuv)[256];
int last_use_ic, curr_use_ic, next_use_ic, aux_use_ic;
int rnd; ///< rounding control
/** Frame decoding info for S/M profiles only */
@@ -340,7 +343,6 @@ typedef struct VC1Context{
int intcomp;
uint8_t lumscale2; ///< for interlaced field P picture
uint8_t lumshift2;
uint8_t luty2[256], lutuv2[256]; // lookup tables used for intensity compensation
VLC* mbmode_vlc;
VLC* imv_vlc;
VLC* twomvbp_vlc;

libavcodec/vc1dec.c

@@ -351,6 +351,7 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
int off, off_uv;
int v_edge_pos = s->v_edge_pos >> v->field_mode;
int i;
const uint8_t *luty, *lutuv;
if ((!v->field_mode ||
(v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
@@ -389,15 +390,21 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
srcY = s->current_picture.f.data[0];
srcU = s->current_picture.f.data[1];
srcV = s->current_picture.f.data[2];
luty = v->curr_luty [v->ref_field_type[dir]];
lutuv= v->curr_lutuv[v->ref_field_type[dir]];
} else {
srcY = s->last_picture.f.data[0];
srcU = s->last_picture.f.data[1];
srcV = s->last_picture.f.data[2];
luty = v->last_luty [v->ref_field_type[dir]];
lutuv= v->last_lutuv[v->ref_field_type[dir]];
}
} else {
srcY = s->next_picture.f.data[0];
srcU = s->next_picture.f.data[1];
srcV = s->next_picture.f.data[2];
luty = v->next_luty [v->ref_field_type[dir]];
lutuv= v->next_lutuv[v->ref_field_type[dir]];
}
if(!srcY)
@@ -484,15 +491,15 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
src = srcY;
for (j = 0; j < 17 + s->mspel * 2; j++) {
for (i = 0; i < 17 + s->mspel * 2; i++)
src[i] = v->luty[src[i]];
src[i] = luty[src[i]];
src += s->linesize;
}
src = srcU;
src2 = srcV;
for (j = 0; j < 9; j++) {
for (i = 0; i < 9; i++) {
src[i] = v->lutuv[src[i]];
src2[i] = v->lutuv[src2[i]];
src[i] = lutuv[src[i]];
src2[i] = lutuv[src2[i]];
}
src += s->uvlinesize;
src2 += s->uvlinesize;
@@ -552,6 +559,7 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
int off;
int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
int v_edge_pos = s->v_edge_pos >> v->field_mode;
const uint8_t *luty;
if ((!v->field_mode ||
(v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
@@ -564,10 +572,15 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
if (!dir) {
if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
srcY = s->current_picture.f.data[0];
} else
luty = v->curr_luty[v->ref_field_type[dir]];
} else {
srcY = s->last_picture.f.data[0];
} else
luty = v->last_luty[v->ref_field_type[dir]];
}
} else {
srcY = s->next_picture.f.data[0];
luty = v->next_luty[v->ref_field_type[dir]];
}
if(!srcY)
return;
@@ -699,7 +712,7 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
src = srcY;
for (j = 0; j < 9 + s->mspel * 2; j++) {
for (i = 0; i < 9 + s->mspel * 2; i++)
src[i] = v->luty[src[i]];
src[i] = luty[src[i]];
src += s->linesize << fieldmv;
}
}
@@ -787,6 +800,7 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
int valid_count;
int chroma_ref_type = v->cur_field_type, off = 0;
int v_edge_pos = s->v_edge_pos >> v->field_mode;
const uint8_t *lutuv;
if (!v->field_mode && !v->s.last_picture.f.data[0])
return;
@@ -852,13 +866,16 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
srcU = s->current_picture.f.data[1];
srcV = s->current_picture.f.data[2];
lutuv= v->curr_lutuv[chroma_ref_type];
} else {
srcU = s->last_picture.f.data[1];
srcV = s->last_picture.f.data[2];
lutuv= v->last_lutuv[chroma_ref_type];
}
} else {
srcU = s->next_picture.f.data[1];
srcV = s->next_picture.f.data[2];
lutuv= v->next_lutuv[chroma_ref_type];
}
if(!srcU)
@@ -913,8 +930,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
src2 = srcV;
for (j = 0; j < 9; j++) {
for (i = 0; i < 9; i++) {
src[i] = v->lutuv[src[i]];
src2[i] = v->lutuv[src2[i]];
src[i] = lutuv[src[i]];
src2[i] = lutuv[src2[i]];
}
src += s->uvlinesize;
src2 += s->uvlinesize;
@@ -998,13 +1015,14 @@ static void vc1_mc_4mv_chroma4(VC1Context *v)
if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
int i, j;
uint8_t *src, *src2;
const uint8_t *lutuv = v->last_lutuv[v->ref_field_type[0]];
src = srcU;
src2 = srcV;
for (j = 0; j < 5; j++) {
for (i = 0; i < 5; i++) {
src[i] = v->lutuv[src[i]];
src2[i] = v->lutuv[src2[i]];
src[i] = lutuv[src[i]];
src2[i] = lutuv[src2[i]];
}
src += s->uvlinesize << 1;
src2 += s->uvlinesize << 1;
@@ -1998,29 +2016,30 @@ static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
int direct, int mode)
{
if (v->use_ic) {
int use_ic = v->next_use_ic || v->curr_use_ic || v->last_use_ic;
if (use_ic) {
v->mv_mode2 = v->mv_mode;
v->mv_mode = MV_PMODE_INTENSITY_COMP;
}
if (direct) {
vc1_mc_1mv(v, 0);
vc1_interp_mc(v);
if (v->use_ic)
if (use_ic)
v->mv_mode = v->mv_mode2;
return;
}
if (mode == BMV_TYPE_INTERPOLATED) {
vc1_mc_1mv(v, 0);
vc1_interp_mc(v);
if (v->use_ic)
if (use_ic)
v->mv_mode = v->mv_mode2;
return;
}
if (v->use_ic && (mode == BMV_TYPE_BACKWARD))
if (use_ic && (mode == BMV_TYPE_BACKWARD))
v->mv_mode = v->mv_mode2;
vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
if (v->use_ic)
if (use_ic)
v->mv_mode = v->mv_mode2;
}