commit 3ae079a3c8 (parent 3df56f4118)

VP8: optimize DC-only chroma case in the same way as luma.

Add MMX idct_dc_add4uv function for this case. ~40% faster chroma idct.

Originally committed as revision 24455 to svn://svn.ffmpeg.org/ffmpeg/trunk
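
The fast path hinges on a single bit trick visible in the idct_mb hunks below: non_zero_count_cache keeps one per-4x4-block coefficient count per byte, so one 32-bit load (AV_RN32A) describes all four chroma sub-blocks at once. A minimal C sketch of that dispatch decision follows; the helper name, the enum, and the use of memcpy in place of AV_RN32A are illustrative only, while the 0x01010101 mask and the three outcomes come from the diff itself.

#include <stdint.h>
#include <string.h>

/* Each byte of nnz[] is the nonzero-coefficient count of one 4x4 chroma block:
 * 0 = nothing to add, 1 = only the DC coefficient, >1 = full IDCT required. */
enum chroma_idct_path { SKIP_ALL, DC_ONLY_BATCH, PER_BLOCK_LOOP };

static enum chroma_idct_path pick_chroma_path(const uint8_t nnz[4])
{
    uint32_t nnz4;
    memcpy(&nnz4, nnz, 4);            /* plain stand-in for AV_RN32A() */
    if (!nnz4)
        return SKIP_ALL;              /* all four blocks are empty */
    /* Clearing the 0x01 bit of every byte leaves a nonzero word only if
     * at least one block has two or more coefficients. */
    if (nnz4 & ~0x01010101u)
        return PER_BLOCK_LOOP;        /* keep the per-block idct/dc-add loop */
    return DC_ONLY_BATCH;             /* all DC-only: one vp8_idct_dc_add4uv call */
}

When every byte is 0 or 1, the whole 8x8 chroma area is DC-only and the single batched vp8_idct_dc_add4uv call replaces the nested per-block loop; that is the case the commit message reports as roughly 40% faster chroma idct.
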
@@ -1206,7 +1206,7 @@ static void idct_mb(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb)
                         }
                     }
                 } else {
-                    s->vp8dsp.vp8_idct_dc_add4(y_dst, s->block[y], s->linesize);
+                    s->vp8dsp.vp8_idct_dc_add4y(y_dst, s->block[y], s->linesize);
                 }
             }
             y_dst += 4*s->linesize;
@@ -1214,19 +1214,24 @@ static void idct_mb(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb)
     }

     for (ch = 0; ch < 2; ch++) {
-        if (AV_RN32A(s->non_zero_count_cache[4+ch])) {
+        uint32_t nnz4 = AV_RN32A(s->non_zero_count_cache[4+ch]);
+        if (nnz4) {
             uint8_t *ch_dst = dst[1+ch];
-            for (y = 0; y < 2; y++) {
-                for (x = 0; x < 2; x++) {
-                    int nnz = s->non_zero_count_cache[4+ch][(y<<1)+x];
-                    if (nnz) {
-                        if (nnz == 1)
-                            s->vp8dsp.vp8_idct_dc_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
-                        else
-                            s->vp8dsp.vp8_idct_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
-                    }
-                }
-                ch_dst += 4*s->uvlinesize;
-            }
+            if (nnz4&~0x01010101) {
+                for (y = 0; y < 2; y++) {
+                    for (x = 0; x < 2; x++) {
+                        int nnz = s->non_zero_count_cache[4+ch][(y<<1)+x];
+                        if (nnz) {
+                            if (nnz == 1)
+                                s->vp8dsp.vp8_idct_dc_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
+                            else
+                                s->vp8dsp.vp8_idct_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
+                        }
+                    }
+                    ch_dst += 4*s->uvlinesize;
+                }
+            } else {
+                s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, s->block[4+ch], s->uvlinesize);
+            }
         }
     }
@@ -109,24 +109,20 @@ static void vp8_idct_dc_add_c(uint8_t *dst, DCTELEM block[16], int stride)
     }
 }

-static void vp8_idct_dc_add4_c(uint8_t *dst, DCTELEM block[4][16], int stride)
+static void vp8_idct_dc_add4uv_c(uint8_t *dst, DCTELEM block[4][16], int stride)
 {
-    int i, j;
-    for (j = 0; j < 4; j++) {
-        uint8_t *pix = dst+j*4;
-        int dc = (block[j][0] + 4) >> 3;
-        uint8_t *cm = ff_cropTbl + MAX_NEG_CROP + dc;
-        block[j][0] = 0;
-        if (!dc)
-            continue;
-        for (i = 0; i < 4; i++) {
-            pix[0] = cm[pix[0]];
-            pix[1] = cm[pix[1]];
-            pix[2] = cm[pix[2]];
-            pix[3] = cm[pix[3]];
-            pix += stride;
-        }
-    }
+    vp8_idct_dc_add_c(dst+stride*0+0, block[0], stride);
+    vp8_idct_dc_add_c(dst+stride*0+4, block[1], stride);
+    vp8_idct_dc_add_c(dst+stride*4+0, block[2], stride);
+    vp8_idct_dc_add_c(dst+stride*4+4, block[3], stride);
+}
+
+static void vp8_idct_dc_add4y_c(uint8_t *dst, DCTELEM block[4][16], int stride)
+{
+    vp8_idct_dc_add_c(dst+ 0, block[0], stride);
+    vp8_idct_dc_add_c(dst+ 4, block[1], stride);
+    vp8_idct_dc_add_c(dst+ 8, block[2], stride);
+    vp8_idct_dc_add_c(dst+12, block[3], stride);
 }

 // because I like only having two parameters to pass functions...
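
The new C fallbacks are deliberately thin: each applies the scalar DC add to four 4x4 blocks, vp8_idct_dc_add4y_c along a 16-pixel luma row (offsets 0/4/8/12) and vp8_idct_dc_add4uv_c over the 2x2 grid of an 8x8 chroma area. A rough self-contained sketch of that per-block operation is below, using int16_t in place of DCTELEM and a local clip_uint8() instead of the ff_cropTbl lookup; the helper names are illustrative, only the (dc + 4) >> 3 rounding and the block offsets are taken from the code above.

#include <stdint.h>

static uint8_t clip_uint8(int v)            /* stand-in for ff_cropTbl */
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* Add one rounded DC value to a 4x4 block and clear the coefficient,
 * as vp8_idct_dc_add_c does. */
static void dc_add_4x4(uint8_t *dst, int16_t block[16], int stride)
{
    int dc = (block[0] + 4) >> 3;           /* VP8 DC scaling: +4, >>3 */
    block[0] = 0;
    for (int i = 0; i < 4; i++, dst += stride)
        for (int j = 0; j < 4; j++)
            dst[j] = clip_uint8(dst[j] + dc);
}

/* Chroma covers an 8x8 area, so its four 4x4 blocks form a 2x2 grid:
 * offsets 0, 4, 4*stride and 4*stride+4, exactly as in add4uv above. */
static void dc_add_2x2_grid(uint8_t *dst, int16_t block[4][16], int stride)
{
    dc_add_4x4(dst,                block[0], stride);
    dc_add_4x4(dst + 4,            block[1], stride);
    dc_add_4x4(dst + 4*stride,     block[2], stride);
    dc_add_4x4(dst + 4*stride + 4, block[3], stride);
}
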
@@ -479,10 +475,11 @@ VP8_BILINEAR(4)

 av_cold void ff_vp8dsp_init(VP8DSPContext *dsp)
 {
-    dsp->vp8_luma_dc_wht  = vp8_luma_dc_wht_c;
-    dsp->vp8_idct_add     = vp8_idct_add_c;
-    dsp->vp8_idct_dc_add  = vp8_idct_dc_add_c;
-    dsp->vp8_idct_dc_add4 = vp8_idct_dc_add4_c;
+    dsp->vp8_luma_dc_wht    = vp8_luma_dc_wht_c;
+    dsp->vp8_idct_add       = vp8_idct_add_c;
+    dsp->vp8_idct_dc_add    = vp8_idct_dc_add_c;
+    dsp->vp8_idct_dc_add4y  = vp8_idct_dc_add4y_c;
+    dsp->vp8_idct_dc_add4uv = vp8_idct_dc_add4uv_c;

     dsp->vp8_v_loop_filter16y = vp8_v_loop_filter16_c;
     dsp->vp8_h_loop_filter16y = vp8_h_loop_filter16_c;
@@ -33,7 +33,8 @@ typedef struct VP8DSPContext {
     void (*vp8_luma_dc_wht)(DCTELEM block[4][4][16], DCTELEM dc[16]);
     void (*vp8_idct_add)(uint8_t *dst, DCTELEM block[16], int stride);
     void (*vp8_idct_dc_add)(uint8_t *dst, DCTELEM block[16], int stride);
-    void (*vp8_idct_dc_add4)(uint8_t *dst, DCTELEM block[4][16], int stride);
+    void (*vp8_idct_dc_add4y)(uint8_t *dst, DCTELEM block[4][16], int stride);
+    void (*vp8_idct_dc_add4uv)(uint8_t *dst, DCTELEM block[4][16], int stride);

     // loop filter applied to edges between macroblocks
     void (*vp8_v_loop_filter16y)(uint8_t *dst, int stride,
@@ -220,8 +220,9 @@ HVBILIN(ssse3, 8, 16, 16)

 extern void ff_vp8_idct_dc_add_mmx(uint8_t *dst, DCTELEM block[16], int stride);
 extern void ff_vp8_idct_dc_add_sse4(uint8_t *dst, DCTELEM block[16], int stride);
-extern void ff_vp8_idct_dc_add4_mmx(uint8_t *dst, DCTELEM block[4][16], int stride);
-extern void ff_vp8_idct_dc_add4_sse2(uint8_t *dst, DCTELEM block[4][16], int stride);
+extern void ff_vp8_idct_dc_add4y_mmx(uint8_t *dst, DCTELEM block[4][16], int stride);
+extern void ff_vp8_idct_dc_add4y_sse2(uint8_t *dst, DCTELEM block[4][16], int stride);
+extern void ff_vp8_idct_dc_add4uv_mmx(uint8_t *dst, DCTELEM block[2][16], int stride);
 extern void ff_vp8_luma_dc_wht_mmx(DCTELEM block[4][4][16], DCTELEM dc[16]);
 extern void ff_vp8_idct_add_mmx(uint8_t *dst, DCTELEM block[16], int stride);
 extern void ff_vp8_idct_add_sse(uint8_t *dst, DCTELEM block[16], int stride);
@@ -284,10 +285,11 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)

 #if HAVE_YASM
     if (mm_flags & FF_MM_MMX) {
-        c->vp8_idct_dc_add  = ff_vp8_idct_dc_add_mmx;
-        c->vp8_idct_dc_add4 = ff_vp8_idct_dc_add4_mmx;
-        c->vp8_idct_add     = ff_vp8_idct_add_mmx;
-        c->vp8_luma_dc_wht  = ff_vp8_luma_dc_wht_mmx;
+        c->vp8_idct_dc_add    = ff_vp8_idct_dc_add_mmx;
+        c->vp8_idct_dc_add4y  = ff_vp8_idct_dc_add4y_mmx;
+        c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx;
+        c->vp8_idct_add       = ff_vp8_idct_add_mmx;
+        c->vp8_luma_dc_wht    = ff_vp8_luma_dc_wht_mmx;
         c->put_vp8_epel_pixels_tab[0][0][0] =
         c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx;
         c->put_vp8_epel_pixels_tab[1][0][0] =
@@ -354,7 +356,7 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
     }

     if (mm_flags & FF_MM_SSE2) {
-        c->vp8_idct_dc_add4  = ff_vp8_idct_dc_add4_sse2;
+        c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_sse2;

         c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_sse2;
         c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_sse2;
@@ -976,11 +976,11 @@ cglobal vp8_idct_dc_add_sse4, 3, 3, 6
     RET

;-----------------------------------------------------------------------------
-; void vp8_idct_dc_add4_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
+; void vp8_idct_dc_add4y_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
;-----------------------------------------------------------------------------

 INIT_MMX
-cglobal vp8_idct_dc_add4_mmx, 3, 3
+cglobal vp8_idct_dc_add4y_mmx, 3, 3
     ; load data
     movd       m0, [r1+32*0] ; A
     movd       m1, [r1+32*2] ; C
@@ -1015,7 +1015,7 @@ cglobal vp8_idct_dc_add4_mmx, 3, 3
     RET

 INIT_XMM
-cglobal vp8_idct_dc_add4_sse2, 3, 3
+cglobal vp8_idct_dc_add4y_sse2, 3, 3, 6
     ; load data
     movd       m0, [r1+32*0] ; A
     movd       m1, [r1+32*2] ; C
@@ -1044,6 +1044,47 @@ cglobal vp8_idct_dc_add4_sse2, 3, 3
     ADD_DC     m0, m1, 0, mova
     RET

+;-----------------------------------------------------------------------------
+; void vp8_idct_dc_add4uv_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
+;-----------------------------------------------------------------------------
+
+INIT_MMX
+cglobal vp8_idct_dc_add4uv_mmx, 3, 3
+    ; load data
+    movd       m0, [r1+32*0] ; A
+    movd       m1, [r1+32*2] ; C
+    punpcklwd  m0, [r1+32*1] ; A B
+    punpcklwd  m1, [r1+32*3] ; C D
+    punpckldq  m0, m1        ; A B C D
+    pxor       m6, m6
+
+    ; calculate DC
+    paddw      m0, [pw_4]
+    movd [r1+32*0], m6
+    movd [r1+32*1], m6
+    movd [r1+32*2], m6
+    movd [r1+32*3], m6
+    psraw      m0, 3
+    psubw      m6, m0
+    packuswb   m0, m0
+    packuswb   m6, m6
+    punpcklbw  m0, m0 ; AABBCCDD
+    punpcklbw  m6, m6 ; AABBCCDD
+    movq       m1, m0
+    movq       m7, m6
+    punpcklbw  m0, m0 ; AAAABBBB
+    punpckhbw  m1, m1 ; CCCCDDDD
+    punpcklbw  m6, m6 ; AAAABBBB
+    punpckhbw  m7, m7 ; CCCCDDDD
+
+    ; add DC
+    lea        r1, [r0+r2*2]
+    ADD_DC     m0, m6, 0, mova
+    lea        r0, [r0+r2*4]
+    lea        r1, [r1+r2*4]
+    ADD_DC     m1, m7, 0, mova
+    RET
+
;-----------------------------------------------------------------------------
; void vp8_idct_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride);
;-----------------------------------------------------------------------------
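
One note on the new MMX routine: after the two packuswb steps, m0 holds the DC values clamped at zero and m6 holds the clamped negated DCs (psubw m6, m0 with m6 previously zeroed), which is the standard way to emulate a signed byte add with unsigned-saturating instructions. The ADD_DC macro body is not part of this hunk, so the C sketch below is a hedged reconstruction of that saturation trick rather than a transcription of the macro.

#include <stdint.h>

/* clip(pixel + dc, 0, 255) built only from unsigned saturating add/sub,
 * mirroring a paddusb with the positive half and a psubusb with the
 * negated half of the DC value. */
static uint8_t saturating_dc_add(uint8_t pixel, int dc)
{
    int add = dc > 0 ? (dc >  255 ? 255 :  dc) : 0;  /* packuswb(dc)  */
    int sub = dc < 0 ? (dc < -255 ? 255 : -dc) : 0;  /* packuswb(-dc) */
    int t = pixel + add;
    if (t > 255) t = 255;                            /* paddusb saturates high */
    t -= sub;
    if (t < 0)   t = 0;                              /* psubusb saturates low  */
    return (uint8_t)t;
}

Keeping everything in 8-bit lanes this way avoids a widen/narrow round trip per pixel, which is part of why the DC-only path is so much cheaper than the full per-block IDCT.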