diff --git a/libavcodec/x86/rv34dsp.asm b/libavcodec/x86/rv34dsp.asm
index 2d2f6e19e6..32bcdced8a 100644
--- a/libavcodec/x86/rv34dsp.asm
+++ b/libavcodec/x86/rv34dsp.asm
@@ -22,6 +22,16 @@
 %include "x86inc.asm"
 %include "x86util.asm"
 
+SECTION_RODATA
+pw_row_coeffs:  times 4 dw 13
+                times 4 dw 17
+                times 4 dw  7
+pd_512:         times 2 dd 0x200
+pw_col_coeffs:  dw  13,  13,  13, -13
+                dw  17,   7,   7, -17
+                dw  13, -13,  13,  13
+                dw  -7,  17, -17,  -7
+
 SECTION .text
 
 %macro IDCT_DC_NOROUND 1
@@ -88,6 +98,74 @@ cglobal rv34_idct_dc_add_mmx, 3, 3
     movh       [r2+r1], m5
     RET
 
+; Load coeffs and perform row transform
+; Output: coeffs in mm[0467], rounder in mm5
+%macro ROW_TRANSFORM  1
+    pxor        mm7, mm7
+    mova        mm0, [%1+ 0*8]
+    mova        mm1, [%1+ 1*8]
+    mova        mm2, [%1+ 2*8]
+    mova        mm3, [%1+ 3*8]
+    mova  [%1+ 0*8], mm7
+    mova  [%1+ 1*8], mm7
+    mova  [%1+ 2*8], mm7
+    mova  [%1+ 3*8], mm7
+    mova        mm4, mm0
+    mova        mm6, [pw_row_coeffs+ 0]
+    paddsw      mm0, mm2                ; b0 + b2
+    psubsw      mm4, mm2                ; b0 - b2
+    pmullw      mm0, mm6                ; *13 = z0
+    pmullw      mm4, mm6                ; *13 = z1
+    mova        mm5, mm1
+    pmullw      mm1, [pw_row_coeffs+ 8] ; b1*17
+    pmullw      mm5, [pw_row_coeffs+16] ; b1* 7
+    mova        mm7, mm3
+    pmullw      mm3, [pw_row_coeffs+ 8] ; b3*17
+    pmullw      mm7, [pw_row_coeffs+16] ; b3* 7
+    paddsw      mm1, mm7                ; z3 = b1*17 + b3* 7
+    psubsw      mm5, mm3                ; z2 = b1* 7 - b3*17
+    mova        mm7, mm0
+    mova        mm6, mm4
+    paddsw      mm0, mm1                ; z0 + z3
+    psubsw      mm7, mm1                ; z0 - z3
+    paddsw      mm4, mm5                ; z1 + z2
+    psubsw      mm6, mm5                ; z1 - z2
+    mova        mm5, [pd_512]           ; 0x200
+%endmacro
+
+; ff_rv34_idct_add_mmx2(uint8_t *dst, ptrdiff_t stride, DCTELEM *block);
+%macro COL_TRANSFORM  4
+    pshufw      mm3,  %2, 0xDD          ; col. 1,3,1,3
+    pshufw       %2,  %2, 0x88          ; col. 0,2,0,2
+    pmaddwd      %2, %3                 ; 13*c0+13*c2 | 13*c0-13*c2 = z0 | z1
+    pmaddwd     mm3, %4                 ; 17*c1+ 7*c3 |  7*c1-17*c3 = z3 | z2
+    paddd        %2, mm5
+    pshufw      mm1,  %2, 01001110b     ;    z1 | z0
+    pshufw      mm2, mm3, 01001110b     ;    z2 | z3
+    paddd        %2, mm3                ; z0+z3 | z1+z2
+    psubd       mm1, mm2                ; z1-z2 | z0-z3
+    movd        mm3, %1
+    psrad        %2, 10
+    pxor        mm2, mm2
+    psrad       mm1, 10
+    punpcklbw   mm3, mm2
+    packssdw     %2, mm1
+    paddw        %2, mm3
+    packuswb     %2, %2
+    movd         %1, %2
+%endmacro
+INIT_MMX mmx2
+cglobal rv34_idct_add, 3,3,0, d, s, b
+    ROW_TRANSFORM       bq
+    COL_TRANSFORM   [dq], mm0, [pw_col_coeffs+ 0], [pw_col_coeffs+ 8]
+    mova            mm0, [pw_col_coeffs+ 0]
+    COL_TRANSFORM [dq+sq], mm4, mm0, [pw_col_coeffs+ 8]
+    mova            mm4, [pw_col_coeffs+ 8]
+    lea              dq, [dq + 2*sq]
+    COL_TRANSFORM   [dq], mm6, mm0, mm4
+    COL_TRANSFORM [dq+sq], mm7, mm0, mm4
+    ret
+
 ; ff_rv34_idct_dc_add_sse4(uint8_t *dst, int stride, int dc);
 INIT_XMM
 cglobal rv34_idct_dc_add_sse4, 3, 3, 6
diff --git a/libavcodec/x86/rv34dsp_init.c b/libavcodec/x86/rv34dsp_init.c
index 38831255c9..d91818c375 100644
--- a/libavcodec/x86/rv34dsp_init.c
+++ b/libavcodec/x86/rv34dsp_init.c
@@ -28,6 +28,7 @@ void ff_rv34_idct_dc_mmx2(DCTELEM *block);
 void ff_rv34_idct_dc_noround_mmx2(DCTELEM *block);
 void ff_rv34_idct_dc_add_mmx(uint8_t *dst, ptrdiff_t stride, int dc);
 void ff_rv34_idct_dc_add_sse4(uint8_t *dst, ptrdiff_t stride, int dc);
+void ff_rv34_idct_add_mmx2(uint8_t *dst, ptrdiff_t stride, DCTELEM *block);
 
 av_cold void ff_rv34dsp_init_x86(RV34DSPContext* c, DSPContext *dsp)
 {
@@ -38,6 +39,7 @@ av_cold void ff_rv34dsp_init_x86(RV34DSPContext* c, DSPContext *dsp)
         c->rv34_idct_dc_add = ff_rv34_idct_dc_add_mmx;
     if (mm_flags & AV_CPU_FLAG_MMX2) {
         c->rv34_inv_transform_dc = ff_rv34_idct_dc_noround_mmx2;
+        c->rv34_idct_add = ff_rv34_idct_add_mmx2;
     }
    if (mm_flags & AV_CPU_FLAG_SSE4)
        c->rv34_idct_dc_add = ff_rv34_idct_dc_add_sse4;
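
Note (not part of the patch): for readers checking the arithmetic, below is a minimal scalar C sketch of what the MMX2 path computes: ROW_TRANSFORM combines rows with coefficients 13/17/7, COL_TRANSFORM then adds the 0x200 bias, shifts right by 10 and adds the result to the destination with clamping. It assumes a packed 4x4 block of int16_t (matching the 8-byte row loads at [%1 + n*8]); the names rv34_idct_add_ref and clip_uint8 are illustrative, not identifiers from the patch. The asm works on saturated 16-bit intermediates (paddsw/psubsw/pmullw); this model uses 32-bit ints, like the existing C reference in rv34dsp.c.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

static inline uint8_t clip_uint8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

void rv34_idct_add_ref(uint8_t *dst, ptrdiff_t stride, int16_t *block)
{
    int temp[16];

    /* Vertical pass (ROW_TRANSFORM): combine the four rows of each column
     * with coefficients 13, 17 and 7 (pw_row_coeffs). */
    for (int i = 0; i < 4; i++) {
        const int z0 = 13 * (block[i + 4*0] + block[i + 4*2]);
        const int z1 = 13 * (block[i + 4*0] - block[i + 4*2]);
        const int z2 =  7 *  block[i + 4*1] - 17 * block[i + 4*3];
        const int z3 = 17 *  block[i + 4*1] +  7 * block[i + 4*3];

        temp[4*0 + i] = z0 + z3;
        temp[4*1 + i] = z1 + z2;
        temp[4*2 + i] = z1 - z2;
        temp[4*3 + i] = z0 - z3;
    }
    memset(block, 0, 16 * sizeof(*block)); /* the asm also clears the block */

    /* Horizontal pass (COL_TRANSFORM): pw_col_coeffs products, 0x200 rounding
     * bias, >>10, then add to the destination row with clamping to 8 bits. */
    for (int i = 0; i < 4; i++) {
        const int c0 = temp[4*i + 0], c1 = temp[4*i + 1];
        const int c2 = temp[4*i + 2], c3 = temp[4*i + 3];
        const int z0 = 13 * (c0 + c2) + 0x200;
        const int z1 = 13 * (c0 - c2) + 0x200;
        const int z2 =  7 * c1 - 17 * c3;
        const int z3 = 17 * c1 +  7 * c3;

        dst[0] = clip_uint8(dst[0] + ((z0 + z3) >> 10));
        dst[1] = clip_uint8(dst[1] + ((z1 + z2) >> 10));
        dst[2] = clip_uint8(dst[2] + ((z1 - z2) >> 10));
        dst[3] = clip_uint8(dst[3] + ((z0 - z3) >> 10));
        dst += stride;
    }
}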