
Merge remote-tracking branch 'rbultje/vp9-simd'

* rbultje/vp9-simd:
  vp9: implement top/left half (4x4) sub-8x8-IDCT.
  vp9: split pre-load of 11585x2 out of 1d idct macro.
  vp9: minor refactorings in idct ssse3 assembly.
  vp9: split x86 assembly in two files.

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Committed by Michael Niedermayer on 2013-12-07 19:00:03 +01:00
commit d042c8251d
3 changed files with 364 additions and 304 deletions
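
The key functional change is the new .idcthalf path in vp9_idct_idct_8x8_add: when the end-of-block index (eob) shows that only the top-left 4x4 quadrant of coefficients is populated, a cut-down 4-input variant of the 8x8 IDCT runs instead of the full transform. A minimal C sketch of the eob dispatch, following the cmp/jg chain in the patch (the returned strings mirror the asm labels; the program is illustrative only, not part of the patch):

#include <stdio.h>

/* Which code path vp9_idct_idct_8x8_add (ssse3) takes for a given eob. */
static const char *idct8x8_path(int eob)
{
    if (eob > 12) return "idctfull";          /* generic full 8x8 idct        */
    if (eob > 3)  return "idcthalf";          /* new: top/left 4x4 half       */
    if (eob != 1) return "idcttopleftcorner"; /* 2x2 corner (up to 3 coeffs)  */
    return "dc only";                         /* single DC coefficient        */
}

int main(void)
{
    for (int eob = 1; eob <= 16; eob++)
        printf("eob=%2d -> %s\n", eob, idct8x8_path(eob));
    return 0;
}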

libavcodec/x86/Makefile

@@ -101,5 +101,6 @@ YASM-OBJS-$(CONFIG_VP3DSP) += x86/vp3dsp.o
YASM-OBJS-$(CONFIG_VP6_DECODER) += x86/vp6dsp.o
YASM-OBJS-$(CONFIG_VP8_DECODER) += x86/vp8dsp.o \
x86/vp8dsp_loopfilter.o
YASM-OBJS-$(CONFIG_VP9_DECODER) += x86/vp9dsp.o
YASM-OBJS-$(CONFIG_VP9_DECODER) += x86/vp9itxfm.o \
x86/vp9mc.o
YASM-OBJS-$(CONFIG_WEBP_DECODER) += x86/vp8dsp.o

libavcodec/x86/vp9itxfm.asm (renamed from libavcodec/x86/vp9dsp.asm)

@@ -1,7 +1,7 @@
;******************************************************************************
;* VP9 SIMD optimizations
;* VP9 IDCT SIMD optimizations
;*
;* Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
;* Copyright (C) 2013 Clément Bœsch <u pkh me>
;*
;* This file is part of FFmpeg.
;*
@@ -24,65 +24,6 @@
SECTION_RODATA
; FIXME share with vp8dsp.asm
pw_256: times 8 dw 256
%macro F8_TAPS 8
times 8 db %1, %2
times 8 db %3, %4
times 8 db %5, %6
times 8 db %7, %8
%endmacro
; int8_t ff_filters_ssse3[3][15][4][16]
const filters_ssse3 ; smooth
F8_TAPS -3, -1, 32, 64, 38, 1, -3, 0
F8_TAPS -2, -2, 29, 63, 41, 2, -3, 0
F8_TAPS -2, -2, 26, 63, 43, 4, -4, 0
F8_TAPS -2, -3, 24, 62, 46, 5, -4, 0
F8_TAPS -2, -3, 21, 60, 49, 7, -4, 0
F8_TAPS -1, -4, 18, 59, 51, 9, -4, 0
F8_TAPS -1, -4, 16, 57, 53, 12, -4, -1
F8_TAPS -1, -4, 14, 55, 55, 14, -4, -1
F8_TAPS -1, -4, 12, 53, 57, 16, -4, -1
F8_TAPS 0, -4, 9, 51, 59, 18, -4, -1
F8_TAPS 0, -4, 7, 49, 60, 21, -3, -2
F8_TAPS 0, -4, 5, 46, 62, 24, -3, -2
F8_TAPS 0, -4, 4, 43, 63, 26, -2, -2
F8_TAPS 0, -3, 2, 41, 63, 29, -2, -2
F8_TAPS 0, -3, 1, 38, 64, 32, -1, -3
; regular
F8_TAPS 0, 1, -5, 126, 8, -3, 1, 0
F8_TAPS -1, 3, -10, 122, 18, -6, 2, 0
F8_TAPS -1, 4, -13, 118, 27, -9, 3, -1
F8_TAPS -1, 4, -16, 112, 37, -11, 4, -1
F8_TAPS -1, 5, -18, 105, 48, -14, 4, -1
F8_TAPS -1, 5, -19, 97, 58, -16, 5, -1
F8_TAPS -1, 6, -19, 88, 68, -18, 5, -1
F8_TAPS -1, 6, -19, 78, 78, -19, 6, -1
F8_TAPS -1, 5, -18, 68, 88, -19, 6, -1
F8_TAPS -1, 5, -16, 58, 97, -19, 5, -1
F8_TAPS -1, 4, -14, 48, 105, -18, 5, -1
F8_TAPS -1, 4, -11, 37, 112, -16, 4, -1
F8_TAPS -1, 3, -9, 27, 118, -13, 4, -1
F8_TAPS 0, 2, -6, 18, 122, -10, 3, -1
F8_TAPS 0, 1, -3, 8, 126, -5, 1, 0
; sharp
F8_TAPS -1, 3, -7, 127, 8, -3, 1, 0
F8_TAPS -2, 5, -13, 125, 17, -6, 3, -1
F8_TAPS -3, 7, -17, 121, 27, -10, 5, -2
F8_TAPS -4, 9, -20, 115, 37, -13, 6, -2
F8_TAPS -4, 10, -23, 108, 48, -16, 8, -3
F8_TAPS -4, 10, -24, 100, 59, -19, 9, -3
F8_TAPS -4, 11, -24, 90, 70, -21, 10, -4
F8_TAPS -4, 11, -23, 80, 80, -23, 11, -4
F8_TAPS -4, 10, -21, 70, 90, -24, 11, -4
F8_TAPS -3, 9, -19, 59, 100, -24, 10, -4
F8_TAPS -3, 8, -16, 48, 108, -23, 10, -4
F8_TAPS -2, 6, -13, 37, 115, -20, 9, -4
F8_TAPS -2, 5, -10, 27, 121, -17, 7, -3
F8_TAPS -1, 3, -6, 17, 125, -13, 5, -2
F8_TAPS 0, 1, -3, 8, 127, -7, 3, -1
pw_11585x2: times 8 dw 23170
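; 23170 = 2 * 11585, and 11585 = round(cos(pi/4) * 2^14) = round(2^14 / sqrt(2));
; the doubled constant lets pmulhrsw ((x*c + (1<<14)) >> 15) compute a rounded
; multiply by 11585/2^14, i.e. x/sqrt(2), without resorting to pmaddwd.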
%macro VP9_IDCT_COEFFS 2
@@ -98,7 +39,7 @@ VP9_IDCT_COEFFS %1, %2
VP9_IDCT_COEFFS_ALL 15137, 6270
VP9_IDCT_COEFFS_ALL 16069, 3196
VP9_IDCT_COEFFS 9102, 13623
VP9_IDCT_COEFFS_ALL 9102, 13623
pd_8192: times 4 dd 8192
pw_2048: times 8 dw 2048
@@ -106,41 +47,36 @@ pw_1024: times 8 dw 1024
SECTION .text
;
; IDCT helpers
;
; (a*x + b*y + round) >> shift
%macro VP9_MULSUB_2W_2X 6 ; dst1, dst2, src (unchanged), round, coefs1, coefs2
pmaddwd m%1, m%3, %5
pmaddwd m%2, m%3, %6
paddd m%1, m%4
paddd m%2, m%4
psrad m%1, 14
psrad m%2, 14
paddd m%1, %4
paddd m%2, %4
psrad m%1, 14
psrad m%2, 14
%endmacro
%macro VP9_UNPACK_MULSUB_2W_4X 4 ; dst1, dst2, coef1, coef2
punpckhwd m6, m%2, m%1
VP9_MULSUB_2W_2X 4, 5, 6, 7, [pw_m%3_%4], [pw_%4_%3]
%macro VP9_UNPACK_MULSUB_2W_4X 7 ; dst1, dst2, coef1, coef2, rnd, tmp1, tmp2
punpckhwd m%6, m%2, m%1
VP9_MULSUB_2W_2X %7, %6, %6, %5, [pw_m%3_%4], [pw_%4_%3]
punpcklwd m%2, m%1
VP9_MULSUB_2W_2X %1, 6, %2, 7, [pw_m%3_%4], [pw_%4_%3]
packssdw m%1, m4
packssdw m6, m5
SWAP %2, 6
VP9_MULSUB_2W_2X %1, %2, %2, %5, [pw_m%3_%4], [pw_%4_%3]
packssdw m%1, m%7
packssdw m%2, m%6
%endmacro
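; Per-lane scalar equivalent of the butterfly above, with rnd holding pd_8192
; (1 << 13):
;   dst1 = (in1*coef2 - in2*coef1 + 8192) >> 14
;   dst2 = (in1*coef1 + in2*coef2 + 8192) >> 14
; i.e. the standard VP9 idct rotation by (coef1, coef2) in Q14.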
%macro VP9_STORE_2X 2
movh m6, [dstq]
movh m7, [dstq+strideq]
punpcklbw m6, m4
punpcklbw m7, m4
paddw m6, %1
paddw m7, %2
packuswb m6, m4
packuswb m7, m4
movh [dstq], m6
movh [dstq+strideq], m7
%macro VP9_STORE_2X 5 ; reg1, reg2, tmp1, tmp2, zero
movh m%3, [dstq]
movh m%4, [dstq+strideq]
punpcklbw m%3, m%5
punpcklbw m%4, m%5
paddw m%3, m%1
paddw m%4, m%2
packuswb m%3, m%5
packuswb m%4, m%5
movh [dstq], m%3
movh [dstq+strideq], m%4
%endmacro
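; VP9_STORE_2X adds two rows of already-scaled idct residual (%1/%2) to dst:
; each row of destination pixels is widened to words using the zero register
; (%5), summed with the residual, packed back with unsigned saturation and
; stored.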
;-------------------------------------------------------------------------------------------
@@ -155,10 +91,9 @@ SECTION .text
%macro VP9_IDCT4_1D 0
SUMSUB_BA w, 2, 0, 4 ; m2=IN(0)+IN(2) m0=IN(0)-IN(2)
mova m4, [pw_11585x2]
pmulhrsw m2, m4 ; m2=t0
pmulhrsw m0, m4 ; m0=t1
VP9_UNPACK_MULSUB_2W_4X 1, 3, 15137, 6270 ; m1=t2, m3=t3
pmulhrsw m2, m6 ; m2=t0
pmulhrsw m0, m6 ; m0=t1
VP9_UNPACK_MULSUB_2W_4X 1, 3, 15137, 6270, m7, 4, 5 ; m1=t2, m3=t3
VP9_IDCT4_1D_FINALIZE
%endmacro
@@ -176,11 +111,11 @@ SECTION .text
mova m5, [pw_2048]
pmulhrsw m0, m5 ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
pmulhrsw m1, m5
VP9_STORE_2X m0, m1
VP9_STORE_2X 0, 1, 6, 7, 4
lea dstq, [dstq+2*strideq]
pmulhrsw m2, m5
pmulhrsw m3, m5
VP9_STORE_2X m2, m3
VP9_STORE_2X 2, 3, 6, 7, 4
%endmacro
INIT_MMX ssse3
@@ -199,11 +134,10 @@ cglobal vp9_idct_idct_4x4_add, 4,4,0, dst, stride, block, eob
pshufw m0, m0, 0
pxor m4, m4
movh [blockq], m4
mova m5, [pw_2048]
pmulhrsw m0, m5 ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
VP9_STORE_2X m0, m0
pmulhrsw m0, [pw_2048] ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
VP9_STORE_2X 0, 0, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X m0, m0
VP9_STORE_2X 0, 0, 6, 7, 4
RET
; faster path for when only top left 2x2 block is set
@@ -227,6 +161,7 @@ cglobal vp9_idct_idct_4x4_add, 4,4,0, dst, stride, block, eob
mova m1, [blockq+ 8]
mova m2, [blockq+16]
mova m3, [blockq+24]
mova m6, [pw_11585x2]
mova m7, [pd_8192] ; rounding
VP9_IDCT4_1D
TRANSPOSE4x4W 0, 1, 2, 3, 4
@@ -257,9 +192,9 @@ cglobal vp9_idct_idct_4x4_add, 4,4,0, dst, stride, block, eob
SUMSUB_BA w, 8, 0, 4 ; m8=IN(0)+IN(4) m0=IN(0)-IN(4)
pmulhrsw m8, m12 ; m8=t0a
pmulhrsw m0, m12 ; m0=t1a
VP9_UNPACK_MULSUB_2W_4X 2, 10, 15137, 6270 ; m2=t2a, m10=t3a
VP9_UNPACK_MULSUB_2W_4X 1, 11, 16069, 3196 ; m1=t4a, m11=t7a
VP9_UNPACK_MULSUB_2W_4X 9, 3, 9102, 13623 ; m9=t5a, m3=t6a
VP9_UNPACK_MULSUB_2W_4X 2, 10, 15137, 6270, m7, 4, 5 ; m2=t2a, m10=t3a
VP9_UNPACK_MULSUB_2W_4X 1, 11, 16069, 3196, m7, 4, 5 ; m1=t4a, m11=t7a
VP9_UNPACK_MULSUB_2W_4X 9, 3, 9102, 13623, m7, 4, 5 ; m9=t5a, m3=t6a
SUMSUB_BA w, 10, 8, 4 ; m10=t0a+t3a (t0), m8=t0a-t3a (t3)
SUMSUB_BA w, 2, 0, 4 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
SUMSUB_BA w, 9, 1, 4 ; m9=t4a+t5a (t4), m1=t4a-t5a (t5a)
@@ -270,6 +205,26 @@ cglobal vp9_idct_idct_4x4_add, 4,4,0, dst, stride, block, eob
VP9_IDCT8_1D_FINALIZE
%endmacro
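; For the 4x4 sub-idct below, eob <= 12 guarantees IN(4)..IN(7) are zero in
; both passes, so every VP9_UNPACK_MULSUB_2W_4X rotation of the full 1D idct
; has one zero operand and collapses to a single multiplication: pmulhrsw
; against the doubled constants (pw_15137x2 etc.) replaces the pmaddwd-based
; butterfly, and t0a == t1a == IN(0)*11585/2^14.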
%macro VP9_IDCT8_4x4_1D 0
pmulhrsw m0, m12 ; m0=t1a/t0a
pmulhrsw m10, m2, [pw_15137x2] ; m10=t3a
pmulhrsw m2, [pw_6270x2] ; m2=t2a
pmulhrsw m11, m1, [pw_16069x2] ; m11=t7a
pmulhrsw m1, [pw_3196x2] ; m1=t4a
pmulhrsw m9, m3, [pw_9102x2] ; m9=-t5a
pmulhrsw m3, [pw_13623x2] ; m3=t6a
psubw m8, m0, m10 ; m8=t0a-t3a (t3)
paddw m10, m0 ; m10=t0a+t3a (t0)
SUMSUB_BA w, 2, 0, 4 ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
SUMSUB_BA w, 9, 1, 4 ; m1=t4a+t5a (t4), m9=t4a-t5a (t5a)
SWAP 1, 9
SUMSUB_BA w, 3, 11, 4 ; m3=t7a+t6a (t7), m11=t7a-t6a (t6a)
SUMSUB_BA w, 1, 11, 4 ; m1=t6a+t5a (t6), m11=t6a-t5a (t5)
pmulhrsw m1, m12 ; m1=t6
pmulhrsw m11, m12 ; m11=t5
VP9_IDCT8_1D_FINALIZE
%endmacro
; TODO: a lot of t* copies can probably be removed and merged with
; following SUMSUBs from VP9_IDCT8_1D_FINALIZE with AVX
%macro VP9_IDCT8_2x2_1D 0
@@ -295,19 +250,19 @@ cglobal vp9_idct_idct_4x4_add, 4,4,0, dst, stride, block, eob
mova m5, [pw_1024]
pmulhrsw m0, m5 ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
pmulhrsw m1, m5
VP9_STORE_2X m0, m1
VP9_STORE_2X 0, 1, 6, 7, 4
lea dstq, [dstq+2*strideq]
pmulhrsw m2, m5
pmulhrsw m3, m5
VP9_STORE_2X m2, m3
VP9_STORE_2X 2, 3, 6, 7, 4
lea dstq, [dstq+2*strideq]
pmulhrsw m8, m5
pmulhrsw m9, m5
VP9_STORE_2X m8, m9
VP9_STORE_2X 8, 9, 6, 7, 4
lea dstq, [dstq+2*strideq]
pmulhrsw m10, m5
pmulhrsw m11, m5
VP9_STORE_2X m10, m11
VP9_STORE_2X 10, 11, 6, 7, 4
%endmacro
INIT_XMM ssse3
@@ -315,9 +270,12 @@ cglobal vp9_idct_idct_8x8_add, 4,4,13, dst, stride, block, eob
mova m12, [pw_11585x2] ; often used
cmp eobd, 3 ; top left corner or less
cmp eobd, 12 ; top left half or less
jg .idctfull
cmp eobd, 3 ; top left corner or less
jg .idcthalf
cmp eobd, 1 ; faster path for when only DC is set
jne .idcttopleftcorner
@@ -329,13 +287,13 @@ cglobal vp9_idct_idct_8x8_add, 4,4,13, dst, stride, block, eob
movd [blockq], m4
mova m5, [pw_1024]
pmulhrsw m0, m5 ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
VP9_STORE_2X m0, m0
VP9_STORE_2X 0, 0, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X m0, m0
VP9_STORE_2X 0, 0, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X m0, m0
VP9_STORE_2X 0, 0, 6, 7, 4
lea dstq, [dstq+2*strideq]
VP9_STORE_2X m0, m0
VP9_STORE_2X 0, 0, 6, 7, 4
RET
; faster path for when only left corner is set (3 input: DC, right to DC, below
@@ -354,6 +312,22 @@ cglobal vp9_idct_idct_8x8_add, 4,4,13, dst, stride, block, eob
VP9_IDCT8_WRITEOUT
RET
.idcthalf:
movh m0, [blockq + 0]
movh m1, [blockq +16]
movh m2, [blockq +32]
movh m3, [blockq +48]
VP9_IDCT8_4x4_1D
TRANSPOSE8x8W 0, 1, 2, 3, 8, 9, 10, 11, 4
VP9_IDCT8_4x4_1D
pxor m4, m4
movh [blockq+ 0], m4
movh [blockq+16], m4
movh [blockq+32], m4
movh [blockq+48], m4
VP9_IDCT8_WRITEOUT
RET
.idctfull: ; generic full 8x8 idct/idct
mova m0, [blockq+ 0] ; IN(0)
mova m1, [blockq+ 16] ; IN(1)
@@ -379,196 +353,3 @@ cglobal vp9_idct_idct_8x8_add, 4,4,13, dst, stride, block, eob
VP9_IDCT8_WRITEOUT
RET
%endif
%macro filter_h_fn 1
%assign %%px mmsize/2
cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, dstride, src, sstride, h, filtery
mova m6, [pw_256]
mova m7, [filteryq+ 0]
%if ARCH_X86_64 && mmsize > 8
mova m8, [filteryq+16]
mova m9, [filteryq+32]
mova m10, [filteryq+48]
%endif
.loop:
movh m0, [srcq-3]
movh m1, [srcq-2]
movh m2, [srcq-1]
movh m3, [srcq+0]
movh m4, [srcq+1]
movh m5, [srcq+2]
punpcklbw m0, m1
punpcklbw m2, m3
movh m1, [srcq+3]
movh m3, [srcq+4]
add srcq, sstrideq
punpcklbw m4, m5
punpcklbw m1, m3
pmaddubsw m0, m7
%if ARCH_X86_64 && mmsize > 8
pmaddubsw m2, m8
pmaddubsw m4, m9
pmaddubsw m1, m10
%else
pmaddubsw m2, [filteryq+16]
pmaddubsw m4, [filteryq+32]
pmaddubsw m1, [filteryq+48]
%endif
paddw m0, m2
paddw m4, m1
paddsw m0, m4
pmulhrsw m0, m6
%ifidn %1, avg
movh m1, [dstq]
%endif
packuswb m0, m0
%ifidn %1, avg
pavgb m0, m1
%endif
movh [dstq], m0
add dstq, dstrideq
dec hd
jg .loop
RET
%endmacro
INIT_MMX ssse3
filter_h_fn put
filter_h_fn avg
INIT_XMM ssse3
filter_h_fn put
filter_h_fn avg
%macro filter_v_fn 1
%assign %%px mmsize/2
%if ARCH_X86_64
cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, dstride, src, sstride, h, filtery, src4, sstride3
%else
cglobal %1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, dstride, src, sstride, filtery, src4, sstride3
mov filteryq, r5mp
%define hd r4mp
%endif
sub srcq, sstrideq
lea sstride3q, [sstrideq*3]
sub srcq, sstrideq
mova m6, [pw_256]
sub srcq, sstrideq
mova m7, [filteryq+ 0]
lea src4q, [srcq+sstrideq*4]
%if ARCH_X86_64 && mmsize > 8
mova m8, [filteryq+16]
mova m9, [filteryq+32]
mova m10, [filteryq+48]
%endif
.loop:
; FIXME maybe reuse loads from previous rows, or just
; more generally unroll this to prevent multiple loads of
; the same data?
movh m0, [srcq]
movh m1, [srcq+sstrideq]
movh m2, [srcq+sstrideq*2]
movh m3, [srcq+sstride3q]
movh m4, [src4q]
movh m5, [src4q+sstrideq]
punpcklbw m0, m1
punpcklbw m2, m3
movh m1, [src4q+sstrideq*2]
movh m3, [src4q+sstride3q]
add srcq, sstrideq
add src4q, sstrideq
punpcklbw m4, m5
punpcklbw m1, m3
pmaddubsw m0, m7
%if ARCH_X86_64 && mmsize > 8
pmaddubsw m2, m8
pmaddubsw m4, m9
pmaddubsw m1, m10
%else
pmaddubsw m2, [filteryq+16]
pmaddubsw m4, [filteryq+32]
pmaddubsw m1, [filteryq+48]
%endif
paddw m0, m2
paddw m4, m1
paddsw m0, m4
pmulhrsw m0, m6
%ifidn %1, avg
movh m1, [dstq]
%endif
packuswb m0, m0
%ifidn %1, avg
pavgb m0, m1
%endif
movh [dstq], m0
add dstq, dstrideq
dec hd
jg .loop
RET
%endmacro
INIT_MMX ssse3
filter_v_fn put
filter_v_fn avg
INIT_XMM ssse3
filter_v_fn put
filter_v_fn avg
%macro fpel_fn 6
%if %2 == 4
%define %%srcfn movh
%define %%dstfn movh
%else
%define %%srcfn movu
%define %%dstfn mova
%endif
%if %2 <= 16
cglobal %1%2, 5, 7, 4, dst, dstride, src, sstride, h, dstride3, sstride3
lea sstride3q, [sstrideq*3]
lea dstride3q, [dstrideq*3]
%else
cglobal %1%2, 5, 5, 4, dst, dstride, src, sstride, h
%endif
.loop:
%%srcfn m0, [srcq]
%%srcfn m1, [srcq+s%3]
%%srcfn m2, [srcq+s%4]
%%srcfn m3, [srcq+s%5]
lea srcq, [srcq+sstrideq*%6]
%ifidn %1, avg
pavgb m0, [dstq]
pavgb m1, [dstq+d%3]
pavgb m2, [dstq+d%4]
pavgb m3, [dstq+d%5]
%endif
%%dstfn [dstq], m0
%%dstfn [dstq+d%3], m1
%%dstfn [dstq+d%4], m2
%%dstfn [dstq+d%5], m3
lea dstq, [dstq+dstrideq*%6]
sub hd, %6
jnz .loop
RET
%endmacro
%define d16 16
%define s16 16
INIT_MMX mmx
fpel_fn put, 4, strideq, strideq*2, stride3q, 4
fpel_fn put, 8, strideq, strideq*2, stride3q, 4
INIT_MMX sse
fpel_fn avg, 4, strideq, strideq*2, stride3q, 4
fpel_fn avg, 8, strideq, strideq*2, stride3q, 4
INIT_XMM sse
fpel_fn put, 16, strideq, strideq*2, stride3q, 4
fpel_fn put, 32, mmsize, strideq, strideq+mmsize, 2
fpel_fn put, 64, mmsize, mmsize*2, mmsize*3, 1
INIT_XMM sse2
fpel_fn avg, 16, strideq, strideq*2, stride3q, 4
fpel_fn avg, 32, mmsize, strideq, strideq+mmsize, 2
fpel_fn avg, 64, mmsize, mmsize*2, mmsize*3, 1
%undef s16
%undef d16

libavcodec/x86/vp9mc.asm (new file, 278 lines)

@@ -0,0 +1,278 @@
;******************************************************************************
;* VP9 MC SIMD optimizations
;*
;* Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
; FIXME share with vp8dsp.asm
pw_256: times 8 dw 256
%macro F8_TAPS 8
times 8 db %1, %2
times 8 db %3, %4
times 8 db %5, %6
times 8 db %7, %8
%endmacro
; int8_t ff_filters_ssse3[3][15][4][16]
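; Each pair of taps is replicated across 16 bytes so pmaddubsw applies two
; taps per interleaved source pair; every row's eight taps sum to 128, which
; is why the kernels end with pmulhrsw against pw_256, i.e. (x + 64) >> 7.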
const filters_ssse3 ; smooth
F8_TAPS -3, -1, 32, 64, 38, 1, -3, 0
F8_TAPS -2, -2, 29, 63, 41, 2, -3, 0
F8_TAPS -2, -2, 26, 63, 43, 4, -4, 0
F8_TAPS -2, -3, 24, 62, 46, 5, -4, 0
F8_TAPS -2, -3, 21, 60, 49, 7, -4, 0
F8_TAPS -1, -4, 18, 59, 51, 9, -4, 0
F8_TAPS -1, -4, 16, 57, 53, 12, -4, -1
F8_TAPS -1, -4, 14, 55, 55, 14, -4, -1
F8_TAPS -1, -4, 12, 53, 57, 16, -4, -1
F8_TAPS 0, -4, 9, 51, 59, 18, -4, -1
F8_TAPS 0, -4, 7, 49, 60, 21, -3, -2
F8_TAPS 0, -4, 5, 46, 62, 24, -3, -2
F8_TAPS 0, -4, 4, 43, 63, 26, -2, -2
F8_TAPS 0, -3, 2, 41, 63, 29, -2, -2
F8_TAPS 0, -3, 1, 38, 64, 32, -1, -3
; regular
F8_TAPS 0, 1, -5, 126, 8, -3, 1, 0
F8_TAPS -1, 3, -10, 122, 18, -6, 2, 0
F8_TAPS -1, 4, -13, 118, 27, -9, 3, -1
F8_TAPS -1, 4, -16, 112, 37, -11, 4, -1
F8_TAPS -1, 5, -18, 105, 48, -14, 4, -1
F8_TAPS -1, 5, -19, 97, 58, -16, 5, -1
F8_TAPS -1, 6, -19, 88, 68, -18, 5, -1
F8_TAPS -1, 6, -19, 78, 78, -19, 6, -1
F8_TAPS -1, 5, -18, 68, 88, -19, 6, -1
F8_TAPS -1, 5, -16, 58, 97, -19, 5, -1
F8_TAPS -1, 4, -14, 48, 105, -18, 5, -1
F8_TAPS -1, 4, -11, 37, 112, -16, 4, -1
F8_TAPS -1, 3, -9, 27, 118, -13, 4, -1
F8_TAPS 0, 2, -6, 18, 122, -10, 3, -1
F8_TAPS 0, 1, -3, 8, 126, -5, 1, 0
; sharp
F8_TAPS -1, 3, -7, 127, 8, -3, 1, 0
F8_TAPS -2, 5, -13, 125, 17, -6, 3, -1
F8_TAPS -3, 7, -17, 121, 27, -10, 5, -2
F8_TAPS -4, 9, -20, 115, 37, -13, 6, -2
F8_TAPS -4, 10, -23, 108, 48, -16, 8, -3
F8_TAPS -4, 10, -24, 100, 59, -19, 9, -3
F8_TAPS -4, 11, -24, 90, 70, -21, 10, -4
F8_TAPS -4, 11, -23, 80, 80, -23, 11, -4
F8_TAPS -4, 10, -21, 70, 90, -24, 11, -4
F8_TAPS -3, 9, -19, 59, 100, -24, 10, -4
F8_TAPS -3, 8, -16, 48, 108, -23, 10, -4
F8_TAPS -2, 6, -13, 37, 115, -20, 9, -4
F8_TAPS -2, 5, -10, 27, 121, -17, 7, -3
F8_TAPS -1, 3, -6, 17, 125, -13, 5, -2
F8_TAPS 0, 1, -3, 8, 127, -7, 3, -1
SECTION .text
%macro filter_h_fn 1
%assign %%px mmsize/2
cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, dstride, src, sstride, h, filtery
mova m6, [pw_256]
mova m7, [filteryq+ 0]
%if ARCH_X86_64 && mmsize > 8
mova m8, [filteryq+16]
mova m9, [filteryq+32]
mova m10, [filteryq+48]
%endif
.loop:
movh m0, [srcq-3]
movh m1, [srcq-2]
movh m2, [srcq-1]
movh m3, [srcq+0]
movh m4, [srcq+1]
movh m5, [srcq+2]
punpcklbw m0, m1
punpcklbw m2, m3
movh m1, [srcq+3]
movh m3, [srcq+4]
add srcq, sstrideq
punpcklbw m4, m5
punpcklbw m1, m3
pmaddubsw m0, m7
%if ARCH_X86_64 && mmsize > 8
pmaddubsw m2, m8
pmaddubsw m4, m9
pmaddubsw m1, m10
%else
pmaddubsw m2, [filteryq+16]
pmaddubsw m4, [filteryq+32]
pmaddubsw m1, [filteryq+48]
%endif
paddw m0, m2
paddw m4, m1
paddsw m0, m4
pmulhrsw m0, m6
%ifidn %1, avg
movh m1, [dstq]
%endif
packuswb m0, m0
%ifidn %1, avg
pavgb m0, m1
%endif
movh [dstq], m0
add dstq, dstrideq
dec hd
jg .loop
RET
%endmacro
INIT_MMX ssse3
filter_h_fn put
filter_h_fn avg
INIT_XMM ssse3
filter_h_fn put
filter_h_fn avg
%macro filter_v_fn 1
%assign %%px mmsize/2
%if ARCH_X86_64
cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, dstride, src, sstride, h, filtery, src4, sstride3
%else
cglobal %1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, dstride, src, sstride, filtery, src4, sstride3
mov filteryq, r5mp
%define hd r4mp
%endif
sub srcq, sstrideq
lea sstride3q, [sstrideq*3]
sub srcq, sstrideq
mova m6, [pw_256]
sub srcq, sstrideq
mova m7, [filteryq+ 0]
lea src4q, [srcq+sstrideq*4]
%if ARCH_X86_64 && mmsize > 8
mova m8, [filteryq+16]
mova m9, [filteryq+32]
mova m10, [filteryq+48]
%endif
.loop:
; FIXME maybe reuse loads from previous rows, or just
; more generally unroll this to prevent multiple loads of
; the same data?
movh m0, [srcq]
movh m1, [srcq+sstrideq]
movh m2, [srcq+sstrideq*2]
movh m3, [srcq+sstride3q]
movh m4, [src4q]
movh m5, [src4q+sstrideq]
punpcklbw m0, m1
punpcklbw m2, m3
movh m1, [src4q+sstrideq*2]
movh m3, [src4q+sstride3q]
add srcq, sstrideq
add src4q, sstrideq
punpcklbw m4, m5
punpcklbw m1, m3
pmaddubsw m0, m7
%if ARCH_X86_64 && mmsize > 8
pmaddubsw m2, m8
pmaddubsw m4, m9
pmaddubsw m1, m10
%else
pmaddubsw m2, [filteryq+16]
pmaddubsw m4, [filteryq+32]
pmaddubsw m1, [filteryq+48]
%endif
paddw m0, m2
paddw m4, m1
paddsw m0, m4
pmulhrsw m0, m6
%ifidn %1, avg
movh m1, [dstq]
%endif
packuswb m0, m0
%ifidn %1, avg
pavgb m0, m1
%endif
movh [dstq], m0
add dstq, dstrideq
dec hd
jg .loop
RET
%endmacro
INIT_MMX ssse3
filter_v_fn put
filter_v_fn avg
INIT_XMM ssse3
filter_v_fn put
filter_v_fn avg
%macro fpel_fn 6
%if %2 == 4
%define %%srcfn movh
%define %%dstfn movh
%else
%define %%srcfn movu
%define %%dstfn mova
%endif
%if %2 <= 16
cglobal %1%2, 5, 7, 4, dst, dstride, src, sstride, h, dstride3, sstride3
lea sstride3q, [sstrideq*3]
lea dstride3q, [dstrideq*3]
%else
cglobal %1%2, 5, 5, 4, dst, dstride, src, sstride, h
%endif
.loop:
%%srcfn m0, [srcq]
%%srcfn m1, [srcq+s%3]
%%srcfn m2, [srcq+s%4]
%%srcfn m3, [srcq+s%5]
lea srcq, [srcq+sstrideq*%6]
%ifidn %1, avg
pavgb m0, [dstq]
pavgb m1, [dstq+d%3]
pavgb m2, [dstq+d%4]
pavgb m3, [dstq+d%5]
%endif
%%dstfn [dstq], m0
%%dstfn [dstq+d%3], m1
%%dstfn [dstq+d%4], m2
%%dstfn [dstq+d%5], m3
lea dstq, [dstq+dstrideq*%6]
sub hd, %6
jnz .loop
RET
%endmacro
%define d16 16
%define s16 16
INIT_MMX mmx
fpel_fn put, 4, strideq, strideq*2, stride3q, 4
fpel_fn put, 8, strideq, strideq*2, stride3q, 4
INIT_MMX sse
fpel_fn avg, 4, strideq, strideq*2, stride3q, 4
fpel_fn avg, 8, strideq, strideq*2, stride3q, 4
INIT_XMM sse
fpel_fn put, 16, strideq, strideq*2, stride3q, 4
fpel_fn put, 32, mmsize, strideq, strideq+mmsize, 2
fpel_fn put, 64, mmsize, mmsize*2, mmsize*3, 1
INIT_XMM sse2
fpel_fn avg, 16, strideq, strideq*2, stride3q, 4
fpel_fn avg, 32, mmsize, strideq, strideq+mmsize, 2
fpel_fn avg, 64, mmsize, mmsize*2, mmsize*3, 1
%undef s16
%undef d16
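
For reference, the filter_h_fn/filter_v_fn loops above implement the VP9 8-tap sub-pel filter: four pmaddubsw multiplies apply the taps in pairs, the partial sums are added (the last one with saturating paddsw), pmulhrsw with pw_256 performs the rounded downshift (x*256 + (1<<14))>>15 <=> (x+64)>>7, and packuswb clamps to bytes. Below is a scalar C sketch (not part of the patch) of one horizontal output pixel; the flat taps[] array is a simplification of the replicated-pair layout in filters_ssse3, and the intermediate paddsw saturation is ignored:

#include <stdint.h>
#include <stdio.h>

/* One output pixel of the put variant of the 8tap_1d_h kernels:
 * src[-3]..src[4] are read (matching the movh loads at srcq-3 .. srcq+4),
 * and the eight taps sum to 128. */
uint8_t filter8_h_pixel(const uint8_t *src, const int8_t *taps)
{
    int sum = 0;
    for (int i = 0; i < 8; i++)
        sum += src[i - 3] * taps[i];
    sum = (sum + 64) >> 7;        /* pmulhrsw with pw_256 */
    if (sum < 0)   sum = 0;       /* packuswb clamp       */
    if (sum > 255) sum = 255;
    return (uint8_t)sum;
}

int main(void)
{
    /* symmetric half-pel row of the "regular" filter from the table above */
    static const int8_t  taps[8] = { -1, 6, -19, 78, 78, -19, 6, -1 };
    static const uint8_t src[16] = { 0, 0, 0, 10, 20, 30, 40, 50,
                                     60, 70, 80, 90, 0, 0, 0, 0 };
    printf("%u\n", filter8_h_pixel(src + 6, taps));
    return 0;
}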