Make x86util.asm LGPL so we can use it in LGPL asm
Strip out most x264-specific stuff (not used anywhere in ffmpeg).

Originally committed as revision 23877 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent bc14f04b2f
commit 37355fe823
@ -1,28 +1,27 @@
;*****************************************************************************
;* x86util.asm
;*****************************************************************************
;* Copyright (C) 2008 x264 project
;* Copyright (C) 2008-2010 x264 project
;*
;* Authors: Holger Lubitz <holger@lubitz.org>
;*          Loren Merritt <lorenm@u.washington.edu>
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Holger Lubitz <holger@lubitz.org>
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;* This file is part of FFmpeg.
;*
;* This program is distributed in the hope that it will be useful,
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;* GNU General Public License for more details.
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
;*****************************************************************************

%assign FENC_STRIDE 16
%assign FDEC_STRIDE 32
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%macro SBUTTERFLY 4
    mova m%4, m%2
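
For reference, SBUTTERFLY is the basic interleave step that the transpose and
Hadamard macros below are built on. A scalar C sketch of its effect at word
(wd) granularity, assuming the usual punpckl/punpckh pairing; the function
name and the fixed eight-element width are illustrative only:

    #include <stdint.h>

    /* Interleave two vectors of eight 16-bit words: out_lo pairs up the low
     * halves of a and b (punpcklwd), out_hi pairs up the high halves
     * (punpckhwd). */
    static void sbutterfly_wd(const int16_t a[8], const int16_t b[8],
                              int16_t out_lo[8], int16_t out_hi[8])
    {
        for (int i = 0; i < 4; i++) {
            out_lo[2 * i]     = a[i];
            out_lo[2 * i + 1] = b[i];
            out_hi[2 * i]     = a[i + 4];
            out_hi[2 * i + 1] = b[i + 4];
        }
    }
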
@ -244,165 +243,6 @@
%endif
%endmacro

%macro HADAMARD4_V 4+
    SUMSUB_BADC %1, %2, %3, %4
    SUMSUB_BADC %1, %3, %2, %4
%endmacro

%macro HADAMARD8_V 8+
    SUMSUB_BADC %1, %2, %3, %4
    SUMSUB_BADC %5, %6, %7, %8
    SUMSUB_BADC %1, %3, %2, %4
    SUMSUB_BADC %5, %7, %6, %8
    SUMSUB_BADC %1, %5, %2, %6
    SUMSUB_BADC %3, %7, %4, %8
%endmacro
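
HADAMARD4_V and HADAMARD8_V build 4- and 8-point (unnormalized) Hadamard
transforms out of two or three levels of SUMSUB_BADC sum/difference
butterflies, applied column-wise across the word lanes of the registers. A
scalar C sketch of the 4-point version; the actual macros may differ in sign
and output ordering, which does not matter for the absolute-sum uses:

    #include <stdint.h>

    /* 4-point Hadamard applied down the columns of four rows of eight
     * 16-bit coefficients (two butterfly levels, overflow ignored). */
    static void hadamard4_v(int16_t r0[8], int16_t r1[8],
                            int16_t r2[8], int16_t r3[8])
    {
        for (int i = 0; i < 8; i++) {
            int s01 = r0[i] + r1[i], d01 = r0[i] - r1[i]; /* level 1 */
            int s23 = r2[i] + r3[i], d23 = r2[i] - r3[i];
            r0[i] = s01 + s23;                            /* level 2 */
            r1[i] = d01 + d23;
            r2[i] = s01 - s23;
            r3[i] = d01 - d23;
        }
    }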

%macro TRANS_SSE2 5-6
; TRANSPOSE2x2
; %1: transpose width (d/q) - use SBUTTERFLY qdq for dq
; %2: ord/unord (for compat with sse4, unused)
; %3/%4: source regs
; %5/%6: tmp regs
%ifidn %1, d
%define mask [mask_10]
%define shift 16
%elifidn %1, q
%define mask [mask_1100]
%define shift 32
%endif
%if %0==6 ; less dependency if we have two tmp
    mova   m%5, mask   ; ff00
    mova   m%6, m%4    ; x5x4
    psll%1 m%4, shift  ; x4..
    pand   m%6, m%5    ; x5..
    pandn  m%5, m%3    ; ..x0
    psrl%1 m%3, shift  ; ..x1
    por    m%4, m%5    ; x4x0
    por    m%3, m%6    ; x5x1
%else ; more dependency, one insn less. sometimes faster, sometimes not
    mova   m%5, m%4    ; x5x4
    psll%1 m%4, shift  ; x4..
    pxor   m%4, m%3    ; (x4^x1)x0
    pand   m%4, mask   ; (x4^x1)..
    pxor   m%3, m%4    ; x4x0
    psrl%1 m%4, shift  ; ..(x1^x4)
    pxor   m%5, m%4    ; x5x1
    SWAP   %4, %3, %5
%endif
%endmacro
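
The comments above describe a 2x2 transpose inside each lane: with %1 == q,
every 64-bit lane holds a pair of 32-bit elements, and the high element of one
register is exchanged with the low element of the other (mask_1100, shift 32).
A scalar C sketch of that exchange for a single lane, using hypothetical
names; the d case is the same idea on 16-bit elements inside 32-bit lanes:

    #include <stdint.h>

    /* a = x1:x0 and b = x5:x4 (high:low). Afterwards a = x5:x1 and
     * b = x4:x0, i.e. the 2x2 block with rows (x0,x1) and (x4,x5) has
     * been transposed. */
    static void trans2x2_q(uint64_t *a, uint64_t *b)
    {
        const uint64_t mask = 0xffffffff00000000ull;  /* like mask_1100 */
        uint64_t hi_a = *a & mask, lo_a = *a & ~mask; /* x1 (in place), x0 */
        uint64_t hi_b = *b & mask, lo_b = *b & ~mask; /* x5 (in place), x4 */
        *a = hi_b | (hi_a >> 32);                     /* x5:x1 */
        *b = (lo_b << 32) | lo_a;                     /* x4:x0 */
    }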

%macro TRANS_SSE4 5-6 ; see above
%ifidn %1, d
    mova    m%5, m%3
%ifidn %2, ord
    psrl%1  m%3, 16
%endif
    pblendw m%3, m%4, 10101010b
    psll%1  m%4, 16
%ifidn %2, ord
    pblendw m%4, m%5, 01010101b
%else
    psrl%1  m%5, 16
    por     m%4, m%5
%endif
%elifidn %1, q
    mova    m%5, m%3
    shufps  m%3, m%4, 10001000b
    shufps  m%5, m%4, 11011101b
    SWAP    %4, %5
%endif
%endmacro

%macro HADAMARD 5-6
; %1=distance in words (0 for vertical pass, 1/2/4 for horizontal passes)
; %2=sumsub/max/amax (sum and diff / maximum / maximum of absolutes)
; %3/%4: regs
; %5(%6): tmpregs
%if %1!=0 ; have to reorder stuff for horizontal op
%ifidn %2, sumsub
%define ORDER ord
; sumsub needs order because a-b != b-a unless a=b
%else
%define ORDER unord
; if we just max, order doesn't matter (allows pblendw+or in sse4)
%endif
%if %1==1
    TRANS d, ORDER, %3, %4, %5, %6
%elif %1==2
%if mmsize==8
    SBUTTERFLY dq, %3, %4, %5
%else
    TRANS q, ORDER, %3, %4, %5, %6
%endif
%elif %1==4
    SBUTTERFLY qdq, %3, %4, %5
%endif
%endif
%ifidn %2, sumsub
    SUMSUB_BA m%3, m%4, m%5
%else
%ifidn %2, amax
%if %0==6
    ABS2 m%3, m%4, m%5, m%6
%else
    ABS1 m%3, m%5
    ABS1 m%4, m%5
%endif
%endif
    pmaxsw m%3, m%4
%endif
%endmacro
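
The ordering note above is the crux of the ord/unord distinction: in sumsub
mode the macro keeps a+b and a-b, so the preceding shuffle must deliver each
pair in a defined order, whereas in max/amax mode the per-element result is
symmetric and an unordered shuffle suffices (which is what lets the SSE4 path
use pblendw plus por). A scalar C sketch of the amax step, names illustrative:

    #include <stdint.h>
    #include <stdlib.h>

    /* amax: a[i] becomes max(|a[i]|, |b[i]|); swapping a and b gives the
     * same result, so the shuffle feeding it may be unordered. */
    static void hadamard_step_amax(int16_t a[8], const int16_t b[8])
    {
        for (int i = 0; i < 8; i++) {
            int aa = abs(a[i]), bb = abs(b[i]);
            a[i] = (int16_t)(aa > bb ? aa : bb);
        }
    }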

%macro HADAMARD2_2D 6-7 sumsub
    HADAMARD 0, sumsub, %1, %2, %5
    HADAMARD 0, sumsub, %3, %4, %5
    SBUTTERFLY %6, %1, %2, %5
%ifnum %7
    HADAMARD 0, amax, %1, %2, %5, %7
%else
    HADAMARD 0, %7, %1, %2, %5
%endif
    SBUTTERFLY %6, %3, %4, %5
%ifnum %7
    HADAMARD 0, amax, %3, %4, %5, %7
%else
    HADAMARD 0, %7, %3, %4, %5
%endif
%endmacro

%macro HADAMARD4_2D 5-6 sumsub
    HADAMARD2_2D %1, %2, %3, %4, %5, wd
    HADAMARD2_2D %1, %3, %2, %4, %5, dq, %6
    SWAP %2, %3
%endmacro
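
HADAMARD4_2D chains two HADAMARD2_2D passes (word, then dword granularity)
into a full two-dimensional 4x4 Hadamard over 16-bit coefficients. A scalar
reference of the transform this corresponds to, up to the sign and
output-ordering conventions of the macros:

    #include <stdint.h>

    /* 4x4 2D Hadamard: 4-point butterfly down each column, then along each
     * row (overflow ignored; reference sketch only). */
    static void hadamard4x4_2d(int16_t blk[4][4])
    {
        for (int x = 0; x < 4; x++) {          /* vertical pass */
            int a = blk[0][x] + blk[1][x], b = blk[0][x] - blk[1][x];
            int c = blk[2][x] + blk[3][x], d = blk[2][x] - blk[3][x];
            blk[0][x] = a + c; blk[1][x] = b + d;
            blk[2][x] = a - c; blk[3][x] = b - d;
        }
        for (int y = 0; y < 4; y++) {          /* horizontal pass */
            int a = blk[y][0] + blk[y][1], b = blk[y][0] - blk[y][1];
            int c = blk[y][2] + blk[y][3], d = blk[y][2] - blk[y][3];
            blk[y][0] = a + c; blk[y][1] = b + d;
            blk[y][2] = a - c; blk[y][3] = b - d;
        }
    }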

%macro HADAMARD4_2D_SSE 5-6 sumsub
    HADAMARD     0, sumsub, %1, %2, %5 ; 1st V row 0 + 1
    HADAMARD     0, sumsub, %3, %4, %5 ; 1st V row 2 + 3
    SBUTTERFLY   wd, %1, %2, %5        ; %1: m0 1+0 %2: m1 1+0
    SBUTTERFLY   wd, %3, %4, %5        ; %3: m0 3+2 %4: m1 3+2
    HADAMARD2_2D %1, %3, %2, %4, %5, dq
    SBUTTERFLY   qdq, %1, %2, %5
    HADAMARD     0, %6, %1, %2, %5     ; 2nd H m1/m0 row 0+1
    SBUTTERFLY   qdq, %3, %4, %5
    HADAMARD     0, %6, %3, %4, %5     ; 2nd H m1/m0 row 2+3
%endmacro

%macro HADAMARD8_2D 9-10 sumsub
    HADAMARD2_2D %1, %2, %3, %4, %9, wd
    HADAMARD2_2D %5, %6, %7, %8, %9, wd
    HADAMARD2_2D %1, %3, %2, %4, %9, dq
    HADAMARD2_2D %5, %7, %6, %8, %9, dq
    HADAMARD2_2D %1, %5, %3, %7, %9, qdq, %10
    HADAMARD2_2D %2, %6, %4, %8, %9, qdq, %10
%ifnidn %10, amax
    SWAP %2, %5
    SWAP %4, %7
%endif
%endmacro

%macro SUMSUB2_AB 3
    mova  %3, %1
    paddw %1, %1
@ -483,32 +323,6 @@
%endif
%endmacro

%macro LOAD_DIFF8x4_SSE2 8
    LOAD_DIFF m%1, m%5, m%6, [%7+%1*FENC_STRIDE], [%8+%1*FDEC_STRIDE]
    LOAD_DIFF m%2, m%5, m%6, [%7+%2*FENC_STRIDE], [%8+%2*FDEC_STRIDE]
    LOAD_DIFF m%3, m%5, m%6, [%7+%3*FENC_STRIDE], [%8+%3*FDEC_STRIDE]
    LOAD_DIFF m%4, m%5, m%6, [%7+%4*FENC_STRIDE], [%8+%4*FDEC_STRIDE]
%endmacro

%macro LOAD_DIFF8x4_SSSE3 8 ; 4x dst, 1x tmp, 1x mul, 2x ptr
    movh      m%2, [%8+%1*FDEC_STRIDE]
    movh      m%1, [%7+%1*FENC_STRIDE]
    punpcklbw m%1, m%2
    movh      m%3, [%8+%2*FDEC_STRIDE]
    movh      m%2, [%7+%2*FENC_STRIDE]
    punpcklbw m%2, m%3
    movh      m%4, [%8+%3*FDEC_STRIDE]
    movh      m%3, [%7+%3*FENC_STRIDE]
    punpcklbw m%3, m%4
    movh      m%5, [%8+%4*FDEC_STRIDE]
    movh      m%4, [%7+%4*FENC_STRIDE]
    punpcklbw m%4, m%5
    pmaddubsw m%1, m%6
    pmaddubsw m%2, m%6
    pmaddubsw m%3, m%6
    pmaddubsw m%4, m%6
%endmacro
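
Per the "4x dst, 1x tmp, 1x mul, 2x ptr" comment, each row here interleaves 8
source bytes with 8 reconstructed bytes (punpcklbw) and then collapses every
byte pair with pmaddubsw; for that to produce differences, the mul register
is assumed to hold alternating +1/-1 signed bytes. What one destination
register ends up containing, as a scalar C sketch:

    #include <stdint.h>

    /* One row of LOAD_DIFF8x4: 16-bit differences between 8 source pixels
     * and the 8 co-located reconstructed pixels. */
    static void load_diff_row(const uint8_t *src, const uint8_t *rec,
                              int16_t diff[8])
    {
        for (int i = 0; i < 8; i++)
            diff[i] = (int16_t)(src[i] - rec[i]);
    }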

%macro STORE_DCT 6
    movq   [%5+%6+ 0], m%1
    movq   [%5+%6+ 8], m%2
@ -520,17 +334,6 @@
    movhps [%5+%6+56], m%4
%endmacro

%macro STORE_IDCT 4
    movhps [r0-4*FDEC_STRIDE], %1
    movh   [r0-3*FDEC_STRIDE], %1
    movhps [r0-2*FDEC_STRIDE], %2
    movh   [r0-1*FDEC_STRIDE], %2
    movhps [r0+0*FDEC_STRIDE], %3
    movh   [r0+1*FDEC_STRIDE], %3
    movhps [r0+2*FDEC_STRIDE], %4
    movh   [r0+3*FDEC_STRIDE], %4
%endmacro

%macro LOAD_DIFF_8x4P 7-10 r0,r2,0 ; 4x dest, 2x temp, 2x pointer, increment?
    LOAD_DIFF m%1, m%5, m%7, [%8],    [%9]
    LOAD_DIFF m%2, m%6, m%7, [%8+r1], [%9+r3]