From 7adcd4e841f351aad6c902a08e13d51f7137ad6e Mon Sep 17 00:00:00 2001 From: Henrik Gramner Date: Mon, 18 Jan 2016 00:21:44 +0100 Subject: [PATCH 1/7] x86inc: Make cpuflag() and notcpuflag() return 0 or 1 Makes it possible to use them in arithmetic expressions. Signed-off-by: Anton Khirnov --- libavutil/x86/x86inc.asm | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libavutil/x86/x86inc.asm b/libavutil/x86/x86inc.asm index 6ad9785536..afcd6b84ac 100644 --- a/libavutil/x86/x86inc.asm +++ b/libavutil/x86/x86inc.asm @@ -773,8 +773,9 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, %assign cpuflags_bmi1 (1<<22)|cpuflags_lzcnt %assign cpuflags_bmi2 (1<<23)|cpuflags_bmi1 -%define cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x)) -%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x)) +; Returns a boolean value expressing whether or not the specified cpuflag is enabled. +%define cpuflag(x) (((((cpuflags & (cpuflags_ %+ x)) ^ (cpuflags_ %+ x)) - 1) >> 31) & 1) +%define notcpuflag(x) (cpuflag(x) ^ 1) ; Takes an arbitrary number of cpuflags from the above list. ; All subsequent functions (up to the next INIT_CPUFLAGS) is built for the specified cpu. From f60f06d9894b06d5371729c84f8fd7219909e2bf Mon Sep 17 00:00:00 2001 From: Henrik Gramner Date: Mon, 18 Jan 2016 00:21:45 +0100 Subject: [PATCH 2/7] x86inc: Be more verbose in assertion failures Signed-off-by: Anton Khirnov --- libavutil/x86/x86inc.asm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libavutil/x86/x86inc.asm b/libavutil/x86/x86inc.asm index afcd6b84ac..dabb6cc15c 100644 --- a/libavutil/x86/x86inc.asm +++ b/libavutil/x86/x86inc.asm @@ -295,7 +295,7 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14 %macro ASSERT 1 %if (%1) == 0 - %error assert failed + %error assertion ``%1'' failed %endif %endmacro From 715eb7ca24c9fda8aed88a3276f5d11567c355bc Mon Sep 17 00:00:00 2001 From: Henrik Gramner Date: Mon, 18 Jan 2016 00:21:46 +0100 Subject: [PATCH 3/7] x86inc: Improve FMA instruction handling * Correctly handle FMA instructions with memory operands. * Print a warning if FMA instructions are used without the correct cpuflag. * Simplify the instantiation code. * Clarify documentation. Only the last operand in FMA3 instructions can be a memory operand. When converting FMA4 instructions to FMA3 instructions we can utilize the fact that multiply is a commutative operation and reorder operands if necessary to ensure that a memory operand is used only as the last operand. Signed-off-by: Anton Khirnov --- libavutil/x86/x86inc.asm | 77 +++++++++++++++++++--------------------- 1 file changed, 37 insertions(+), 40 deletions(-) diff --git a/libavutil/x86/x86inc.asm b/libavutil/x86/x86inc.asm index dabb6cc15c..fc58b74e2a 100644 --- a/libavutil/x86/x86inc.asm +++ b/libavutil/x86/x86inc.asm @@ -1,7 +1,7 @@ ;***************************************************************************** ;* x86inc.asm: x264asm abstraction layer ;***************************************************************************** -;* Copyright (C) 2005-2015 x264 project +;* Copyright (C) 2005-2016 x264 project ;* ;* Authors: Loren Merritt ;* Anton Mitrofanov @@ -1456,47 +1456,44 @@ FMA_INSTR pmadcswd, pmaddwd, paddd ; This lets us use tzcnt without bumping the yasm version requirement yet. 
%define tzcnt rep bsf -; convert FMA4 to FMA3 if possible -%macro FMA4_INSTR 4 - %macro %1 4-8 %1, %2, %3, %4 - %if cpuflag(fma4) - v%5 %1, %2, %3, %4 - %elifidn %1, %2 - v%6 %1, %4, %3 ; %1 = %1 * %3 + %4 - %elifidn %1, %3 - v%7 %1, %2, %4 ; %1 = %2 * %1 + %4 - %elifidn %1, %4 - v%8 %1, %2, %3 ; %1 = %2 * %3 + %1 - %else - %error fma3 emulation of ``%5 %1, %2, %3, %4'' is not supported - %endif - %endmacro +; Macros for consolidating FMA3 and FMA4 using 4-operand (dst, src1, src2, src3) syntax. +; FMA3 is only possible if dst is the same as one of the src registers. +; Either src2 or src3 can be a memory operand. +%macro FMA4_INSTR 2-* + %push fma4_instr + %xdefine %$prefix %1 + %rep %0 - 1 + %macro %$prefix%2 4-6 %$prefix, %2 + %if notcpuflag(fma3) && notcpuflag(fma4) + %error use of ``%5%6'' fma instruction in cpuname function: current_function + %elif cpuflag(fma4) + v%5%6 %1, %2, %3, %4 + %elifidn %1, %2 + ; If %3 or %4 is a memory operand it needs to be encoded as the last operand. + %ifid %3 + v%{5}213%6 %2, %3, %4 + %else + v%{5}132%6 %2, %4, %3 + %endif + %elifidn %1, %3 + v%{5}213%6 %3, %2, %4 + %elifidn %1, %4 + v%{5}231%6 %4, %2, %3 + %else + %error fma3 emulation of ``%5%6 %1, %2, %3, %4'' is not supported + %endif + %endmacro + %rotate 1 + %endrep + %pop %endmacro -FMA4_INSTR fmaddpd, fmadd132pd, fmadd213pd, fmadd231pd -FMA4_INSTR fmaddps, fmadd132ps, fmadd213ps, fmadd231ps -FMA4_INSTR fmaddsd, fmadd132sd, fmadd213sd, fmadd231sd -FMA4_INSTR fmaddss, fmadd132ss, fmadd213ss, fmadd231ss - -FMA4_INSTR fmaddsubpd, fmaddsub132pd, fmaddsub213pd, fmaddsub231pd -FMA4_INSTR fmaddsubps, fmaddsub132ps, fmaddsub213ps, fmaddsub231ps -FMA4_INSTR fmsubaddpd, fmsubadd132pd, fmsubadd213pd, fmsubadd231pd -FMA4_INSTR fmsubaddps, fmsubadd132ps, fmsubadd213ps, fmsubadd231ps - -FMA4_INSTR fmsubpd, fmsub132pd, fmsub213pd, fmsub231pd -FMA4_INSTR fmsubps, fmsub132ps, fmsub213ps, fmsub231ps -FMA4_INSTR fmsubsd, fmsub132sd, fmsub213sd, fmsub231sd -FMA4_INSTR fmsubss, fmsub132ss, fmsub213ss, fmsub231ss - -FMA4_INSTR fnmaddpd, fnmadd132pd, fnmadd213pd, fnmadd231pd -FMA4_INSTR fnmaddps, fnmadd132ps, fnmadd213ps, fnmadd231ps -FMA4_INSTR fnmaddsd, fnmadd132sd, fnmadd213sd, fnmadd231sd -FMA4_INSTR fnmaddss, fnmadd132ss, fnmadd213ss, fnmadd231ss - -FMA4_INSTR fnmsubpd, fnmsub132pd, fnmsub213pd, fnmsub231pd -FMA4_INSTR fnmsubps, fnmsub132ps, fnmsub213ps, fnmsub231ps -FMA4_INSTR fnmsubsd, fnmsub132sd, fnmsub213sd, fnmsub231sd -FMA4_INSTR fnmsubss, fnmsub132ss, fnmsub213ss, fnmsub231ss +FMA4_INSTR fmadd, pd, ps, sd, ss +FMA4_INSTR fmaddsub, pd, ps +FMA4_INSTR fmsub, pd, ps, sd, ss +FMA4_INSTR fmsubadd, pd, ps +FMA4_INSTR fnmadd, pd, ps, sd, ss +FMA4_INSTR fnmsub, pd, ps, sd, ss ; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug (fixed in 1.3.0) %ifdef __YASM_VER__ From 91ed050f426ba9747ea42b14e762ed412a28da76 Mon Sep 17 00:00:00 2001 From: Henrik Gramner Date: Thu, 21 Jan 2016 00:05:15 +0100 Subject: [PATCH 4/7] x86inc: Preserve arguments when allocating stack space When allocating stack space with a larger alignment than the known stack alignment a temporary register is used for storing the stack pointer. Ensure that this isn't one of the registers used for passing arguments. 
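As a rough illustration (this sketch is not part of the patch; the function name, register counts and stack size are invented), the affected case is a function that requests stack space with a larger alignment than the ABI guarantees, for example an AVX2 function wanting mmsize-aligned storage on a stack that is only known to be 16-byte aligned. x86inc then stores the original stack pointer in a spare GPR, and that GPR must not be one that still holds an argument:

    INIT_YMM avx2
    cglobal sketch, 3, 7, 8, 64  ; 3 args, 7 GPRs, 8 vector regs, 64 bytes of aligned stack
        mova [rsp], m0           ; rsp has been realigned to mmsize (32 > STACK_ALIGNMENT)
        mova [rsp+32], m1
        add  r0, r2              ; r0-r2 must still contain the function arguments here
        RET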
Signed-off-by: Anton Khirnov --- libavutil/x86/x86inc.asm | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/libavutil/x86/x86inc.asm b/libavutil/x86/x86inc.asm index fc58b74e2a..7d6c1713dd 100644 --- a/libavutil/x86/x86inc.asm +++ b/libavutil/x86/x86inc.asm @@ -386,8 +386,11 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14 %if %1 != 0 && required_stack_alignment > STACK_ALIGNMENT %if %1 > 0 %assign regs_used (regs_used + 1) - %elif ARCH_X86_64 && regs_used == num_args && num_args <= 4 + UNIX64 * 2 - %warning "Stack pointer will overwrite register argument" + %endif + %if ARCH_X86_64 && regs_used < 5 + UNIX64 * 3 + ; Ensure that we don't clobber any registers containing arguments. For UNIX64 we also preserve r6 (rax) + ; since it's used as a hidden argument in vararg functions to specify the number of vector registers used. + %assign regs_used 5 + UNIX64 * 3 %endif %endif %endif From 5ca8e195e51c43dcc508888ca1560ac8f2b33e51 Mon Sep 17 00:00:00 2001 From: Henrik Gramner Date: Mon, 18 Jan 2016 00:21:48 +0100 Subject: [PATCH 5/7] x86inc: Use more consistent indentation Signed-off-by: Anton Khirnov --- libavutil/x86/x86inc.asm | 134 +++++++++++++++++++-------------------- 1 file changed, 67 insertions(+), 67 deletions(-) diff --git a/libavutil/x86/x86inc.asm b/libavutil/x86/x86inc.asm index 7d6c1713dd..e971a0a93e 100644 --- a/libavutil/x86/x86inc.asm +++ b/libavutil/x86/x86inc.asm @@ -183,9 +183,9 @@ %define e%1h %3 %define r%1b %2 %define e%1b %2 -%if ARCH_X86_64 == 0 - %define r%1 e%1 -%endif + %if ARCH_X86_64 == 0 + %define r%1 e%1 + %endif %endmacro DECLARE_REG_SIZE ax, al, ah @@ -504,9 +504,9 @@ DECLARE_REG 14, R15, 120 %macro RET 0 WIN64_RESTORE_XMM_INTERNAL rsp POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7 -%if mmsize == 32 - vzeroupper -%endif + %if mmsize == 32 + vzeroupper + %endif AUTO_REP_RET %endmacro @@ -543,17 +543,17 @@ DECLARE_REG 14, R15, 72 %define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0 %macro RET 0 -%if stack_size_padded > 0 -%if required_stack_alignment > STACK_ALIGNMENT - mov rsp, rstkm -%else - add rsp, stack_size_padded -%endif -%endif + %if stack_size_padded > 0 + %if required_stack_alignment > STACK_ALIGNMENT + mov rsp, rstkm + %else + add rsp, stack_size_padded + %endif + %endif POP_IF_USED 14, 13, 12, 11, 10, 9 -%if mmsize == 32 - vzeroupper -%endif + %if mmsize == 32 + vzeroupper + %endif AUTO_REP_RET %endmacro @@ -599,29 +599,29 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14 %define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0 %macro RET 0 -%if stack_size_padded > 0 -%if required_stack_alignment > STACK_ALIGNMENT - mov rsp, rstkm -%else - add rsp, stack_size_padded -%endif -%endif + %if stack_size_padded > 0 + %if required_stack_alignment > STACK_ALIGNMENT + mov rsp, rstkm + %else + add rsp, stack_size_padded + %endif + %endif POP_IF_USED 6, 5, 4, 3 -%if mmsize == 32 - vzeroupper -%endif + %if mmsize == 32 + vzeroupper + %endif AUTO_REP_RET %endmacro %endif ;====================================================================== %if WIN64 == 0 -%macro WIN64_SPILL_XMM 1 -%endmacro -%macro WIN64_RESTORE_XMM 1 -%endmacro -%macro WIN64_PUSH_XMM 0 -%endmacro + %macro WIN64_SPILL_XMM 1 + %endmacro + %macro WIN64_RESTORE_XMM 1 + %endmacro + %macro WIN64_PUSH_XMM 0 + %endmacro %endif ; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either @@ -847,14 +847,14 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, %define movnta movntq %assign %%i 0 %rep 8 - 
CAT_XDEFINE m, %%i, mm %+ %%i - CAT_XDEFINE nnmm, %%i, %%i - %assign %%i %%i+1 + CAT_XDEFINE m, %%i, mm %+ %%i + CAT_XDEFINE nnmm, %%i, %%i + %assign %%i %%i+1 %endrep %rep 8 - CAT_UNDEF m, %%i - CAT_UNDEF nnmm, %%i - %assign %%i %%i+1 + CAT_UNDEF m, %%i + CAT_UNDEF nnmm, %%i + %assign %%i %%i+1 %endrep INIT_CPUFLAGS %1 %endmacro @@ -865,7 +865,7 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, %define mmsize 16 %define num_mmregs 8 %if ARCH_X86_64 - %define num_mmregs 16 + %define num_mmregs 16 %endif %define mova movdqa %define movu movdqu @@ -873,9 +873,9 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, %define movnta movntdq %assign %%i 0 %rep num_mmregs - CAT_XDEFINE m, %%i, xmm %+ %%i - CAT_XDEFINE nnxmm, %%i, %%i - %assign %%i %%i+1 + CAT_XDEFINE m, %%i, xmm %+ %%i + CAT_XDEFINE nnxmm, %%i, %%i + %assign %%i %%i+1 %endrep INIT_CPUFLAGS %1 %endmacro @@ -886,7 +886,7 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, %define mmsize 32 %define num_mmregs 8 %if ARCH_X86_64 - %define num_mmregs 16 + %define num_mmregs 16 %endif %define mova movdqa %define movu movdqu @@ -894,9 +894,9 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, %define movnta movntdq %assign %%i 0 %rep num_mmregs - CAT_XDEFINE m, %%i, ymm %+ %%i - CAT_XDEFINE nnymm, %%i, %%i - %assign %%i %%i+1 + CAT_XDEFINE m, %%i, ymm %+ %%i + CAT_XDEFINE nnymm, %%i, %%i + %assign %%i %%i+1 %endrep INIT_CPUFLAGS %1 %endmacro @@ -920,7 +920,7 @@ INIT_XMM %assign i 0 %rep 16 DECLARE_MMCAST i -%assign i i+1 + %assign i i+1 %endrep ; I often want to use macros that permute their arguments. e.g. there's no @@ -938,23 +938,23 @@ INIT_XMM ; doesn't cost any cycles. %macro PERMUTE 2-* ; takes a list of pairs to swap -%rep %0/2 - %xdefine %%tmp%2 m%2 - %rotate 2 -%endrep -%rep %0/2 - %xdefine m%1 %%tmp%2 - CAT_XDEFINE nn, m%1, %1 - %rotate 2 -%endrep + %rep %0/2 + %xdefine %%tmp%2 m%2 + %rotate 2 + %endrep + %rep %0/2 + %xdefine m%1 %%tmp%2 + CAT_XDEFINE nn, m%1, %1 + %rotate 2 + %endrep %endmacro %macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs) -%ifnum %1 ; SWAP 0, 1, ... - SWAP_INTERNAL_NUM %1, %2 -%else ; SWAP m0, m1, ... - SWAP_INTERNAL_NAME %1, %2 -%endif + %ifnum %1 ; SWAP 0, 1, ... + SWAP_INTERNAL_NUM %1, %2 + %else ; SWAP m0, m1, ... 
+ SWAP_INTERNAL_NAME %1, %2 + %endif %endmacro %macro SWAP_INTERNAL_NUM 2-* @@ -964,7 +964,7 @@ INIT_XMM %xdefine m%2 %%tmp CAT_XDEFINE nn, m%1, %1 CAT_XDEFINE nn, m%2, %2 - %rotate 1 + %rotate 1 %endrep %endmacro @@ -972,7 +972,7 @@ INIT_XMM %xdefine %%args nn %+ %1 %rep %0-1 %xdefine %%args %%args, nn %+ %2 - %rotate 1 + %rotate 1 %endrep SWAP_INTERNAL_NUM %%args %endmacro @@ -989,7 +989,7 @@ INIT_XMM %assign %%i 0 %rep num_mmregs CAT_XDEFINE %%f, %%i, m %+ %%i - %assign %%i %%i+1 + %assign %%i %%i+1 %endrep %endmacro @@ -999,7 +999,7 @@ INIT_XMM %rep num_mmregs CAT_XDEFINE m, %%i, %1_m %+ %%i CAT_XDEFINE nn, m %+ %%i, %%i - %assign %%i %%i+1 + %assign %%i %%i+1 %endrep %endif %endmacro @@ -1055,7 +1055,7 @@ INIT_XMM %endif CAT_XDEFINE sizeofxmm, i, 16 CAT_XDEFINE sizeofymm, i, 32 -%assign i i+1 + %assign i i+1 %endrep %undef i @@ -1432,7 +1432,7 @@ AVX_INSTR pfmul, 3dnow, 1, 0, 1 %else CAT_XDEFINE q, j, i %endif -%assign i i+1 + %assign i i+1 %endrep %undef i %undef j From fd6ecac38eb382d1ed41c2ceafa052a3eb6593fc Mon Sep 17 00:00:00 2001 From: Henrik Gramner Date: Mon, 18 Jan 2016 00:21:49 +0100 Subject: [PATCH 6/7] x86inc: Simplify AUTO_REP_RET cpuflags is never undefined any more, it's set to 0 instead. Also fix an incorrect comment. Signed-off-by: Anton Khirnov --- libavutil/x86/x86inc.asm | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/libavutil/x86/x86inc.asm b/libavutil/x86/x86inc.asm index e971a0a93e..ff51a2704b 100644 --- a/libavutil/x86/x86inc.asm +++ b/libavutil/x86/x86inc.asm @@ -638,10 +638,8 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14 %define last_branch_adr $$ %macro AUTO_REP_RET 0 - %ifndef cpuflags - times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ != last_branch_adr. - %elif notcpuflag(ssse3) - times ((last_branch_adr-$)>>31)+1 rep + %if notcpuflag(ssse3) + times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ == last_branch_adr. %endif ret %endmacro From 002c47798da0c43a053822c8041144798d49ed84 Mon Sep 17 00:00:00 2001 From: Henrik Gramner Date: Mon, 18 Jan 2016 00:21:50 +0100 Subject: [PATCH 7/7] x86inc: Avoid creating unnecessary local labels The REP_RET workaround is only needed on old AMD cpus, and the labels clutter up the symbol table and confuse debugging/profiling tools, so use EQU to create SHN_ABS symbols instead of creating local labels. Furthermore, skip the workaround completely in functions that definitely won't run on such cpus. Note that EQU is just creating a local label when using nasm instead of yasm. This is probably a bug, but at least it doesn't break anything. Signed-off-by: Anton Khirnov --- libavutil/x86/x86inc.asm | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/libavutil/x86/x86inc.asm b/libavutil/x86/x86inc.asm index ff51a2704b..8a74830a20 100644 --- a/libavutil/x86/x86inc.asm +++ b/libavutil/x86/x86inc.asm @@ -648,8 +648,10 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14 %rep %0 %macro %1 1-2 %1 %2 %1 - %%branch_instr: - %xdefine last_branch_adr %%branch_instr + %if notcpuflag(ssse3) + %%branch_instr equ $ + %xdefine last_branch_adr %%branch_instr + %endif %endmacro %rotate 1 %endrep
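For reference, a small usage sketch of the reworked mechanism (illustrative only; the labels and instructions are invented, and it assumes a pre-SSSE3 function with no epilogue code). Each wrapped branch instruction records the address immediately following the branch in last_branch_adr, now as an EQU constant instead of a local label, and AUTO_REP_RET emits "times ((last_branch_adr-$)>>31)+1 rep" before the ret, so a rep prefix is produced only when the ret immediately follows a recorded branch:

    ; hypothetical example, e.g. inside an INIT_XMM sse2 function:
        jz .end          ; the jz wrapper records last_branch_adr = address after the branch
    .end:
        RET              ; $ == last_branch_adr, so this assembles as "rep ret"

    ; with another instruction in between, the workaround is skipped:
        jz .done
    .done:
        add r0, r1
        RET              ; $ > last_branch_adr, the times count is 0, plain "ret"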