Mirror of https://github.com/FFmpeg/FFmpeg.git
bbe95f7353

From x86inc:
> On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
> a branch or a branch target. So switch to a 2-byte form of ret in that case.
> We can automatically detect "follows a branch", but not a branch target.
> (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)

x86inc can automatically determine whether to use REP_RET rather than RET in most of these cases, so the impact is minimal. Additionally, a few REP_RETs were used unnecessarily, despite the return being nowhere near a branch.

The only CPUs affected were AMD K10s, made between 2007 and 2011, i.e. 16 and 12 years ago, respectively.

In the future, everyone involved with x86inc should consider dropping REP_RETs altogether.
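To make the RET/REP_RET distinction concrete, here is a minimal sketch using the x86inc conventions described above; the function and label names are made up for illustration and do not appear in this file:

    %include "libavutil/x86/x86util.asm"

    SECTION .text

    ; Hypothetical kernels, only to illustrate the two return forms.
    cglobal repret_demo, 3,3
        test r2d, r2d
        jz .tail            ; the return below is this jump's *target*
        add  r0, r1
    .tail:
        REP_RET             ; branch target: x86inc cannot detect this case
                            ; automatically, so the 2-byte `rep ret` form is
                            ; requested explicitly (only matters pre-SSSE3)

    cglobal repret_demo2, 1,1
    .loop:
        dec r0d
        jg .loop
        RET                 ; immediately follows a branch: a plain RET is
                            ; enough, x86inc expands it to `rep ret` itself
                            ; when the code is not built with the ssse3 cpuflag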
270 lines · 7.7 KiB · NASM
;*****************************************************************************
;* MMX/SSE2/AVX-optimized 10-bit H.264 chroma MC code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

cextern pw_4
cextern pw_8
cextern pw_32
cextern pw_64

SECTION .text

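; MV0_PIXELS_MC8: mx == my == 0 fast path for the mc8 functions below. No
; interpolation is needed, so each iteration just copies (or, when
; CHROMAMC_AVG is AVG, averages into dst) four 8-pixel rows at a time.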
%macro MV0_PIXELS_MC8 0
    lea           r4, [r2*3]
    lea           r5, [r2*4]
.next4rows:
    movu          m0, [r1]
    movu          m1, [r1+r2]
    CHROMAMC_AVG  m0, [r0]
    CHROMAMC_AVG  m1, [r0+r2]
    mova   [r0     ], m0
    mova   [r0+r2  ], m1
    movu          m0, [r1+r2*2]
    movu          m1, [r1+r4]
    CHROMAMC_AVG  m0, [r0+r2*2]
    CHROMAMC_AVG  m1, [r0+r4]
    mova   [r0+r2*2], m0
    mova   [r0+r4  ], m1
    add           r1, r5
    add           r0, r5
    sub          r3d, 4
    jne .next4rows
%endmacro

;-----------------------------------------------------------------------------
; void ff_put/avg_h264_chroma_mc8(pixel *dst, const pixel *src, ptrdiff_t stride,
;                                 int h, int mx, int my)
;-----------------------------------------------------------------------------
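; For reference, with x = mx and y = my the general (bilinear) case below
; computes, per pixel i of each row:
;   dst[i] = (A*src[i] + B*src[i+1] + C*src[i+stride] + D*src[i+stride+1] + 32) >> 6
; with A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y, D = x*y, while the 1-D case
; (mx == 0 or my == 0) reduces to (A*src[i] + B*src[i+d] + 4) >> 3, where d is
; one pixel or one row as selected by r6, B = mx+my and A = 8-B.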
%macro CHROMA_MC8 1
cglobal %1_h264_chroma_mc8_10, 6,7,8
    mov          r6d, r5d
    or           r6d, r4d
    jne .at_least_one_non_zero
    ; mx == 0 AND my == 0 - no filter needed
    MV0_PIXELS_MC8
    RET

.at_least_one_non_zero:
    mov          r6d, 2
    test         r5d, r5d
    je .x_interpolation
    mov           r6, r2        ; dxy = x ? 2 : stride (one 10-bit pixel = 2 bytes)
    test         r4d, r4d
    jne .xy_interpolation
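; The 1-D path is reached when exactly one of mx/my is zero; r6 holds the
; offset of the second source sample (2 bytes when interpolating horizontally,
; stride when interpolating vertically), so one loop serves both cases.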
.x_interpolation:
    ; mx == 0 XOR my == 0 - 1 dimensional filter only
    or           r4d, r5d       ; x + y
    movd          m5, r4d
    mova          m4, [pw_8]
    mova          m6, [pw_4]    ; mm6 = rnd >> 3
    SPLATW        m5, m5        ; mm5 = B = x
    psubw         m4, m5        ; mm4 = A = 8-x

.next1drow:
    movu          m0, [r1]      ; mm0 = src[0..7]
    movu          m2, [r1+r6]   ; mm2 = src[1..8]

    pmullw        m0, m4        ; mm0 = A * src[0..7]
    pmullw        m2, m5        ; mm2 = B * src[1..8]

    paddw         m0, m6
    paddw         m0, m2
    psrlw         m0, 3
    CHROMAMC_AVG  m0, [r0]
    mova        [r0], m0        ; dst[0..7] = (A * src[0..7] + B * src[1..8] + (rnd >> 3)) >> 3

    add           r0, r2
    add           r1, r2
    dec          r3d
    jne .next1drow
    RET

.xy_interpolation: ; general case, bilinear
    movd          m4, r4m       ; x
    movd          m6, r5m       ; y

    SPLATW        m4, m4        ; mm4 = x words
    SPLATW        m6, m6        ; mm6 = y words
    psllw         m5, m4, 3     ; mm5 = 8x
    pmullw        m4, m6        ; mm4 = x * y
    psllw         m6, 3         ; mm6 = 8y
    paddw         m1, m5, m6    ; mm1 = 8x+8y
    mova          m7, m4        ; DD = x * y
    psubw         m5, m4        ; mm5 = B = 8x - xy
    psubw         m6, m4        ; mm6 = C = 8y - xy
    paddw         m4, [pw_64]
    psubw         m4, m1        ; mm4 = A = xy - (8x+8y) + 64

    movu          m0, [r1]      ; mm0 = src[0..7]
    movu          m1, [r1+2]    ; mm1 = src[1..8]
.next2drow:
    add           r1, r2

    pmullw        m2, m0, m4
    pmullw        m1, m5
    paddw         m2, m1        ; mm2 = A * src[0..7] + B * src[1..8]

    movu          m0, [r1]
    movu          m1, [r1+2]
    pmullw        m3, m0, m6
    paddw         m2, m3        ; mm2 += C * src[0..7+stride]
    pmullw        m3, m1, m7
    paddw         m2, m3        ; mm2 += D * src[1..8+stride]

    paddw         m2, [pw_32]
    psrlw         m2, 6
    CHROMAMC_AVG  m2, [r0]
    mova        [r0], m2        ; dst[0..7] = (mm2 + 32) >> 6

    add           r0, r2
    dec          r3d
    jne .next2drow
    RET
%endmacro

;-----------------------------------------------------------------------------
; void ff_put/avg_h264_chroma_mc4(pixel *dst, const pixel *src, ptrdiff_t stride,
;                                 int h, int mx, int my)
;-----------------------------------------------------------------------------
;TODO: xmm mc4
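; MC4_OP %1, %2: emit one 4-pixel output row. On entry %2 holds the previous
; row's horizontal blend; %1 receives the current row's, which the next call
; reuses as its %2:
;   %1  = (8-x)*src[0..3] + x*src[1..4]
;   dst = ((8-y)*%2 + y*%1 + 32) >> 6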
%macro MC4_OP 2
    movq          %1, [r1]
    movq          m1, [r1+2]
    add           r1, r2
    pmullw        %1, m4
    pmullw        m1, m2
    paddw         m1, %1
    mova          %1, m1

    pmullw        %2, m5
    pmullw        m1, m3
    paddw         %2, [pw_32]
    paddw         m1, %2
    psrlw         m1, 6
    CHROMAMC_AVG  m1, %2, [r0]
    movq        [r0], m1
    add           r0, r2
%endmacro

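; CHROMA_MC4: always runs the full bilinear path. The prologue computes the
; per-direction weights (m2 = x, m3 = y, m4 = 8-x, m5 = 8-y) and the first
; row's horizontal blend, then MC4_OP produces two output rows per loop
; iteration, alternating m0/m6 as the carried horizontal blend.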
%macro CHROMA_MC4 1
cglobal %1_h264_chroma_mc4_10, 6,6,7
    movd          m2, r4m       ; x
    movd          m3, r5m       ; y
    mova          m4, [pw_8]
    mova          m5, m4
    SPLATW        m2, m2
    SPLATW        m3, m3
    psubw         m4, m2
    psubw         m5, m3

    movq          m0, [r1]
    movq          m6, [r1+2]
    add           r1, r2
    pmullw        m0, m4
    pmullw        m6, m2
    paddw         m6, m0

.next2rows:
    MC4_OP m0, m6
    MC4_OP m6, m0
    sub          r3d, 2
    jnz .next2rows
    RET
%endmacro

;-----------------------------------------------------------------------------
; void ff_put/avg_h264_chroma_mc2(pixel *dst, const pixel *src, ptrdiff_t stride,
;                                 int h, int mx, int my)
;-----------------------------------------------------------------------------
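; CHROMA_MC2: the scalar prologue packs the four bilinear weights into two
; word pairs, A = (8-x)*(8-y) and B = x*(8-y) in r4d, C = (8-x)*y and D = x*y
; in r5d, so that a single pmaddwd against the src[0,1,1,2] word arrangement
; does the whole multiply-accumulate for both output pixels of a row.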
%macro CHROMA_MC2 1
cglobal %1_h264_chroma_mc2_10, 6,7
    mov          r6d, r4d
    shl          r4d, 16
    sub          r4d, r6d
    add          r4d, 8
    imul         r5d, r4d       ; x*y<<16 | y*(8-x)
    shl          r4d, 3
    sub          r4d, r5d       ; x*(8-y)<<16 | (8-x)*(8-y)

    movd          m5, r4d
    movd          m6, r5d
    punpckldq     m5, m5        ; mm5 = {A,B,A,B}
    punpckldq     m6, m6        ; mm6 = {C,D,C,D}
    pxor          m7, m7
    pshufw        m2, [r1], 0x94 ; mm2 = src[0,1,1,2]

.nextrow:
    add           r1, r2
    movq          m1, m2
    pmaddwd       m1, m5        ; mm1 = A * src[0,1] + B * src[1,2]
    pshufw        m0, [r1], 0x94 ; mm0 = src[0,1,1,2]
    movq          m2, m0
    pmaddwd       m0, m6
    paddw         m1, [pw_32]
    paddw         m1, m0        ; mm1 += C * src[0,1] + D * src[1,2]
    psrlw         m1, 6
    packssdw      m1, m7
    CHROMAMC_AVG  m1, m3, [r0]
    movd        [r0], m1
    add           r0, r2
    dec          r3d
    jnz .nextrow
    RET
%endmacro

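; CHROMAMC_AVG is bound to NOTHING for the put_ variants (plain store) and to
; AVG for the avg_ variants (pavgw with the existing dst contents) before each
; round of instantiations below.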
%macro NOTHING 2-3
%endmacro
%macro AVG 2-3
%if %0==3
    movq          %2, %3
%endif
    pavgw         %1, %2
%endmacro

%define CHROMAMC_AVG NOTHING
INIT_XMM sse2
CHROMA_MC8 put
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CHROMA_MC8 put
%endif
INIT_MMX mmxext
CHROMA_MC4 put
CHROMA_MC2 put

%define CHROMAMC_AVG AVG
INIT_XMM sse2
CHROMA_MC8 avg
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CHROMA_MC8 avg
%endif
INIT_MMX mmxext
CHROMA_MC4 avg
CHROMA_MC2 avg