
swscale/x86/input: add AVX2 optimized RGB24 to YUV functions

rgb24_to_uv_8_c: 39.3
rgb24_to_uv_8_sse2: 14.3
rgb24_to_uv_8_ssse3: 13.3
rgb24_to_uv_8_avx: 12.8
rgb24_to_uv_8_avx2: 14.3
rgb24_to_uv_128_c: 582.8
rgb24_to_uv_128_sse2: 127.3
rgb24_to_uv_128_ssse3: 107.3
rgb24_to_uv_128_avx: 111.3
rgb24_to_uv_128_avx2: 62.3
rgb24_to_uv_1080_c: 4981.3
rgb24_to_uv_1080_sse2: 1048.3
rgb24_to_uv_1080_ssse3: 876.8
rgb24_to_uv_1080_avx: 887.8
rgb24_to_uv_1080_avx2: 492.3
rgb24_to_uv_1280_c: 5906.8
rgb24_to_uv_1280_sse2: 1263.3
rgb24_to_uv_1280_ssse3: 1048.3
rgb24_to_uv_1280_avx: 1045.8
rgb24_to_uv_1280_avx2: 579.8
rgb24_to_uv_1920_c: 8665.3
rgb24_to_uv_1920_sse2: 1888.8
rgb24_to_uv_1920_ssse3: 1571.8
rgb24_to_uv_1920_avx: 1558.8
rgb24_to_uv_1920_avx2: 869.3
rgb24_to_y_8_c: 20.3
rgb24_to_y_8_sse2: 11.8
rgb24_to_y_8_ssse3: 10.3
rgb24_to_y_8_avx: 10.3
rgb24_to_y_8_avx2: 10.8
rgb24_to_y_128_c: 284.8
rgb24_to_y_128_sse2: 83.3
rgb24_to_y_128_ssse3: 66.8
rgb24_to_y_128_avx: 64.8
rgb24_to_y_128_avx2: 39.3
rgb24_to_y_1080_c: 2451.3
rgb24_to_y_1080_sse2: 696.3
rgb24_to_y_1080_ssse3: 516.8
rgb24_to_y_1080_avx: 518.8
rgb24_to_y_1080_avx2: 301.8
rgb24_to_y_1280_c: 2892.8
rgb24_to_y_1280_sse2: 816.8
rgb24_to_y_1280_ssse3: 623.3
rgb24_to_y_1280_avx: 616.3
rgb24_to_y_1280_avx2: 350.8
rgb24_to_y_1920_c: 4338.8
rgb24_to_y_1920_sse2: 1210.8
rgb24_to_y_1920_ssse3: 928.3
rgb24_to_y_1920_avx: 920.3
rgb24_to_y_1920_avx2: 534.8

Signed-off-by: James Almer <jamrial@gmail.com>
James Almer 2024-06-04 11:53:28 -03:00
parent 6743c2fc6a
commit d5fe99dc5f
2 changed files with 76 additions and 28 deletions
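
For context, the scalar rgb24-to-Y path that these functions vectorize is a
per-pixel multiply-accumulate with the 15-bit fixed-point, limited-range
coefficients defined in input.asm (RY and GY are visible in the first hunk
below; BY = 0x0C88 comes from the same table). A minimal C sketch of that
math follows; the rounding constant is inferred by analogy with the rgb_UVrnd
value shown in the diff rather than copied from libswscale's rgb24ToY_c:

#include <stdint.h>

#define RY 0x20DE
#define GY 0x4087
#define BY 0x0C88

/* Hedged sketch of the scalar reference the SIMD loops implement. */
static void rgb24_to_y_ref(int16_t *dst, const uint8_t *src, int width)
{
    for (int i = 0; i < width; i++) {
        int r = src[3 * i + 0];
        int g = src[3 * i + 1];
        int b = src[3 * i + 2];
        /* 0x80100 = (16 << 15) + (1 << 8): the limited-range luma offset at
         * 15-bit scale plus rounding for the >> 9; the result is swscale's
         * 16-bit intermediate (the 8-bit luma scaled by 64). */
        dst[i] = (RY * r + GY * g + BY * b + 0x80100) >> 9;
    }
}

The U and V outputs are computed the same way with the U/V coefficient tables
and rgb_UVrnd supplying the 128 chroma offset; the SSE2/SSSE3/AVX/AVX2 code
below performs exactly this arithmetic with byte shuffles/unpacks plus
pmaddwd on 8 (XMM) or 16 (YMM) pixels per loop iteration.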

libswscale/x86/input.asm

@@ -23,7 +23,7 @@
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
SECTION_RODATA 32
%define RY 0x20DE
%define GY 0x4087
@@ -90,8 +90,12 @@ rgb_UVrnd: times 4 dd 0x400100 ; 128.5 << 15
; rgba_Vcoeff_ag: times 4 dw 0, GV
shuf_rgb_12x4: db 0, 0x80, 1, 0x80, 2, 0x80, 3, 0x80, \
6, 0x80, 7, 0x80, 8, 0x80, 9, 0x80, \
0, 0x80, 1, 0x80, 2, 0x80, 3, 0x80, \
6, 0x80, 7, 0x80, 8, 0x80, 9, 0x80
shuf_rgb_3x56: db 2, 0x80, 3, 0x80, 4, 0x80, 5, 0x80, \
8, 0x80, 9, 0x80, 10, 0x80, 11, 0x80, \
2, 0x80, 3, 0x80, 4, 0x80, 5, 0x80, \
8, 0x80, 9, 0x80, 10, 0x80, 11, 0x80
pd_65535f: times 8 dd 65535.0
pb_pack_shuffle16le: db 0, 1, 4, 5, \
@@ -134,8 +138,13 @@ SECTION .text
%macro RGB24_TO_Y_FN 2-3
cglobal %2 %+ 24ToY, 6, 6, %1, dst, src, u1, u2, w, table
%if ARCH_X86_64
%if mmsize == 32
vbroadcasti128 m8, [%2_Ycoeff_12x4]
vbroadcasti128 m9, [%2_Ycoeff_3x56]
%else
mova m8, [%2_Ycoeff_12x4]
mova m9, [%2_Ycoeff_3x56]
%endif
%define coeff1 m8
%define coeff2 m9
%else ; x86-32
@@ -165,11 +174,19 @@ cglobal %2 %+ 24ToY, 6, 6, %1, dst, src, u1, u2, w, table
%if notcpuflag(ssse3)
pxor m7, m7
%endif ; !cpuflag(ssse3)
%if mmsize == 32
vbroadcasti128 m4, [rgb_Yrnd]
%else
mova m4, [rgb_Yrnd]
%endif
.loop:
%if cpuflag(ssse3)
movu m0, [srcq+0] ; (byte) { Bx, Gx, Rx }[0-3]
movu m2, [srcq+12] ; (byte) { Bx, Gx, Rx }[4-7]
movu xm0, [srcq+0] ; (byte) { Bx, Gx, Rx }[0-3]
movu xm2, [srcq+12] ; (byte) { Bx, Gx, Rx }[4-7]
%if mmsize == 32
vinserti128 m0, m0, [srcq+24], 1
vinserti128 m2, m2, [srcq+36], 1
%endif
pshufb m1, m0, shuf_rgb2 ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
pshufb m0, shuf_rgb1 ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
pshufb m3, m2, shuf_rgb2 ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
@@ -212,27 +229,35 @@ cglobal %2 %+ 24ToY, 6, 6, %1, dst, src, u1, u2, w, table
%endmacro
; %1 = nr. of XMM registers
; %2 = rgb or bgr
%macro RGB24_TO_UV_FN 2-3
cglobal %2 %+ 24ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, table
; %2 = aligned/unaligned output argument
; %3-4 = rgb or bgr
%macro RGB24_TO_UV_FN 3-4
cglobal %3 %+ 24ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, table
%if ARCH_X86_64
mova m8, [%2_Ucoeff_12x4]
mova m9, [%2_Ucoeff_3x56]
mova m10, [%2_Vcoeff_12x4]
mova m11, [%2_Vcoeff_3x56]
%if mmsize == 32
vbroadcasti128 m8, [%3_Ucoeff_12x4]
vbroadcasti128 m9, [%3_Ucoeff_3x56]
vbroadcasti128 m10, [%3_Vcoeff_12x4]
vbroadcasti128 m11, [%3_Vcoeff_3x56]
%else
mova m8, [%3_Ucoeff_12x4]
mova m9, [%3_Ucoeff_3x56]
mova m10, [%3_Vcoeff_12x4]
mova m11, [%3_Vcoeff_3x56]
%endif
%define coeffU1 m8
%define coeffU2 m9
%define coeffV1 m10
%define coeffV2 m11
%else ; x86-32
%define coeffU1 [%2_Ucoeff_12x4]
%define coeffU2 [%2_Ucoeff_3x56]
%define coeffV1 [%2_Vcoeff_12x4]
%define coeffV2 [%2_Vcoeff_3x56]
%define coeffU1 [%3_Ucoeff_12x4]
%define coeffU2 [%3_Ucoeff_3x56]
%define coeffV1 [%3_Vcoeff_12x4]
%define coeffV2 [%3_Vcoeff_3x56]
%endif ; x86-32/64
%if ARCH_X86_64 && %0 == 3
jmp mangle(private_prefix %+ _ %+ %3 %+ 24ToUV %+ SUFFIX).body
%else ; ARCH_X86_64 && %0 == 3
%if ARCH_X86_64 && %0 == 4
jmp mangle(private_prefix %+ _ %+ %4 %+ 24ToUV %+ SUFFIX).body
%else ; ARCH_X86_64 && %0 == 4
.body:
%if cpuflag(ssse3)
mova m7, [shuf_rgb_12x4]
@@ -253,14 +278,22 @@ cglobal %2 %+ 24ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, table
add dstUq, wq
add dstVq, wq
neg wq
%if mmsize == 32
vbroadcasti128 m6, [rgb_UVrnd]
%else
mova m6, [rgb_UVrnd]
%endif
%if notcpuflag(ssse3)
pxor m7, m7
%endif
.loop:
%if cpuflag(ssse3)
movu m0, [srcq+0] ; (byte) { Bx, Gx, Rx }[0-3]
movu m4, [srcq+12] ; (byte) { Bx, Gx, Rx }[4-7]
movu xm0, [srcq+0] ; (byte) { Bx, Gx, Rx }[0-3]
movu xm4, [srcq+12] ; (byte) { Bx, Gx, Rx }[4-7]
%if mmsize == 32
vinserti128 m0, m0, [srcq+24], 1
vinserti128 m4, m4, [srcq+36], 1
%endif
pshufb m1, m0, shuf_rgb2 ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
pshufb m0, shuf_rgb1 ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
%else ; !cpuflag(ssse3)
@@ -309,32 +342,40 @@ cglobal %2 %+ 24ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, table
psrad m4, 9
packssdw m0, m1 ; (word) { U[0-7] }
packssdw m2, m4 ; (word) { V[0-7] }
mova [dstUq+wq], m0
mova [dstVq+wq], m2
mov%2 [dstUq+wq], m0
mov%2 [dstVq+wq], m2
add wq, mmsize
jl .loop
RET
%endif ; ARCH_X86_64 && %0 == 3
%endif ; ARCH_X86_64 && %0 == 4
%endmacro
; %1 = nr. of XMM registers for rgb-to-Y func
; %2 = nr. of XMM registers for rgb-to-UV func
%macro RGB24_FUNCS 2
; %3 = aligned/unaligned output argument
%macro RGB24_FUNCS 3
RGB24_TO_Y_FN %1, rgb
RGB24_TO_Y_FN %1, bgr, rgb
RGB24_TO_UV_FN %2, rgb
RGB24_TO_UV_FN %2, bgr, rgb
RGB24_TO_UV_FN %2, %3, rgb
RGB24_TO_UV_FN %2, %3, bgr, rgb
%endmacro
INIT_XMM sse2
RGB24_FUNCS 10, 12
RGB24_FUNCS 10, 12, a
INIT_XMM ssse3
RGB24_FUNCS 11, 13
RGB24_FUNCS 11, 13, a
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
RGB24_FUNCS 11, 13
RGB24_FUNCS 11, 13, a
%endif
%if ARCH_X86_64
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
RGB24_FUNCS 11, 13, u
%endif
%endif
; %1 = nr. of XMM registers
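
The AVX2 variants keep the SSSE3 algorithm and simply run it once per 128-bit
lane: SECTION_RODATA 32 plus the doubled shuf_rgb_* tables provide 32-byte
pshufb masks, the coefficient and rounding rows are vbroadcasti128'd into both
lanes, and each iteration pulls a second pair of 12-byte RGB groups into the
high lane with vinserti128, so a YMM loop produces 16 pixels where the XMM
loop produced 8. The UV macro also gains an aligned/unaligned output argument
(mov%2, with the AVX2 instantiation passing "u"), presumably because the
32-byte stores cannot be assumed to land on 32-byte-aligned addresses. A rough
C-intrinsics sketch of the gather step, with placeholder mask/coefficient
arguments (illustrative only, not the FFmpeg implementation):

#include <immintrin.h>
#include <stdint.h>

/* 'shuf' stands in for one of the doubled shuf_rgb_* masks and 'coeff' for a
 * vbroadcasti128'd coefficient row; their exact contents are in input.asm. */
static inline __m256i rgb24_gather_madd(const uint8_t *src,
                                        __m256i shuf, __m256i coeff)
{
    /* low lane: the 12-byte group at src+0, high lane: the group at src+24,
     * mirroring "movu xm0, [srcq]" + "vinserti128 m0, m0, [srcq+24], 1" */
    __m128i lo = _mm_loadu_si128((const __m128i *)(src +  0));
    __m128i hi = _mm_loadu_si128((const __m128i *)(src + 24));
    __m256i px = _mm256_inserti128_si256(_mm256_castsi128_si256(lo), hi, 1);

    /* vpshufb: expand selected R/G/B bytes to words (0x80 mask entries give 0) */
    __m256i words = _mm256_shuffle_epi8(px, shuf);
    /* vpmaddwd: multiply by the coefficients and add neighbouring products */
    return _mm256_madd_epi16(words, coeff);
}

The real loop runs this twice per group (the _12x4 and _3x56 mask/coefficient
pairs), adds the two partial sums, then applies the rounding constant, psrad 9
and packssdw exactly as the pre-existing XMM path does.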

libswscale/x86/swscale.c

@@ -321,6 +321,8 @@ void ff_ ## fmt ## ToUV_ ## opt(uint8_t *dstU, uint8_t *dstV, \
INPUT_FUNCS(sse2);
INPUT_FUNCS(ssse3);
INPUT_FUNCS(avx);
INPUT_FUNC(rgb24, avx2);
INPUT_FUNC(bgr24, avx2);
#if ARCH_X86_64
#define YUV2NV_DECL(fmt, opt) \
@@ -634,6 +636,11 @@ switch(c->dstBpc){ \
}
if (EXTERNAL_AVX2_FAST(cpu_flags)) {
if (ARCH_X86_64)
switch (c->srcFormat) {
case_rgb(rgb24, RGB24, avx2);
case_rgb(bgr24, BGR24, avx2);
}
switch (c->dstFormat) {
case AV_PIX_FMT_NV12:
case AV_PIX_FMT_NV24:
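
On the C side, INPUT_FUNC(rgb24, avx2) and INPUT_FUNC(bgr24, avx2) declare the
new symbols, and the case_rgb() entries, guarded by EXTERNAL_AVX2_FAST and
ARCH_X86_64, point the per-format input converters at them. A deliberately
simplified sketch of that dispatch; every struct, field and function name
below is a placeholder, not the real SwsContext members or the ff_*_avx2
prototypes:

#include <stdint.h>

typedef void (*lum_convert_fn)(int16_t *dst, const uint8_t *src, int width);

/* stand-in for the assembly converter; placeholder math only */
static void rgb24_to_y_stub(int16_t *dst, const uint8_t *src, int width)
{
    for (int i = 0; i < width; i++)
        dst[i] = (int16_t)(src[3 * i + 1] << 6);
}

struct ctx {
    int            src_is_rgb24;   /* stands in for the c->srcFormat switch */
    lum_convert_fn lum_to_y;       /* stands in for c->lumToYV12            */
};

static void init_input_converters(struct ctx *c, int have_avx2_fast, int is_x86_64)
{
    /* mirrors: if (EXTERNAL_AVX2_FAST(cpu_flags)) { if (ARCH_X86_64) switch (...) } */
    if (have_avx2_fast && is_x86_64 && c->src_is_rgb24)
        c->lum_to_y = rgb24_to_y_stub;
}

The chroma converter (chrToYV12 in the real context) is wired to the matching
ToUV function in the same switch, which is why both ff_*ToY_avx2 and
ff_*ToUV_avx2 are declared above.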