mirror of https://github.com/FFmpeg/FFmpeg.git
avcodec/x86/vvc: add alf filter luma and chroma avx2 optimizations
ff_vvc_alf_filter_luma_4x4_10_c: 135       ff_vvc_alf_filter_luma_4x4_10_avx2: 54
ff_vvc_alf_filter_luma_4x8_10_c: 268       ff_vvc_alf_filter_luma_4x8_10_avx2: 106
ff_vvc_alf_filter_luma_4x12_10_c: 400      ff_vvc_alf_filter_luma_4x12_10_avx2: 160
ff_vvc_alf_filter_luma_4x16_10_c: 535      ff_vvc_alf_filter_luma_4x16_10_avx2: 213
ff_vvc_alf_filter_luma_4x20_10_c: 646      ff_vvc_alf_filter_luma_4x20_10_avx2: 262
ff_vvc_alf_filter_luma_4x24_10_c: 783      ff_vvc_alf_filter_luma_4x24_10_avx2: 309
ff_vvc_alf_filter_luma_4x28_10_c: 908      ff_vvc_alf_filter_luma_4x28_10_avx2: 361
ff_vvc_alf_filter_luma_4x32_10_c: 1039     ff_vvc_alf_filter_luma_4x32_10_avx2: 412
ff_vvc_alf_filter_luma_8x4_10_c: 260       ff_vvc_alf_filter_luma_8x4_10_avx2: 53
ff_vvc_alf_filter_luma_8x8_10_c: 516       ff_vvc_alf_filter_luma_8x8_10_avx2: 105
ff_vvc_alf_filter_luma_8x12_10_c: 779      ff_vvc_alf_filter_luma_8x12_10_avx2: 157
ff_vvc_alf_filter_luma_8x16_10_c: 1038     ff_vvc_alf_filter_luma_8x16_10_avx2: 210
ff_vvc_alf_filter_luma_8x20_10_c: 1293     ff_vvc_alf_filter_luma_8x20_10_avx2: 259
ff_vvc_alf_filter_luma_8x24_10_c: 1553     ff_vvc_alf_filter_luma_8x24_10_avx2: 309
ff_vvc_alf_filter_luma_8x28_10_c: 1815     ff_vvc_alf_filter_luma_8x28_10_avx2: 361
ff_vvc_alf_filter_luma_8x32_10_c: 2067     ff_vvc_alf_filter_luma_8x32_10_avx2: 419
ff_vvc_alf_filter_luma_12x4_10_c: 390      ff_vvc_alf_filter_luma_12x4_10_avx2: 54
ff_vvc_alf_filter_luma_12x8_10_c: 773      ff_vvc_alf_filter_luma_12x8_10_avx2: 107
ff_vvc_alf_filter_luma_12x12_10_c: 1159    ff_vvc_alf_filter_luma_12x12_10_avx2: 155
ff_vvc_alf_filter_luma_12x16_10_c: 1550    ff_vvc_alf_filter_luma_12x16_10_avx2: 207
ff_vvc_alf_filter_luma_12x20_10_c: 1970    ff_vvc_alf_filter_luma_12x20_10_avx2: 260
ff_vvc_alf_filter_luma_12x24_10_c: 2379    ff_vvc_alf_filter_luma_12x24_10_avx2: 309
ff_vvc_alf_filter_luma_12x28_10_c: 2763    ff_vvc_alf_filter_luma_12x28_10_avx2: 362
ff_vvc_alf_filter_luma_12x32_10_c: 3158    ff_vvc_alf_filter_luma_12x32_10_avx2: 419
ff_vvc_alf_filter_luma_16x4_10_c: 523      ff_vvc_alf_filter_luma_16x4_10_avx2: 53
ff_vvc_alf_filter_luma_16x8_10_c: 1049     ff_vvc_alf_filter_luma_16x8_10_avx2: 103
ff_vvc_alf_filter_luma_16x12_10_c: 1566    ff_vvc_alf_filter_luma_16x12_10_avx2: 159
ff_vvc_alf_filter_luma_16x16_10_c: 2078    ff_vvc_alf_filter_luma_16x16_10_avx2: 211
ff_vvc_alf_filter_luma_16x20_10_c: 2631    ff_vvc_alf_filter_luma_16x20_10_avx2: 259
ff_vvc_alf_filter_luma_16x24_10_c: 3149    ff_vvc_alf_filter_luma_16x24_10_avx2: 316
ff_vvc_alf_filter_luma_16x28_10_c: 3631    ff_vvc_alf_filter_luma_16x28_10_avx2: 359
ff_vvc_alf_filter_luma_16x32_10_c: 4233    ff_vvc_alf_filter_luma_16x32_10_avx2: 428
ff_vvc_alf_filter_luma_20x4_10_c: 649      ff_vvc_alf_filter_luma_20x4_10_avx2: 106
ff_vvc_alf_filter_luma_20x8_10_c: 1294     ff_vvc_alf_filter_luma_20x8_10_avx2: 206
ff_vvc_alf_filter_luma_20x12_10_c: 1936    ff_vvc_alf_filter_luma_20x12_10_avx2: 310
ff_vvc_alf_filter_luma_20x16_10_c: 2594    ff_vvc_alf_filter_luma_20x16_10_avx2: 411
ff_vvc_alf_filter_luma_20x20_10_c: 3234    ff_vvc_alf_filter_luma_20x20_10_avx2: 517
ff_vvc_alf_filter_luma_20x24_10_c: 3894    ff_vvc_alf_filter_luma_20x24_10_avx2: 621
ff_vvc_alf_filter_luma_20x28_10_c: 4542    ff_vvc_alf_filter_luma_20x28_10_avx2: 722
ff_vvc_alf_filter_luma_20x32_10_c: 5205    ff_vvc_alf_filter_luma_20x32_10_avx2: 832
ff_vvc_alf_filter_luma_24x4_10_c: 774      ff_vvc_alf_filter_luma_24x4_10_avx2: 104
ff_vvc_alf_filter_luma_24x8_10_c: 1546     ff_vvc_alf_filter_luma_24x8_10_avx2: 206
ff_vvc_alf_filter_luma_24x12_10_c: 2318    ff_vvc_alf_filter_luma_24x12_10_avx2: 312
ff_vvc_alf_filter_luma_24x16_10_c: 3104    ff_vvc_alf_filter_luma_24x16_10_avx2: 411
ff_vvc_alf_filter_luma_24x20_10_c: 3893    ff_vvc_alf_filter_luma_24x20_10_avx2: 513
ff_vvc_alf_filter_luma_24x24_10_c: 4681    ff_vvc_alf_filter_luma_24x24_10_avx2: 616
ff_vvc_alf_filter_luma_24x28_10_c: 5474    ff_vvc_alf_filter_luma_24x28_10_avx2: 721
ff_vvc_alf_filter_luma_24x32_10_c: 6271    ff_vvc_alf_filter_luma_24x32_10_avx2: 832
ff_vvc_alf_filter_luma_28x4_10_c: 907      ff_vvc_alf_filter_luma_28x4_10_avx2: 103
ff_vvc_alf_filter_luma_28x8_10_c: 1797     ff_vvc_alf_filter_luma_28x8_10_avx2: 206
ff_vvc_alf_filter_luma_28x12_10_c: 2708    ff_vvc_alf_filter_luma_28x12_10_avx2: 309
ff_vvc_alf_filter_luma_28x16_10_c: 3632    ff_vvc_alf_filter_luma_28x16_10_avx2: 413
ff_vvc_alf_filter_luma_28x20_10_c: 4537    ff_vvc_alf_filter_luma_28x20_10_avx2: 519
ff_vvc_alf_filter_luma_28x24_10_c: 5463    ff_vvc_alf_filter_luma_28x24_10_avx2: 616
ff_vvc_alf_filter_luma_28x28_10_c: 6372    ff_vvc_alf_filter_luma_28x28_10_avx2: 719
ff_vvc_alf_filter_luma_28x32_10_c: 7274    ff_vvc_alf_filter_luma_28x32_10_avx2: 823
ff_vvc_alf_filter_luma_32x4_10_c: 1029     ff_vvc_alf_filter_luma_32x4_10_avx2: 104
ff_vvc_alf_filter_luma_32x8_10_c: 2060     ff_vvc_alf_filter_luma_32x8_10_avx2: 206
ff_vvc_alf_filter_luma_32x12_10_c: 3112    ff_vvc_alf_filter_luma_32x12_10_avx2: 307
ff_vvc_alf_filter_luma_32x16_10_c: 4161    ff_vvc_alf_filter_luma_32x16_10_avx2: 413
ff_vvc_alf_filter_luma_32x20_10_c: 5211    ff_vvc_alf_filter_luma_32x20_10_avx2: 514
ff_vvc_alf_filter_luma_32x24_10_c: 6238    ff_vvc_alf_filter_luma_32x24_10_avx2: 614
ff_vvc_alf_filter_luma_32x28_10_c: 7261    ff_vvc_alf_filter_luma_32x28_10_avx2: 720
ff_vvc_alf_filter_luma_32x32_10_c: 8312    ff_vvc_alf_filter_luma_32x32_10_avx2: 819
ff_vvc_alf_filter_chroma_4x4_10_c: 70      ff_vvc_alf_filter_chroma_4x4_10_avx2: 53
ff_vvc_alf_filter_chroma_4x8_10_c: 139     ff_vvc_alf_filter_chroma_4x8_10_avx2: 104
ff_vvc_alf_filter_chroma_4x12_10_c: 208    ff_vvc_alf_filter_chroma_4x12_10_avx2: 155
ff_vvc_alf_filter_chroma_4x16_10_c: 275    ff_vvc_alf_filter_chroma_4x16_10_avx2: 218
ff_vvc_alf_filter_chroma_4x20_10_c: 344    ff_vvc_alf_filter_chroma_4x20_10_avx2: 257
ff_vvc_alf_filter_chroma_4x24_10_c: 411    ff_vvc_alf_filter_chroma_4x24_10_avx2: 309
ff_vvc_alf_filter_chroma_4x28_10_c: 481    ff_vvc_alf_filter_chroma_4x28_10_avx2: 361
ff_vvc_alf_filter_chroma_4x32_10_c: 545    ff_vvc_alf_filter_chroma_4x32_10_avx2: 411
ff_vvc_alf_filter_chroma_8x4_10_c: 138     ff_vvc_alf_filter_chroma_8x4_10_avx2: 53
ff_vvc_alf_filter_chroma_8x8_10_c: 274     ff_vvc_alf_filter_chroma_8x8_10_avx2: 106
ff_vvc_alf_filter_chroma_8x12_10_c: 422    ff_vvc_alf_filter_chroma_8x12_10_avx2: 158
ff_vvc_alf_filter_chroma_8x16_10_c: 545    ff_vvc_alf_filter_chroma_8x16_10_avx2: 206
ff_vvc_alf_filter_chroma_8x20_10_c: 683    ff_vvc_alf_filter_chroma_8x20_10_avx2: 257
ff_vvc_alf_filter_chroma_8x24_10_c: 816    ff_vvc_alf_filter_chroma_8x24_10_avx2: 312
ff_vvc_alf_filter_chroma_8x28_10_c: 951    ff_vvc_alf_filter_chroma_8x28_10_avx2: 359
ff_vvc_alf_filter_chroma_8x32_10_c: 1098   ff_vvc_alf_filter_chroma_8x32_10_avx2: 409
ff_vvc_alf_filter_chroma_12x4_10_c: 204    ff_vvc_alf_filter_chroma_12x4_10_avx2: 53
ff_vvc_alf_filter_chroma_12x8_10_c: 410    ff_vvc_alf_filter_chroma_12x8_10_avx2: 104
ff_vvc_alf_filter_chroma_12x12_10_c: 614   ff_vvc_alf_filter_chroma_12x12_10_avx2: 155
ff_vvc_alf_filter_chroma_12x16_10_c: 814   ff_vvc_alf_filter_chroma_12x16_10_avx2: 210
ff_vvc_alf_filter_chroma_12x20_10_c: 1017  ff_vvc_alf_filter_chroma_12x20_10_avx2: 258
ff_vvc_alf_filter_chroma_12x24_10_c: 1221  ff_vvc_alf_filter_chroma_12x24_10_avx2: 308
ff_vvc_alf_filter_chroma_12x28_10_c: 1423  ff_vvc_alf_filter_chroma_12x28_10_avx2: 366
ff_vvc_alf_filter_chroma_12x32_10_c: 1624  ff_vvc_alf_filter_chroma_12x32_10_avx2: 410
ff_vvc_alf_filter_chroma_16x4_10_c: 272    ff_vvc_alf_filter_chroma_16x4_10_avx2: 52
ff_vvc_alf_filter_chroma_16x8_10_c: 541    ff_vvc_alf_filter_chroma_16x8_10_avx2: 105
ff_vvc_alf_filter_chroma_16x12_10_c: 812   ff_vvc_alf_filter_chroma_16x12_10_avx2: 155
ff_vvc_alf_filter_chroma_16x16_10_c: 1091  ff_vvc_alf_filter_chroma_16x16_10_avx2: 206
ff_vvc_alf_filter_chroma_16x20_10_c: 1354  ff_vvc_alf_filter_chroma_16x20_10_avx2: 257
ff_vvc_alf_filter_chroma_16x24_10_c: 1637  ff_vvc_alf_filter_chroma_16x24_10_avx2: 313
ff_vvc_alf_filter_chroma_16x28_10_c: 1899  ff_vvc_alf_filter_chroma_16x28_10_avx2: 359
ff_vvc_alf_filter_chroma_16x32_10_c: 2161  ff_vvc_alf_filter_chroma_16x32_10_avx2: 410
ff_vvc_alf_filter_chroma_20x4_10_c: 339    ff_vvc_alf_filter_chroma_20x4_10_avx2: 103
ff_vvc_alf_filter_chroma_20x8_10_c: 681    ff_vvc_alf_filter_chroma_20x8_10_avx2: 207
ff_vvc_alf_filter_chroma_20x12_10_c: 1013  ff_vvc_alf_filter_chroma_20x12_10_avx2: 307
ff_vvc_alf_filter_chroma_20x16_10_c: 1349  ff_vvc_alf_filter_chroma_20x16_10_avx2: 415
ff_vvc_alf_filter_chroma_20x20_10_c: 1685  ff_vvc_alf_filter_chroma_20x20_10_avx2: 522
ff_vvc_alf_filter_chroma_20x24_10_c: 2037  ff_vvc_alf_filter_chroma_20x24_10_avx2: 622
ff_vvc_alf_filter_chroma_20x28_10_c: 2380  ff_vvc_alf_filter_chroma_20x28_10_avx2: 733
ff_vvc_alf_filter_chroma_20x32_10_c: 2712  ff_vvc_alf_filter_chroma_20x32_10_avx2: 838
ff_vvc_alf_filter_chroma_24x4_10_c: 408    ff_vvc_alf_filter_chroma_24x4_10_avx2: 104
ff_vvc_alf_filter_chroma_24x8_10_c: 818    ff_vvc_alf_filter_chroma_24x8_10_avx2: 207
ff_vvc_alf_filter_chroma_24x12_10_c: 1219  ff_vvc_alf_filter_chroma_24x12_10_avx2: 308
ff_vvc_alf_filter_chroma_24x16_10_c: 1648  ff_vvc_alf_filter_chroma_24x16_10_avx2: 420
ff_vvc_alf_filter_chroma_24x20_10_c: 2061  ff_vvc_alf_filter_chroma_24x20_10_avx2: 525
ff_vvc_alf_filter_chroma_24x24_10_c: 2437  ff_vvc_alf_filter_chroma_24x24_10_avx2: 617
ff_vvc_alf_filter_chroma_24x28_10_c: 2832  ff_vvc_alf_filter_chroma_24x28_10_avx2: 722
ff_vvc_alf_filter_chroma_24x32_10_c: 3271  ff_vvc_alf_filter_chroma_24x32_10_avx2: 830
ff_vvc_alf_filter_chroma_28x4_10_c: 476    ff_vvc_alf_filter_chroma_28x4_10_avx2: 104
ff_vvc_alf_filter_chroma_28x8_10_c: 948    ff_vvc_alf_filter_chroma_28x8_10_avx2: 205
ff_vvc_alf_filter_chroma_28x12_10_c: 1420  ff_vvc_alf_filter_chroma_28x12_10_avx2: 310
ff_vvc_alf_filter_chroma_28x16_10_c: 1889  ff_vvc_alf_filter_chroma_28x16_10_avx2: 423
ff_vvc_alf_filter_chroma_28x20_10_c: 2372  ff_vvc_alf_filter_chroma_28x20_10_avx2: 513
ff_vvc_alf_filter_chroma_28x24_10_c: 2843  ff_vvc_alf_filter_chroma_28x24_10_avx2: 618
ff_vvc_alf_filter_chroma_28x28_10_c: 3307  ff_vvc_alf_filter_chroma_28x28_10_avx2: 724
ff_vvc_alf_filter_chroma_28x32_10_c: 3801  ff_vvc_alf_filter_chroma_28x32_10_avx2: 827
ff_vvc_alf_filter_chroma_32x4_10_c: 543    ff_vvc_alf_filter_chroma_32x4_10_avx2: 105
ff_vvc_alf_filter_chroma_32x8_10_c: 1084   ff_vvc_alf_filter_chroma_32x8_10_avx2: 206
ff_vvc_alf_filter_chroma_32x12_10_c: 1621  ff_vvc_alf_filter_chroma_32x12_10_avx2: 309
ff_vvc_alf_filter_chroma_32x16_10_c: 2173  ff_vvc_alf_filter_chroma_32x16_10_avx2: 408
ff_vvc_alf_filter_chroma_32x20_10_c: 2703  ff_vvc_alf_filter_chroma_32x20_10_avx2: 513
ff_vvc_alf_filter_chroma_32x24_10_c: 3245  ff_vvc_alf_filter_chroma_32x24_10_avx2: 612
ff_vvc_alf_filter_chroma_32x28_10_c: 3795  ff_vvc_alf_filter_chroma_32x28_10_avx2: 722
ff_vvc_alf_filter_chroma_32x32_10_c: 4339  ff_vvc_alf_filter_chroma_32x32_10_avx2: 820

Signed-off-by: Wu Jianhua <toqsxw@outlook.com>
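
Editor's note: the checkasm numbers above pair the C reference with the AVX2 version for each block size. The arithmetic being vectorized is the standard ALF accumulate-clip-round step: each tap reads a mirrored pair of neighbor samples, clips their differences against the center sample, and weights them by the tap coefficient, before a biased right shift. A minimal scalar sketch for one sample follows (helper and parameter names are illustrative, not the decoder's actual API):

    #include <stdint.h>

    static inline int alf_clip(int v, int c)
    {
        return v < -c ? -c : (v > c ? c : v);
    }

    /* cur: center sample; below[k]/above[k]: the mirrored neighbor pair for
     * tap k (the bottom/top operands of the asm FILTER macro); 64 is the dw64
     * rounding bias and 7 the SHIFT used away from virtual boundaries.
     * n_taps is 12 for luma, 6 for chroma. */
    static int alf_filter_sample(int cur, const int16_t *below, const int16_t *above,
                                 const int16_t *filter, const int16_t *clip,
                                 int n_taps, int pixel_max)
    {
        int sum = 64;
        for (int k = 0; k < n_taps; k++) {
            int d0 = alf_clip(below[k] - cur, clip[k]);
            int d1 = alf_clip(above[k] - cur, clip[k]);
            sum += filter[k] * (d0 + d1);   /* vpmaddwd folds the pair per tap */
        }
        cur += sum >> 7;
        return cur < 0 ? 0 : (cur > pixel_max ? pixel_max : cur);
    }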
parent cdcb4b98b7
commit ec2a7ef867
libavcodec/x86/vvc/Makefile
@@ -3,5 +3,6 @@ clean::
 OBJS-$(CONFIG_VVC_DECODER) += x86/vvc/vvcdsp_init.o \
                               x86/h26x/h2656dsp.o
-X86ASM-OBJS-$(CONFIG_VVC_DECODER) += x86/vvc/vvc_mc.o \
+X86ASM-OBJS-$(CONFIG_VVC_DECODER) += x86/vvc/vvc_alf.o \
+                                     x86/vvc/vvc_mc.o \
                                      x86/h26x/h2656_inter.o
libavcodec/x86/vvc/vvc_alf.asm (new file, 441 lines)
@@ -0,0 +1,441 @@
;******************************************************************************
;* VVC Adaptive Loop Filter SIMD optimizations
;*
;* Copyright (c) 2023-2024 Nuo Mi <nuomi2021@gmail.com>
;* Copyright (c) 2023-2024 Wu Jianhua <toqsxw@outlook.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

%macro PARAM_SHUFFE 1
%assign i (%1 * 2)
%assign j ((i + 1) << 8) + (i)
param_shuffe_ %+ %1:
%rep 2
    times 4 dw j
    times 4 dw (j + 0x0808)
%endrep
%endmacro

PARAM_SHUFFE 0
PARAM_SHUFFE 1
PARAM_SHUFFE 2
PARAM_SHUFFE 3

dd448: times 8 dd 512 - 64
dw64:  times 8 dd 64

SECTION .text

%define ALF_NUM_COEFF_LUMA   12
%define ALF_NUM_COEFF_CHROMA 6
%define ALF_NUM_COEFF_CC     7

;%1-%3 out
;%4 clip or filter
%macro LOAD_LUMA_PARAMS_W16 4
    lea offsetq, [3 * xq]                       ;xq * ALF_NUM_COEFF_LUMA / ALF_BLOCK_SIZE
    movu m%1, [%4q + 2 * offsetq + 0 * 32]      ; 2 * for sizeof(int16_t)
    movu m%2, [%4q + 2 * offsetq + 1 * 32]
    movu m%3, [%4q + 2 * offsetq + 2 * 32]
%endmacro

%macro LOAD_LUMA_PARAMS_W16 6
    LOAD_LUMA_PARAMS_W16 %1, %2, %3, %4
    ;m%1 = 03 02 01 00
    ;m%2 = 07 06 05 04
    ;m%3 = 11 10 09 08

    vshufpd m%5, m%1, m%2, 0011b                ;06 02 05 01
    vshufpd m%6, m%3, m%5, 1001b                ;06 10 01 09

    vshufpd m%1, m%1, m%6, 1100b                ;06 03 09 00
    vshufpd m%2, m%2, m%6, 0110b                ;10 07 01 04
    vshufpd m%3, m%3, m%5, 0110b                ;02 11 05 08

    vpermpd m%1, m%1, 01111000b                 ;09 06 03 00
    vshufpd m%2, m%2, m%2, 1001b                ;10 07 04 01
    vpermpd m%3, m%3, 10000111b                 ;11 08 05 02
%endmacro

; %1-%3 out
; %4 clip or filter
; %5-%6 tmp
%macro LOAD_LUMA_PARAMS 6
    LOAD_LUMA_PARAMS_W16 %1, %2, %3, %4, %5, %6
%endmacro

%macro LOAD_CHROMA_PARAMS 4
    ; LOAD_CHROMA_PARAMS_W %+ WIDTH %1, %2, %3, %4
    movq xm%1, [%3q]
    movd xm%2, [%3q + 8]
    vpbroadcastq m%1, xm%1
    vpbroadcastq m%2, xm%2
%endmacro

%macro LOAD_PARAMS 0
%if LUMA
    LOAD_LUMA_PARAMS 3, 4, 5, filter, 6, 7
    LOAD_LUMA_PARAMS 6, 7, 8, clip, 9, 10
%else
    LOAD_CHROMA_PARAMS 3, 4, filter, 5
    LOAD_CHROMA_PARAMS 6, 7, clip, 8
%endif
%endmacro

; FILTER(param_idx)
; input: m2, m9, m10
; output: m0, m1
; tmp: m11-m13
%macro FILTER 1
%assign i (%1 % 4)
%assign j (%1 / 4 + 3)
%assign k (%1 / 4 + 6)
%define filters m %+ j
%define clips m %+ k

    pshufb m12, clips, [param_shuffe_ %+ i]     ;clip
    pxor m11, m11
    psubw m11, m12                              ;-clip

    vpsubw m9, m2
    CLIPW m9, m11, m12

    vpsubw m10, m2
    CLIPW m10, m11, m12

    vpunpckhwd m13, m9, m10
    vpunpcklwd m9, m9, m10

    pshufb m12, filters, [param_shuffe_ %+ i]   ;filter
    vpunpcklwd m10, m12, m12
    vpunpckhwd m12, m12, m12

    vpmaddwd m9, m10
    vpmaddwd m12, m13

    paddd m0, m9
    paddd m1, m12
%endmacro

; FILTER(param_idx, bottom, top, byte_offset)
; input: param_idx, bottom, top, byte_offset
; output: m0, m1
; temp: m9, m10
%macro FILTER 4
    LOAD_PIXELS m10, [%2 + %4]
    LOAD_PIXELS m9, [%3 - %4]
    FILTER %1
%endmacro

; GET_SRCS(line)
; brief: get source lines
; input: src, src_stride, vb_pos
; output: s1...s6
%macro GET_SRCS 1
    lea s1q, [srcq + src_strideq]
    lea s3q, [s1q + src_strideq]
%if LUMA
    lea s5q, [s3q + src_strideq]
%endif
    neg src_strideq
    lea s2q, [srcq + src_strideq]
    lea s4q, [s2q + src_strideq]
%if LUMA
    lea s6q, [s4q + src_strideq]
%endif
    neg src_strideq

%if LUMA
    cmp vb_posq, 0
    je %%vb_bottom
    cmp vb_posq, 4
    jne %%vb_end
%else
    cmp vb_posq, 2
    jne %%vb_end
    cmp %1, 2
    jge %%vb_bottom
%endif

%%vb_above:
    ; above
    ; p1 = (y + i == vb_pos - 1) ? p0 : p1;
    ; p2 = (y + i == vb_pos - 1) ? p0 : p2;
    ; p3 = (y + i >= vb_pos - 2) ? p1 : p3;
    ; p4 = (y + i >= vb_pos - 2) ? p2 : p4;
    ; p5 = (y + i >= vb_pos - 3) ? p3 : p5;
    ; p6 = (y + i >= vb_pos - 3) ? p4 : p6;
    dec vb_posq
    cmp vb_posq, %1
    cmove s1q, srcq
    cmove s2q, srcq

    dec vb_posq
    cmp vb_posq, %1
    cmovbe s3q, s1q
    cmovbe s4q, s2q

    dec vb_posq
%if LUMA
    cmp vb_posq, %1
    cmovbe s5q, s3q
    cmovbe s6q, s4q
%endif
    add vb_posq, 3
    jmp %%vb_end

%%vb_bottom:
    ; bottom
    ; p1 = (y + i == vb_pos    ) ? p0 : p1;
    ; p2 = (y + i == vb_pos    ) ? p0 : p2;
    ; p3 = (y + i <= vb_pos + 1) ? p1 : p3;
    ; p4 = (y + i <= vb_pos + 1) ? p2 : p4;
    ; p5 = (y + i <= vb_pos + 2) ? p3 : p5;
    ; p6 = (y + i <= vb_pos + 2) ? p4 : p6;
    cmp vb_posq, %1
    cmove s1q, srcq
    cmove s2q, srcq

    inc vb_posq
    cmp vb_posq, %1
    cmovae s3q, s1q
    cmovae s4q, s2q

    inc vb_posq
%if LUMA
    cmp vb_posq, %1
    cmovae s5q, s3q
    cmovae s6q, s4q
%endif
    sub vb_posq, 2
%%vb_end:
%endmacro

; SHIFT_VB(line)
; brief: shift filter result
; input: m0, m1, vb_pos
; output: m0
; temp: m9
%macro SHIFT_VB 1
%define SHIFT 7
%if LUMA
    cmp %1, 3
    je %%near_above
    cmp %1, 0
    je %%near_below
    jmp %%no_vb
%%near_above:
    cmp vb_posq, 4
    je %%near_vb
    jmp %%no_vb
%%near_below:
    cmp vb_posq, 0
    je %%near_vb
%else
    cmp %1, 0
    je %%no_vb
    cmp %1, 3
    je %%no_vb
    cmp vb_posq, 2
    je %%near_vb
%endif
%%no_vb:
    vpsrad m0, SHIFT
    vpsrad m1, SHIFT
    jmp %%shift_end
%%near_vb:
    vpbroadcastd m9, [dd448]
    paddd m0, m9
    paddd m1, m9
    vpsrad m0, SHIFT + 3
    vpsrad m1, SHIFT + 3
%%shift_end:
    vpackssdw m0, m0, m1
%endmacro

; FILTER_VB(line)
; brief: filter pixels for luma and chroma
; input: line
; output: m0, m1
; temp: s0q...s1q
%macro FILTER_VB 1
    vpbroadcastd m0, [dw64]
    vpbroadcastd m1, [dw64]

    GET_SRCS %1
%if LUMA
    FILTER  0, s5q,  s6q,   0 * ps
    FILTER  1, s3q,  s4q,   1 * ps
    FILTER  2, s3q,  s4q,   0 * ps
    FILTER  3, s3q,  s4q,  -1 * ps
    FILTER  4, s1q,  s2q,   2 * ps
    FILTER  5, s1q,  s2q,   1 * ps
    FILTER  6, s1q,  s2q,   0 * ps
    FILTER  7, s1q,  s2q,  -1 * ps
    FILTER  8, s1q,  s2q,  -2 * ps
    FILTER  9, srcq, srcq,  3 * ps
    FILTER 10, srcq, srcq,  2 * ps
    FILTER 11, srcq, srcq,  1 * ps
%else
    FILTER  0, s3q,  s4q,   0 * ps
    FILTER  1, s1q,  s2q,   1 * ps
    FILTER  2, s1q,  s2q,   0 * ps
    FILTER  3, s1q,  s2q,  -1 * ps
    FILTER  4, srcq, srcq,  2 * ps
    FILTER  5, srcq, srcq,  1 * ps
%endif
    SHIFT_VB %1
%endmacro

; LOAD_PIXELS(dest, src)
%macro LOAD_PIXELS 2
%if ps == 2
    movu %1, %2
%else
    vpmovzxbw %1, %2
%endif
%endmacro

; STORE_PIXELS(dst, src)
%macro STORE_PIXELS 2
%if ps == 2
    movu %1, m%2
%else
    vpackuswb m%2, m%2
    vpermq m%2, m%2, 0x8
    movu %1, xm%2
%endif
%endmacro

%macro FILTER_16x4 0
%if LUMA
    push clipq
    push strideq
    %define s1q clipq
    %define s2q strideq
%else
    %define s1q s5q
    %define s2q s6q
%endif

    %define s3q pixel_maxq
    %define s4q offsetq
    push xq

    xor xq, xq
%%filter_16x4_loop:
    LOAD_PIXELS m2, [srcq]                      ;p0

    FILTER_VB xq

    paddw m0, m2

    ; clip to pixel
    CLIPW m0, m14, m15

    STORE_PIXELS [dstq], 0

    lea srcq, [srcq + src_strideq]
    lea dstq, [dstq + dst_strideq]
    inc xq
    cmp xq, 4
    jl %%filter_16x4_loop

    mov xq, src_strideq
    neg xq
    lea srcq, [srcq + xq * 4]
    mov xq, dst_strideq
    neg xq
    lea dstq, [dstq + xq * 4]

    pop xq

%if LUMA
    pop strideq
    pop clipq
%endif
%endmacro

; FILTER(bpc, luma/chroma)
%macro ALF_FILTER 2
%xdefine BPC %1
%ifidn %2, luma
    %xdefine LUMA 1
%else
    %xdefine LUMA 0
%endif

; ******************************
; void vvc_alf_filter_%2_%1bpc_avx2(uint8_t *dst, ptrdiff_t dst_stride,
;     const uint8_t *src, ptrdiff_t src_stride, const ptrdiff_t width, const ptrdiff_t height,
;     const int16_t *filter, const int16_t *clip, ptrdiff_t stride, ptrdiff_t vb_pos, ptrdiff_t pixel_max);
; ******************************
cglobal vvc_alf_filter_%2_%1bpc, 11, 15, 16, 0-0x28, dst, dst_stride, src, src_stride, width, height, filter, clip, stride, vb_pos, pixel_max, \
    offset, x, s5, s6
%define ps (%1 / 8) ; pixel size
    movd xm15, pixel_maxd
    vpbroadcastw m15, xm15
    pxor m14, m14

.loop:
    push srcq
    push dstq
    xor xd, xd

.loop_w:
    LOAD_PARAMS
    FILTER_16x4

    add srcq, 16 * ps
    add dstq, 16 * ps
    add xd, 16
    cmp xd, widthd
    jl .loop_w

    pop dstq
    pop srcq
    lea srcq, [srcq + 4 * src_strideq]
    lea dstq, [dstq + 4 * dst_strideq]

    lea filterq, [filterq + 2 * strideq]
    lea clipq, [clipq + 2 * strideq]

    sub vb_posq, 4
    sub heightq, 4
    jg .loop
    RET
%endmacro

; FILTER(bpc)
%macro ALF_FILTER 1
    ALF_FILTER %1, luma
    ALF_FILTER %1, chroma
%endmacro

%if ARCH_X86_64
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
ALF_FILTER 16
ALF_FILTER 8
%endif
%endif
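
Editor's note on SHIFT_VB: rows adjacent to an ALF virtual boundary use a weakened filter, realized as a larger right shift with a matching rounding bias; dd448 tops the +64 already held in the accumulator up to 512 = 1 << 9, the correct rounding term for the >> 10. A scalar sketch of that selection (illustrative only, not decoder code):

    /* sum already includes the +64 (dw64) bias accumulated by FILTER_VB */
    static int alf_shift_vb(int sum, int near_vb)
    {
        if (near_vb)
            return (sum + 448) >> 10;   /* 64 + 448 == 512 == 1 << 9 */
        return sum >> 7;                /* SHIFT == 7, plain path */
    }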
libavcodec/x86/vvc/vvcdsp_init.c
@@ -87,6 +87,27 @@ AVG_PROTOTYPES( 8, avx2)
AVG_PROTOTYPES(10, avx2)
AVG_PROTOTYPES(12, avx2)

#define ALF_BPC_PROTOTYPES(bpc, opt)                                                                  \
    void BF(ff_vvc_alf_filter_luma, bpc, opt)(uint8_t *dst, ptrdiff_t dst_stride,                     \
        const uint8_t *src, ptrdiff_t src_stride, ptrdiff_t width, ptrdiff_t height,                  \
        const int16_t *filter, const int16_t *clip, ptrdiff_t stride, ptrdiff_t vb_pos, ptrdiff_t pixel_max); \
    void BF(ff_vvc_alf_filter_chroma, bpc, opt)(uint8_t *dst, ptrdiff_t dst_stride,                   \
        const uint8_t *src, ptrdiff_t src_stride, ptrdiff_t width, ptrdiff_t height,                  \
        const int16_t *filter, const int16_t *clip, ptrdiff_t stride, ptrdiff_t vb_pos, ptrdiff_t pixel_max); \

#define ALF_PROTOTYPES(bpc, bd, opt)                                                                  \
    void bf(ff_vvc_alf_filter_luma, bd, opt)(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src, ptrdiff_t src_stride, \
        int width, int height, const int16_t *filter, const int16_t *clip, const int vb_pos);         \
    void bf(ff_vvc_alf_filter_chroma, bd, opt)(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src, ptrdiff_t src_stride, \
        int width, int height, const int16_t *filter, const int16_t *clip, const int vb_pos);         \

ALF_BPC_PROTOTYPES(8,  avx2)
ALF_BPC_PROTOTYPES(16, avx2)

ALF_PROTOTYPES(8,  8,  avx2)
ALF_PROTOTYPES(16, 10, avx2)
ALF_PROTOTYPES(16, 12, avx2)

#if ARCH_X86_64
#if HAVE_SSE4_EXTERNAL
#define FW_PUT(name, depth, opt) \

@@ -181,6 +202,26 @@ void bf(ff_vvc_w_avg, bd, opt)(uint8_t *dst, ptrdiff_t dst_stride,
AVG_FUNCS(8,  8,  avx2)
AVG_FUNCS(16, 10, avx2)
AVG_FUNCS(16, 12, avx2)

#define ALF_FUNCS(bpc, bd, opt)                                                                       \
    void bf(ff_vvc_alf_filter_luma, bd, opt)(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src, ptrdiff_t src_stride, \
        int width, int height, const int16_t *filter, const int16_t *clip, const int vb_pos)          \
    {                                                                                                 \
        const int param_stride = (width >> 2) * ALF_NUM_COEFF_LUMA;                                   \
        BF(ff_vvc_alf_filter_luma, bpc, opt)(dst, dst_stride, src, src_stride, width, height,         \
            filter, clip, param_stride, vb_pos, (1 << bd) - 1);                                       \
    }                                                                                                 \
    void bf(ff_vvc_alf_filter_chroma, bd, opt)(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src, ptrdiff_t src_stride, \
        int width, int height, const int16_t *filter, const int16_t *clip, const int vb_pos)          \
    {                                                                                                 \
        BF(ff_vvc_alf_filter_chroma, bpc, opt)(dst, dst_stride, src, src_stride, width, height,       \
            filter, clip, 0, vb_pos, (1 << bd) - 1);                                                  \
    }                                                                                                 \

ALF_FUNCS(8,  8,  avx2)
ALF_FUNCS(16, 10, avx2)
ALF_FUNCS(16, 12, avx2)

#endif

#define PEL_LINK(dst, C, W, idx1, idx2, name, D, opt) \

@@ -252,6 +293,11 @@ AVG_FUNCS(16, 12, avx2)
        c->inter.avg   = bf(ff_vvc_avg, bd, opt);   \
        c->inter.w_avg = bf(ff_vvc_w_avg, bd, opt); \
    } while (0)

#define ALF_INIT(bd) do {                                           \
    c->alf.filter[LUMA]   = ff_vvc_alf_filter_luma_##bd##_avx2;     \
    c->alf.filter[CHROMA] = ff_vvc_alf_filter_chroma_##bd##_avx2;   \
} while (0)
#endif

void ff_vvc_dsp_init_x86(VVCDSPContext *const c, const int bd)

@@ -287,12 +333,15 @@ void ff_vvc_dsp_init_x86(VVCDSPContext *const c, const int bd)
    if (EXTERNAL_AVX2(cpu_flags)) {
        switch (bd) {
        case 8:
            ALF_INIT(8);
            AVG_INIT(8, avx2);
            break;
        case 10:
            ALF_INIT(10);
            AVG_INIT(10, avx2);
            break;
        case 12:
            ALF_INIT(12);
            AVG_INIT(12, avx2);
            break;
        default:
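
Editor's note: ff_vvc_dsp_init_x86() stores the ALF_FUNCS wrappers into c->alf.filter[], and the decoder then calls through that table per ALF block. An illustrative call sequence follows (the surrounding decoder setup, buffers, and parameter tables are assumed and not part of this patch):

    /* Hypothetical caller sketch; dst/src, filter/clip and vb_pos are
     * assumed to be prepared by the decoder elsewhere. */
    VVCDSPContext c = { 0 };
    ff_vvc_dsp_init_x86(&c, 10);   /* bd = 10 wires up the _10_avx2 pair when AVX2 is available */
    c.alf.filter[LUMA](dst, dst_stride, src, src_stride,
                       width, height, filter, clip, vb_pos);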