1
0
mirror of https://github.com/FFmpeg/FFmpeg.git synced 2024-12-23 12:43:46 +02:00

aarch64: hevc: Produce plain neon versions of qpel_bi_hv

As the plain neon qpel_h functions process two rows at a time,
we need to allocate storage for h+8 rows instead of h+7.

Because storage is now allocated for h+8 rows, simply incrementing
the stack pointer at the end would no longer restore it to the
right position. Instead, store the intended final stack pointer
value in register x14, which is saved on the stack across the
nested call.

AWS Graviton 3:
put_hevc_qpel_bi_hv4_8_c: 385.7
put_hevc_qpel_bi_hv4_8_neon: 131.0
put_hevc_qpel_bi_hv4_8_i8mm: 92.2
put_hevc_qpel_bi_hv6_8_c: 701.0
put_hevc_qpel_bi_hv6_8_neon: 239.5
put_hevc_qpel_bi_hv6_8_i8mm: 191.0
put_hevc_qpel_bi_hv8_8_c: 1162.0
put_hevc_qpel_bi_hv8_8_neon: 228.0
put_hevc_qpel_bi_hv8_8_i8mm: 225.2
put_hevc_qpel_bi_hv12_8_c: 2305.0
put_hevc_qpel_bi_hv12_8_neon: 558.0
put_hevc_qpel_bi_hv12_8_i8mm: 483.2
put_hevc_qpel_bi_hv16_8_c: 3965.2
put_hevc_qpel_bi_hv16_8_neon: 732.7
put_hevc_qpel_bi_hv16_8_i8mm: 656.5
put_hevc_qpel_bi_hv24_8_c: 8709.7
put_hevc_qpel_bi_hv24_8_neon: 1555.2
put_hevc_qpel_bi_hv24_8_i8mm: 1448.7
put_hevc_qpel_bi_hv32_8_c: 14818.0
put_hevc_qpel_bi_hv32_8_neon: 2763.7
put_hevc_qpel_bi_hv32_8_i8mm: 2468.0
put_hevc_qpel_bi_hv48_8_c: 32855.5
put_hevc_qpel_bi_hv48_8_neon: 6107.2
put_hevc_qpel_bi_hv48_8_i8mm: 5452.7
put_hevc_qpel_bi_hv64_8_c: 57591.5
put_hevc_qpel_bi_hv64_8_neon: 10660.2
put_hevc_qpel_bi_hv64_8_i8mm: 9580.0

Signed-off-by: Martin Storsjö <martin@martin.st>
This commit is contained in:
Martin Storsjö 2024-03-22 13:41:45 +02:00
parent d21b9a0411
commit f872b19714
2 changed files with 103 additions and 66 deletions

View File

@ -319,6 +319,10 @@ NEON8_FNPROTO(qpel_bi_v, (uint8_t *dst, ptrdiff_t dststride,
const uint8_t *src, ptrdiff_t srcstride, const int16_t *src2, const uint8_t *src, ptrdiff_t srcstride, const int16_t *src2,
int height, intptr_t mx, intptr_t my, int width),); int height, intptr_t mx, intptr_t my, int width),);
NEON8_FNPROTO(qpel_bi_hv, (uint8_t *dst, ptrdiff_t dststride,
const uint8_t *src, ptrdiff_t srcstride, const int16_t *src2,
int height, intptr_t mx, intptr_t my, int width),);
NEON8_FNPROTO(qpel_bi_hv, (uint8_t *dst, ptrdiff_t dststride, NEON8_FNPROTO(qpel_bi_hv, (uint8_t *dst, ptrdiff_t dststride,
const uint8_t *src, ptrdiff_t srcstride, const int16_t *src2, const uint8_t *src, ptrdiff_t srcstride, const int16_t *src2,
int height, intptr_t mx, intptr_t my, int width), _i8mm); int height, intptr_t mx, intptr_t my, int width), _i8mm);
@ -452,6 +456,7 @@ av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext *c, const int bit_depth)
NEON8_FNASSIGN(c->put_hevc_qpel, 1, 1, qpel_hv,); NEON8_FNASSIGN(c->put_hevc_qpel, 1, 1, qpel_hv,);
NEON8_FNASSIGN(c->put_hevc_qpel_uni, 1, 1, qpel_uni_hv,); NEON8_FNASSIGN(c->put_hevc_qpel_uni, 1, 1, qpel_uni_hv,);
NEON8_FNASSIGN_PARTIAL_5(c->put_hevc_qpel_uni_w, 1, 1, qpel_uni_w_hv,); NEON8_FNASSIGN_PARTIAL_5(c->put_hevc_qpel_uni_w, 1, 1, qpel_uni_w_hv,);
NEON8_FNASSIGN(c->put_hevc_qpel_bi, 1, 1, qpel_bi_hv,);
if (have_i8mm(cpu_flags)) { if (have_i8mm(cpu_flags)) {
NEON8_FNASSIGN(c->put_hevc_epel, 0, 1, epel_h, _i8mm); NEON8_FNASSIGN(c->put_hevc_epel, 0, 1, epel_h, _i8mm);

View File

@ -4590,14 +4590,6 @@ endfunc
qpel_uni_w_hv neon qpel_uni_w_hv neon
#if HAVE_I8MM
ENABLE_I8MM
qpel_uni_w_hv neon_i8mm
DISABLE_I8MM
#endif
function hevc_put_hevc_qpel_bi_hv4_8_end_neon function hevc_put_hevc_qpel_bi_hv4_8_end_neon
mov x9, #(MAX_PB_SIZE * 2) mov x9, #(MAX_PB_SIZE * 2)
load_qpel_filterh x7, x6 load_qpel_filterh x7, x6
@ -4620,7 +4612,8 @@ function hevc_put_hevc_qpel_bi_hv4_8_end_neon
.endm .endm
1: calc_all 1: calc_all
.purgem calc .purgem calc
2: ret 2: mov sp, x14
ret
endfunc endfunc
function hevc_put_hevc_qpel_bi_hv6_8_end_neon function hevc_put_hevc_qpel_bi_hv6_8_end_neon
@ -4650,7 +4643,8 @@ function hevc_put_hevc_qpel_bi_hv6_8_end_neon
.endm .endm
1: calc_all 1: calc_all
.purgem calc .purgem calc
2: ret 2: mov sp, x14
ret
endfunc endfunc
function hevc_put_hevc_qpel_bi_hv8_8_end_neon function hevc_put_hevc_qpel_bi_hv8_8_end_neon
@ -4678,7 +4672,8 @@ function hevc_put_hevc_qpel_bi_hv8_8_end_neon
.endm .endm
1: calc_all 1: calc_all
.purgem calc .purgem calc
2: ret 2: mov sp, x14
ret
endfunc endfunc
function hevc_put_hevc_qpel_bi_hv16_8_end_neon function hevc_put_hevc_qpel_bi_hv16_8_end_neon
@ -4723,83 +4718,87 @@ function hevc_put_hevc_qpel_bi_hv16_8_end_neon
subs x10, x10, #16 subs x10, x10, #16
add x4, x4, #32 add x4, x4, #32
b.ne 0b b.ne 0b
add w10, w5, #7 mov sp, x14
lsl x10, x10, #7
sub x10, x10, x6, lsl #1 // part of first line
add sp, sp, x10 // tmp_array without first line
ret ret
endfunc endfunc
#if HAVE_I8MM .macro qpel_bi_hv suffix
ENABLE_I8MM function ff_hevc_put_hevc_qpel_bi_hv4_8_\suffix, export=1
add w10, w5, #8
function ff_hevc_put_hevc_qpel_bi_hv4_8_neon_i8mm, export=1
add w10, w5, #7
lsl x10, x10, #7 lsl x10, x10, #7
mov x14, sp
sub sp, sp, x10 // tmp_array sub sp, sp, x10 // tmp_array
stp x7, x30, [sp, #-48]! stp x7, x30, [sp, #-64]!
stp x4, x5, [sp, #16] stp x4, x5, [sp, #16]
stp x0, x1, [sp, #32] stp x0, x1, [sp, #32]
str x14, [sp, #48]
sub x1, x2, x3, lsl #1 sub x1, x2, x3, lsl #1
sub x1, x1, x3 sub x1, x1, x3
add x0, sp, #48 add x0, sp, #64
mov x2, x3 mov x2, x3
add w3, w5, #7 add w3, w5, #7
mov x4, x6 mov x4, x6
bl X(ff_hevc_put_hevc_qpel_h4_8_neon_i8mm) bl X(ff_hevc_put_hevc_qpel_h4_8_\suffix)
ldp x4, x5, [sp, #16] ldp x4, x5, [sp, #16]
ldp x0, x1, [sp, #32] ldp x0, x1, [sp, #32]
ldp x7, x30, [sp], #48 ldr x14, [sp, #48]
ldp x7, x30, [sp], #64
b hevc_put_hevc_qpel_bi_hv4_8_end_neon b hevc_put_hevc_qpel_bi_hv4_8_end_neon
endfunc endfunc
function ff_hevc_put_hevc_qpel_bi_hv6_8_neon_i8mm, export=1 function ff_hevc_put_hevc_qpel_bi_hv6_8_\suffix, export=1
add w10, w5, #7 add w10, w5, #8
lsl x10, x10, #7 lsl x10, x10, #7
mov x14, sp
sub sp, sp, x10 // tmp_array sub sp, sp, x10 // tmp_array
stp x7, x30, [sp, #-48]! stp x7, x30, [sp, #-64]!
stp x4, x5, [sp, #16] stp x4, x5, [sp, #16]
stp x0, x1, [sp, #32] stp x0, x1, [sp, #32]
str x14, [sp, #48]
sub x1, x2, x3, lsl #1 sub x1, x2, x3, lsl #1
sub x1, x1, x3 sub x1, x1, x3
add x0, sp, #48 add x0, sp, #64
mov x2, x3 mov x2, x3
add x3, x5, #7 add x3, x5, #7
mov x4, x6 mov x4, x6
bl X(ff_hevc_put_hevc_qpel_h6_8_neon_i8mm) bl X(ff_hevc_put_hevc_qpel_h6_8_\suffix)
ldp x4, x5, [sp, #16] ldp x4, x5, [sp, #16]
ldp x0, x1, [sp, #32] ldp x0, x1, [sp, #32]
ldp x7, x30, [sp], #48 ldr x14, [sp, #48]
ldp x7, x30, [sp], #64
b hevc_put_hevc_qpel_bi_hv6_8_end_neon b hevc_put_hevc_qpel_bi_hv6_8_end_neon
endfunc endfunc
function ff_hevc_put_hevc_qpel_bi_hv8_8_neon_i8mm, export=1 function ff_hevc_put_hevc_qpel_bi_hv8_8_\suffix, export=1
add w10, w5, #7 add w10, w5, #8
lsl x10, x10, #7 lsl x10, x10, #7
mov x14, sp
sub sp, sp, x10 // tmp_array sub sp, sp, x10 // tmp_array
stp x7, x30, [sp, #-48]! stp x7, x30, [sp, #-64]!
stp x4, x5, [sp, #16] stp x4, x5, [sp, #16]
stp x0, x1, [sp, #32] stp x0, x1, [sp, #32]
str x14, [sp, #48]
sub x1, x2, x3, lsl #1 sub x1, x2, x3, lsl #1
sub x1, x1, x3 sub x1, x1, x3
add x0, sp, #48 add x0, sp, #64
mov x2, x3 mov x2, x3
add x3, x5, #7 add x3, x5, #7
mov x4, x6 mov x4, x6
bl X(ff_hevc_put_hevc_qpel_h8_8_neon_i8mm) bl X(ff_hevc_put_hevc_qpel_h8_8_\suffix)
ldp x4, x5, [sp, #16] ldp x4, x5, [sp, #16]
ldp x0, x1, [sp, #32] ldp x0, x1, [sp, #32]
ldp x7, x30, [sp], #48 ldr x14, [sp, #48]
ldp x7, x30, [sp], #64
b hevc_put_hevc_qpel_bi_hv8_8_end_neon b hevc_put_hevc_qpel_bi_hv8_8_end_neon
endfunc endfunc
function ff_hevc_put_hevc_qpel_bi_hv12_8_neon_i8mm, export=1 function ff_hevc_put_hevc_qpel_bi_hv12_8_\suffix, export=1
stp x6, x7, [sp, #-80]! stp x6, x7, [sp, #-80]!
stp x4, x5, [sp, #16] stp x4, x5, [sp, #16]
stp x2, x3, [sp, #32] stp x2, x3, [sp, #32]
stp x0, x1, [sp, #48] stp x0, x1, [sp, #48]
str x30, [sp, #64] str x30, [sp, #64]
bl X(ff_hevc_put_hevc_qpel_bi_hv8_8_neon_i8mm) bl X(ff_hevc_put_hevc_qpel_bi_hv8_8_\suffix)
ldp x4, x5, [sp, #16] ldp x4, x5, [sp, #16]
ldp x2, x3, [sp, #32] ldp x2, x3, [sp, #32]
ldp x0, x1, [sp, #48] ldp x0, x1, [sp, #48]
@ -4807,39 +4806,42 @@ function ff_hevc_put_hevc_qpel_bi_hv12_8_neon_i8mm, export=1
add x4, x4, #16 add x4, x4, #16
add x2, x2, #8 add x2, x2, #8
add x0, x0, #8 add x0, x0, #8
bl X(ff_hevc_put_hevc_qpel_bi_hv4_8_neon_i8mm) bl X(ff_hevc_put_hevc_qpel_bi_hv4_8_\suffix)
ldr x30, [sp], #16 ldr x30, [sp], #16
ret ret
endfunc endfunc
function ff_hevc_put_hevc_qpel_bi_hv16_8_neon_i8mm, export=1 function ff_hevc_put_hevc_qpel_bi_hv16_8_\suffix, export=1
add w10, w5, #7 add w10, w5, #8
lsl x10, x10, #7 lsl x10, x10, #7
mov x14, sp
sub sp, sp, x10 // tmp_array sub sp, sp, x10 // tmp_array
stp x7, x30, [sp, #-48]! stp x7, x30, [sp, #-64]!
stp x4, x5, [sp, #16] stp x4, x5, [sp, #16]
stp x0, x1, [sp, #32] stp x0, x1, [sp, #32]
add x0, sp, #48 str x14, [sp, #48]
add x0, sp, #64
sub x1, x2, x3, lsl #1 sub x1, x2, x3, lsl #1
sub x1, x1, x3 sub x1, x1, x3
mov x2, x3 mov x2, x3
add w3, w5, #7 add w3, w5, #7
mov x4, x6 mov x4, x6
bl X(ff_hevc_put_hevc_qpel_h16_8_neon_i8mm) bl X(ff_hevc_put_hevc_qpel_h16_8_\suffix)
ldp x4, x5, [sp, #16] ldp x4, x5, [sp, #16]
ldp x0, x1, [sp, #32] ldp x0, x1, [sp, #32]
ldp x7, x30, [sp], #48 ldr x14, [sp, #48]
ldp x7, x30, [sp], #64
mov x6, #16 // width mov x6, #16 // width
b hevc_put_hevc_qpel_bi_hv16_8_end_neon b hevc_put_hevc_qpel_bi_hv16_8_end_neon
endfunc endfunc
function ff_hevc_put_hevc_qpel_bi_hv24_8_neon_i8mm, export=1 function ff_hevc_put_hevc_qpel_bi_hv24_8_\suffix, export=1
stp x6, x7, [sp, #-80]! stp x6, x7, [sp, #-80]!
stp x4, x5, [sp, #16] stp x4, x5, [sp, #16]
stp x2, x3, [sp, #32] stp x2, x3, [sp, #32]
stp x0, x1, [sp, #48] stp x0, x1, [sp, #48]
str x30, [sp, #64] str x30, [sp, #64]
bl X(ff_hevc_put_hevc_qpel_bi_hv16_8_neon_i8mm) bl X(ff_hevc_put_hevc_qpel_bi_hv16_8_\suffix)
ldp x4, x5, [sp, #16] ldp x4, x5, [sp, #16]
ldp x2, x3, [sp, #32] ldp x2, x3, [sp, #32]
ldp x0, x1, [sp, #48] ldp x0, x1, [sp, #48]
@ -4847,73 +4849,103 @@ function ff_hevc_put_hevc_qpel_bi_hv24_8_neon_i8mm, export=1
add x4, x4, #32 add x4, x4, #32
add x2, x2, #16 add x2, x2, #16
add x0, x0, #16 add x0, x0, #16
bl X(ff_hevc_put_hevc_qpel_bi_hv8_8_neon_i8mm) bl X(ff_hevc_put_hevc_qpel_bi_hv8_8_\suffix)
ldr x30, [sp], #16 ldr x30, [sp], #16
ret ret
endfunc endfunc
function ff_hevc_put_hevc_qpel_bi_hv32_8_neon_i8mm, export=1 function ff_hevc_put_hevc_qpel_bi_hv32_8_\suffix, export=1
add w10, w5, #7 add w10, w5, #8
lsl x10, x10, #7 lsl x10, x10, #7
mov x14, sp
sub sp, sp, x10 // tmp_array sub sp, sp, x10 // tmp_array
stp x7, x30, [sp, #-48]! stp x7, x30, [sp, #-64]!
stp x4, x5, [sp, #16] stp x4, x5, [sp, #16]
stp x0, x1, [sp, #32] stp x0, x1, [sp, #32]
add x0, sp, #48 str x14, [sp, #48]
add x0, sp, #64
sub x1, x2, x3, lsl #1 sub x1, x2, x3, lsl #1
mov x2, x3 mov x2, x3
sub x1, x1, x3 sub x1, x1, x3
add w3, w5, #7 add w3, w5, #7
mov x4, x6 mov x4, x6
bl X(ff_hevc_put_hevc_qpel_h32_8_neon_i8mm) mov w6, #32
bl X(ff_hevc_put_hevc_qpel_h32_8_\suffix)
ldp x4, x5, [sp, #16] ldp x4, x5, [sp, #16]
ldp x0, x1, [sp, #32] ldp x0, x1, [sp, #32]
ldp x7, x30, [sp], #48 ldr x14, [sp, #48]
ldp x7, x30, [sp], #64
mov x6, #32 // width mov x6, #32 // width
b hevc_put_hevc_qpel_bi_hv16_8_end_neon b hevc_put_hevc_qpel_bi_hv16_8_end_neon
endfunc endfunc
function ff_hevc_put_hevc_qpel_bi_hv48_8_neon_i8mm, export=1 function ff_hevc_put_hevc_qpel_bi_hv48_8_\suffix, export=1
add w10, w5, #7 add w10, w5, #8
lsl x10, x10, #7 lsl x10, x10, #7
mov x14, sp
sub sp, sp, x10 // tmp_array sub sp, sp, x10 // tmp_array
stp x7, x30, [sp, #-48]! stp x7, x30, [sp, #-64]!
stp x4, x5, [sp, #16] stp x4, x5, [sp, #16]
stp x0, x1, [sp, #32] stp x0, x1, [sp, #32]
add x0, sp, #48 str x14, [sp, #48]
add x0, sp, #64
sub x1, x2, x3, lsl #1 sub x1, x2, x3, lsl #1
mov x2, x3 mov x2, x3
sub x1, x1, x3 sub x1, x1, x3
add w3, w5, #7 add w3, w5, #7
mov x4, x6 mov x4, x6
bl X(ff_hevc_put_hevc_qpel_h48_8_neon_i8mm) .ifc \suffix, neon
mov w6, #48
bl X(ff_hevc_put_hevc_qpel_h32_8_\suffix)
.else
bl X(ff_hevc_put_hevc_qpel_h48_8_\suffix)
.endif
ldp x4, x5, [sp, #16] ldp x4, x5, [sp, #16]
ldp x0, x1, [sp, #32] ldp x0, x1, [sp, #32]
ldp x7, x30, [sp], #48 ldr x14, [sp, #48]
ldp x7, x30, [sp], #64
mov x6, #48 // width mov x6, #48 // width
b hevc_put_hevc_qpel_bi_hv16_8_end_neon b hevc_put_hevc_qpel_bi_hv16_8_end_neon
endfunc endfunc
function ff_hevc_put_hevc_qpel_bi_hv64_8_neon_i8mm, export=1 function ff_hevc_put_hevc_qpel_bi_hv64_8_\suffix, export=1
add w10, w5, #7 add w10, w5, #8
lsl x10, x10, #7 lsl x10, x10, #7
mov x14, sp
sub sp, sp, x10 // tmp_array sub sp, sp, x10 // tmp_array
stp x7, x30, [sp, #-48]! stp x7, x30, [sp, #-64]!
stp x4, x5, [sp, #16] stp x4, x5, [sp, #16]
stp x0, x1, [sp, #32] stp x0, x1, [sp, #32]
add x0, sp, #48 str x14, [sp, #48]
add x0, sp, #64
sub x1, x2, x3, lsl #1 sub x1, x2, x3, lsl #1
mov x2, x3 mov x2, x3
sub x1, x1, x3 sub x1, x1, x3
add w3, w5, #7 add w3, w5, #7
mov x4, x6 mov x4, x6
bl X(ff_hevc_put_hevc_qpel_h64_8_neon_i8mm) .ifc \suffix, neon
mov w6, #64
bl X(ff_hevc_put_hevc_qpel_h32_8_\suffix)
.else
bl X(ff_hevc_put_hevc_qpel_h64_8_\suffix)
.endif
ldp x4, x5, [sp, #16] ldp x4, x5, [sp, #16]
ldp x0, x1, [sp, #32] ldp x0, x1, [sp, #32]
ldp x7, x30, [sp], #48 ldr x14, [sp, #48]
ldp x7, x30, [sp], #64
mov x6, #64 // width mov x6, #64 // width
b hevc_put_hevc_qpel_bi_hv16_8_end_neon b hevc_put_hevc_qpel_bi_hv16_8_end_neon
endfunc endfunc
.endm
qpel_bi_hv neon
#if HAVE_I8MM
ENABLE_I8MM
qpel_uni_w_hv neon_i8mm
qpel_bi_hv neon_i8mm
DISABLE_I8MM DISABLE_I8MM
#endif // HAVE_I8MM #endif // HAVE_I8MM