lavu/riscv: require B or zba explicitly
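
The RVV routines below use sh2add from the Zba address-generation extension (Zba is also bundled in the B superset, together with Zbb and Zbs) to turn element counts into byte offsets. The func directives previously declared only the vector extension each routine needs, leaving the Zba dependency implicit; this change names zba explicitly as a second requirement. As a C model of the instruction's semantics (helper name is mine, not an FFmpeg API):

    #include <stdint.h>

    /* Zba "sh2add rd, rs1, rs2" computes rd = rs2 + (rs1 << 2).
     * E.g. "sh2add a2, a4, a2" below advances the 32-bit element
     * pointer in a2 by a4 (len) elements. */
    static inline uintptr_t sh2add(uintptr_t rs1, uintptr_t rs2)
    {
        return rs2 + (rs1 << 2);
    }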
libavutil/riscv/fixed_dsp_rvv.S
@@ -20,7 +20,7 @@
 
 #include "asm.S"
 
-func ff_vector_fmul_window_scaled_rvv, zve64x
+func ff_vector_fmul_window_scaled_rvv, zve64x, zba
         csrwi   vxrm, 0
         vsetvli t0, zero, e16, m1, ta, ma
         sh2add  a2, a4, a2
@@ -68,7 +68,7 @@ func ff_vector_fmul_window_scaled_rvv, zve64x
         ret
 endfunc
 
-func ff_vector_fmul_window_fixed_rvv, zve64x
+func ff_vector_fmul_window_fixed_rvv, zve64x, zba
         csrwi   vxrm, 0
         vsetvli t0, zero, e16, m1, ta, ma
         sh2add  a2, a4, a2
@@ -112,7 +112,7 @@ func ff_vector_fmul_window_fixed_rvv, zve64x
         ret
 endfunc
 
-func ff_vector_fmul_fixed_rvv, zve32x
+func ff_vector_fmul_fixed_rvv, zve32x, zba
         csrwi   vxrm, 0
 1:
         vsetvli t0, a3, e32, m4, ta, ma
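
ff_vector_fmul_fixed_rvv is an elementwise Q31 multiply: csrwi vxrm, 0 selects round-to-nearest-up rounding for the fractional multiply (vsmul). A scalar sketch of the intended arithmetic, assuming the Q31 convention of libavutil/fixed_dsp (reference name hypothetical; vsmul's saturation of the 0x80000000 * 0x80000000 corner case is omitted):

    #include <stdint.h>

    static void vector_fmul_fixed_ref(int32_t *dst, const int32_t *src0,
                                      const int32_t *src1, int len)
    {
        for (int i = 0; i < len; i++) {
            int64_t accu = (int64_t)src0[i] * src1[i];     /* Q31 * Q31 = Q62 */
            dst[i] = (int32_t)((accu + 0x40000000) >> 31); /* round, back to Q31 */
        }
    }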
@@ -129,7 +129,7 @@ func ff_vector_fmul_fixed_rvv, zve32x
         ret
 endfunc
 
-func ff_vector_fmul_reverse_fixed_rvv, zve32x
+func ff_vector_fmul_reverse_fixed_rvv, zve32x, zba
         csrwi   vxrm, 0
         // e16/m4 and e32/m8 are possible but slow the gathers down.
         vsetvli t0, zero, e16, m1, ta, ma
@@ -155,7 +155,7 @@ func ff_vector_fmul_reverse_fixed_rvv, zve32x
         ret
 endfunc
 
-func ff_vector_fmul_add_fixed_rvv, zve32x
+func ff_vector_fmul_add_fixed_rvv, zve32x, zba
         csrwi   vxrm, 0
 1:
         vsetvli t0, a4, e32, m8, ta, ma
@@ -175,7 +175,7 @@ func ff_vector_fmul_add_fixed_rvv, zve32x
         ret
 endfunc
 
-func ff_scalarproduct_fixed_rvv, zve64x
+func ff_scalarproduct_fixed_rvv, zve64x, zba
         li      t1, 1 << 30
         vsetvli t0, zero, e64, m8, ta, ma
         vmv.v.x v8, zero
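
In ff_scalarproduct_fixed_rvv, the li t1, 1 << 30 preloads the rounding bias of the Q31 dot product into the 64-bit accumulator. A scalar sketch (name hypothetical):

    #include <stdint.h>

    static int32_t scalarproduct_fixed_ref(const int32_t *v1,
                                           const int32_t *v2, int len)
    {
        int64_t p = 1 << 30;            /* cf. "li t1, 1 << 30" above */
        for (int i = 0; i < len; i++)
            p += (int64_t)v1[i] * v2[i];
        return (int32_t)(p >> 31);      /* biased sum rounds to nearest */
    }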
@@ -198,7 +198,7 @@ func ff_scalarproduct_fixed_rvv, zve64x
 endfunc
 
 // (a0) = (a0) + (a1), (a1) = (a0) - (a1) [0..a2-1]
-func ff_butterflies_fixed_rvv, zve32x
+func ff_butterflies_fixed_rvv, zve32x, zba
 1:
         vsetvli t0, a2, e32, m4, ta, ma
         vle32.v v16, (a0)
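
The comment above ff_butterflies_fixed_rvv is the whole contract: an in-place add/subtract butterfly across two buffers, where the subtraction uses the old value of (a0). A scalar sketch (name hypothetical):

    #include <stdint.h>

    static void butterflies_fixed_ref(int32_t *v1, int32_t *v2, int len)
    {
        for (int i = 0; i < len; i++) {
            int32_t t = v1[i] - v2[i]; /* old (a0) minus (a1) */
            v1[i] += v2[i];
            v2[i] = t;
        }
    }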
libavutil/riscv/float_dsp_rvv.S
@@ -21,7 +21,7 @@
 #include "asm.S"
 
 // (a0) = (a1) * (a2) [0..a3-1]
-func ff_vector_fmul_rvv, zve32f
+func ff_vector_fmul_rvv, zve32f, zba
 1:
         vsetvli t0, a3, e32, m8, ta, ma
         vle32.v v16, (a1)
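
Per its header comment, ff_vector_fmul_rvv is a plain elementwise float multiply; the loop processes len (a3) in vector-length (t0) chunks. A minimal scalar sketch (name hypothetical):

    static void vector_fmul_ref(float *dst, const float *src0,
                                const float *src1, int len)
    {
        for (int i = 0; i < len; i++)
            dst[i] = src0[i] * src1[i];
    }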
@@ -38,7 +38,7 @@ func ff_vector_fmul_rvv, zve32f
 endfunc
 
 // (a0) += (a1) * fa0 [0..a2-1]
-func ff_vector_fmac_scalar_rvv, zve32f
+func ff_vector_fmac_scalar_rvv, zve32f, zba
 NOHWF   fmv.w.x fa0, a2
 NOHWF   mv      a2, a3
 1:
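
The NOHWF-guarded moves handle soft-float ABIs, where the scalar multiplier arrives in a2 instead of fa0 and the length is passed in a3 instead of a2. The operation itself is a multiply-accumulate with a scalar; a minimal sketch (name hypothetical):

    static void vector_fmac_scalar_ref(float *dst, const float *src,
                                       float mul, int len)
    {
        for (int i = 0; i < len; i++)
            dst[i] += src[i] * mul;
    }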
@@ -57,7 +57,7 @@ NOHWF   mv      a2, a3
 endfunc
 
 // (a0) = (a1) * fa0 [0..a2-1]
-func ff_vector_fmul_scalar_rvv, zve32f
+func ff_vector_fmul_scalar_rvv, zve32f, zba
 NOHWF   fmv.w.x fa0, a2
 NOHWF   mv      a2, a3
 1:
@@ -73,7 +73,7 @@ NOHWF   mv      a2, a3
         ret
 endfunc
 
-func ff_vector_fmul_window_rvv, zve32f
+func ff_vector_fmul_window_rvv, zve32f, zba
         // a0: dst, a1: src0, a2: src1, a3: window, a4: length
         // e16/m2 and e32/m4 are possible but slower due to gather.
         vsetvli t0, zero, e16, m1, ta, ma
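
ff_vector_fmul_window_rvv overlaps two inputs under a symmetric window, reading the window and src1 backwards via gathers (hence the LMUL note in the comment). A scalar sketch modeled on the C reference in libavutil/float_dsp.c, as I read it:

    static void vector_fmul_window_ref(float *dst, const float *src0,
                                       const float *src1, const float *win,
                                       int len)
    {
        dst  += len;
        win  += len;
        src0 += len;
        /* writes 2 * len outputs, mirrored around the centre of dst */
        for (int i = -len, j = len - 1; i < 0; i++, j--) {
            float s0 = src0[i], s1 = src1[j];
            float wi = win[i],  wj = win[j];
            dst[i] = s0 * wj - s1 * wi;
            dst[j] = s0 * wi + s1 * wj;
        }
    }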
@@ -113,7 +113,7 @@ func ff_vector_fmul_window_rvv, zve32f
 endfunc
 
 // (a0) = (a1) * (a2) + (a3) [0..a4-1]
-func ff_vector_fmul_add_rvv, zve32f
+func ff_vector_fmul_add_rvv, zve32f, zba
 1:
         vsetvli t0, a4, e32, m8, ta, ma
         vle32.v v8, (a1)
@@ -133,7 +133,7 @@ endfunc
 
 // TODO factor vrsub, separate last iteration?
 // (a0) = (a1) * reverse(a2) [0..a3-1]
-func ff_vector_fmul_reverse_rvv, zve32f
+func ff_vector_fmul_reverse_rvv, zve32f, zba
         // e16/m4 and e32/m8 are possible but slower due to gather.
         vsetvli t0, zero, e16, m1, ta, ma
         sh2add  a2, a3, a2
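
Here sh2add a2, a3, a2 points a2 one past the end of src1 (len floats further on) so the gather can read it backwards; this is exactly the Zba usage the new zba requirement covers. A scalar sketch (name hypothetical):

    static void vector_fmul_reverse_ref(float *dst, const float *src0,
                                        const float *src1, int len)
    {
        src1 += len; /* cf. "sh2add a2, a3, a2" above */
        for (int i = 0; i < len; i++)
            dst[i] = src0[i] * src1[-i - 1]; /* src1 read in reverse */
    }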
@@ -159,7 +159,7 @@ func ff_vector_fmul_reverse_rvv, zve32f
 endfunc
 
 // (a0) = (a0) + (a1), (a1) = (a0) - (a1) [0..a2-1]
-func ff_butterflies_float_rvv, zve32f
+func ff_butterflies_float_rvv, zve32f, zba
 1:
         vsetvli t0, a2, e32, m8, ta, ma
         vle32.v v16, (a0)
@@ -177,7 +177,7 @@ func ff_butterflies_float_rvv, zve32f
 endfunc
 
 // a0 = (a0).(a1) [0..a2-1]
-func ff_scalarproduct_float_rvv, zve32f
+func ff_scalarproduct_float_rvv, zve32f, zba
         vsetvli t0, zero, e32, m8, ta, ma
         vmv.v.x v8, zero
         vmv.s.x v0, zero
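
ff_scalarproduct_float_rvv zeroes a bank of accumulators (vmv.v.x) plus a scalar seed for the final reduction (vmv.s.x); on soft-float ABIs the result is moved back to a0 (the NOHWF fmv.x.w visible in the next hunk's context line). A scalar sketch (name hypothetical):

    static float scalarproduct_float_ref(const float *v1, const float *v2,
                                         int len)
    {
        float p = 0.f;
        for (int i = 0; i < len; i++)
            p += v1[i] * v2[i];
        return p;
    }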
@@ -199,7 +199,7 @@ NOHWF   fmv.x.w a0, fa0
 endfunc
 
 // (a0) = (a1) * (a2) [0..a3-1]
-func ff_vector_dmul_rvv, zve64d
+func ff_vector_dmul_rvv, zve64d, zba
 1:
         vsetvli t0, a3, e64, m8, ta, ma
         vle64.v v16, (a1)
@@ -216,7 +216,7 @@ func ff_vector_dmul_rvv, zve64d
 endfunc
 
 // (a0) += (a1) * fa0 [0..a2-1]
-func ff_vector_dmac_scalar_rvv, zve64d
+func ff_vector_dmac_scalar_rvv, zve64d, zba
 NOHWD   fmv.d.x fa0, a2
 NOHWD   mv      a2, a3
 1:
@@ -234,7 +234,7 @@ NOHWD   mv      a2, a3
 endfunc
 
 // (a0) = (a1) * fa0 [0..a2-1]
-func ff_vector_dmul_scalar_rvv, zve64d
+func ff_vector_dmul_scalar_rvv, zve64d, zba
 NOHWD   fmv.d.x fa0, a2
 NOHWD   mv      a2, a3
 1:
@@ -250,7 +250,7 @@ NOHWD   mv      a2, a3
         ret
 endfunc
 
-func ff_scalarproduct_double_rvv, zve64f
+func ff_scalarproduct_double_rvv, zve64f, zba
         vsetvli t0, zero, e64, m8, ta, ma
         vmv.v.x v8, zero
         vmv.s.x v0, zero