/*
 * Copyright © 2022 Rémi Denis-Courmont.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/riscv/asm.S"

/*
 * Opus post-filter, in place over the data buffer:
 *     data[i] += g0 *  x[i - T]
 *              + g1 * (x[i - T - 1] + x[i - T + 1])
 *              + g2 * (x[i - T - 2] + x[i - T + 2])
 * where x[] is data[] read at negative offsets (history before data[0]).
 *
 * In:  a0 = data (float *, read and written)
 *      a1 = T, the pitch period in samples
 *      a2 = gains (float[3]: g0, g1, g2)
 *      a3 = number of samples to process
 *
 * Requires Zve32f (FP vectors) and Zbb (for the scalar min below).
 * Naming: x0..x4 are the five sliding windows of x[], x0 being the
 * newest (offset -T+2) and x4 the oldest (offset -T-2).
 */
func ff_opus_postfilter_rvv, zve32f, zbb
        flw      fa0, 0(a2)         // g0
        slli     t1, a1, 2          // t1 = T * sizeof (float)
        flw      fa1, 4(a2)         // g1
        sub      t0, a0, t1         // t0 = &data[-T]
        flw      fa2, 8(a2)         // g2
        addi     t1, t0, -2 * 4 // data - (period + 2) = initial &x4
        vsetivli zero, 4, e32, m4, ta, ma // vl = 4 just for the priming load
        addi     t0, t0, 2 * 4 // data - (period - 2) = initial &x0
        vle32.v  v16, (t1)          // prime the x4 carry for the first pass
        addi     t3, a1, -2 // maximum parallelism w/o stepping our tail
1:
        // Recover the window heads (elements just before the new x0 load)
        // from the previous pass' x0 tail saved in v16.
        vslidedown.vi v8, v16, 2    // head of x2 (offset -T)
        min      t1, a3, t3         // vl = min(remaining, T - 2)
        vslide1down.vx v12, v16, zero // head of x3 (offset -T - 1)
        vsetvli  t1, t1, e32, m4, ta, ma
        vle32.v v0, (t0) // x0
        sub      a3, a3, t1         // a3 = samples left after this pass
        vslide1down.vx v4, v8, zero // head of x1 (offset -T + 1)
        sh2add   t0, t1, t0         // advance &x0 by vl floats
        vle32.v  v28, (a0)          // current samples (the accumulator)
        addi     t2, t1, -4         // slide distance to extract x0's tail
        // vslideup preserves destination elements below the slide offset,
        // so the carried head values set above survive each slide.
        vslideup.vi v4, v0, 1       // x1 = carry head + x0 shifted up
        vslideup.vi v8, v4, 1       // x2
        vslideup.vi v12, v8, 1      // x3
        vslideup.vi v16, v12, 1     // x4
        vfadd.vv v20, v4, v12       // x1 + x3 (offsets -T±1)
        vfadd.vv v24, v0, v16       // x0 + x4 (offsets -T±2)
        vslidedown.vx v16, v0, t2   // save last 4 x0 values for next pass
        vfmacc.vf v28, fa0, v8      // += g0 * x2
        vfmacc.vf v28, fa1, v20     // += g1 * (x1 + x3)
        vfmacc.vf v28, fa2, v24     // += g2 * (x0 + x4)
        vse32.v  v28, (a0)          // store filtered samples
        sh2add   a0, t1, a0         // advance data by vl floats
        bnez     a3, 1b

        ret
endfunc