Mirror of https://github.com/FFmpeg/FFmpeg.git, synced 2024-12-23 12:43:46 +02:00

Commit 2f77ecc6bc
Benched on K230 for VLEN 128, SpaceMIT for VLEN 256. The width-4 variants
showed no speedup for VLEN 256 over VLEN 128 on the available hardware, so
they were disabled.

                              C    RVV128                C    RVV256
avg_h264_qpel_4_mc00_8     33.9    33.6 (1.01x)
avg_h264_qpel_4_mc01_8    218.8    89.1 (2.46x)
avg_h264_qpel_4_mc02_8    218.8    79.8 (2.74x)
avg_h264_qpel_4_mc03_8    218.8    89.1 (2.46x)
avg_h264_qpel_4_mc10_8    172.3   126.1 (1.37x)
avg_h264_qpel_4_mc11_8    339.1   190.8 (1.78x)
avg_h264_qpel_4_mc12_8    533.6   357.6 (1.49x)
avg_h264_qpel_4_mc13_8    348.4   190.8 (1.83x)
avg_h264_qpel_4_mc20_8    144.8   116.8 (1.24x)
avg_h264_qpel_4_mc21_8    478.1   385.6 (1.24x)
avg_h264_qpel_4_mc22_8    348.4   283.6 (1.23x)
avg_h264_qpel_4_mc23_8    478.1   394.6 (1.21x)
avg_h264_qpel_4_mc30_8    172.6   126.1 (1.37x)
avg_h264_qpel_4_mc31_8    339.4   191.1 (1.78x)
avg_h264_qpel_4_mc32_8    542.9   357.6 (1.52x)
avg_h264_qpel_4_mc33_8    339.4   191.1 (1.78x)
avg_h264_qpel_8_mc00_8    116.8    42.9 (2.72x)    123.6    50.6 (2.44x)
avg_h264_qpel_8_mc01_8    774.4   163.1 (4.75x)    779.8   165.1 (4.72x)
avg_h264_qpel_8_mc02_8    774.4   154.1 (5.03x)    779.8   144.3 (5.40x)
avg_h264_qpel_8_mc03_8    774.4   163.3 (4.74x)    779.8   165.3 (4.72x)
avg_h264_qpel_8_mc10_8    617.1   237.3 (2.60x)    613.1   227.6 (2.69x)
avg_h264_qpel_8_mc11_8   1209.3   376.4 (3.21x)   1206.8   363.1 (3.32x)
avg_h264_qpel_8_mc12_8   1913.3   598.6 (3.20x)   1894.3   561.1 (3.38x)
avg_h264_qpel_8_mc13_8   1218.6   376.4 (3.24x)   1217.1   363.1 (3.35x)
avg_h264_qpel_8_mc20_8    524.4   228.1 (2.30x)    519.3   227.6 (2.28x)
avg_h264_qpel_8_mc21_8   1709.6   681.9 (2.51x)   1707.1   644.3 (2.65x)
avg_h264_qpel_8_mc22_8   1274.3   459.6 (2.77x)   1279.8   436.1 (2.93x)
avg_h264_qpel_8_mc23_8   1700.3   672.6 (2.53x)   1706.8   644.6 (2.65x)
avg_h264_qpel_8_mc30_8    607.6   246.6 (2.46x)    623.6   238.1 (2.62x)
avg_h264_qpel_8_mc31_8   1209.6   376.4 (3.21x)   1206.8   363.1 (3.32x)
avg_h264_qpel_8_mc32_8   1904.1   607.9 (3.13x)   1894.3   571.3 (3.32x)
avg_h264_qpel_8_mc33_8   1209.6   376.1 (3.22x)   1206.8   363.1 (3.32x)
avg_h264_qpel_16_mc00_8   431.9    89.1 (4.85x)    436.1    71.3 (6.12x)
avg_h264_qpel_16_mc01_8  2894.6   376.1 (7.70x)   2842.3   300.6 (9.46x)
avg_h264_qpel_16_mc02_8  2987.3   348.4 (8.57x)   2967.3   290.1 (10.23x)
avg_h264_qpel_16_mc03_8  2885.3   376.4 (7.67x)   2842.3   300.6 (9.46x)
avg_h264_qpel_16_mc10_8  2404.1   524.4 (4.58x)   2404.8   456.8 (5.26x)
avg_h264_qpel_16_mc11_8  4709.4   811.6 (5.80x)   4675.6   706.8 (6.62x)
avg_h264_qpel_16_mc12_8  7477.9  1274.3 (5.87x)   7436.1  1061.1 (7.01x)
avg_h264_qpel_16_mc13_8  4718.6   820.6 (5.75x)   4655.1   706.8 (6.59x)
avg_h264_qpel_16_mc20_8  2052.1   487.1 (4.21x)   2071.3   446.3 (4.64x)
avg_h264_qpel_16_mc21_8  7440.6  1422.6 (5.23x)   6727.8  1217.3 (5.53x)
avg_h264_qpel_16_mc22_8  5051.9   950.4 (5.32x)   5071.6   790.3 (6.42x)
avg_h264_qpel_16_mc23_8  6764.9  1422.3 (4.76x)   6748.6  1217.3 (5.54x)
avg_h264_qpel_16_mc30_8  2413.1   524.4 (4.60x)   2415.1   467.3 (5.17x)
avg_h264_qpel_16_mc31_8  4681.6   839.1 (5.58x)   4675.6   727.6 (6.43x)
avg_h264_qpel_16_mc32_8  8579.6  1292.8 (6.64x)   7436.3  1071.3 (6.94x)
avg_h264_qpel_16_mc33_8  5375.9   829.9 (6.48x)   4665.3   717.3 (6.50x)
put_h264_qpel_4_mc00_8     24.4    24.4 (1.00x)
put_h264_qpel_4_mc01_8    987.4    79.8 (12.37x)
put_h264_qpel_4_mc02_8    190.8    79.8 (2.39x)
put_h264_qpel_4_mc03_8    209.6    89.1 (2.35x)
put_h264_qpel_4_mc10_8    163.3   117.1 (1.39x)
put_h264_qpel_4_mc11_8    339.4   181.6 (1.87x)
put_h264_qpel_4_mc12_8    533.6   348.4 (1.53x)
put_h264_qpel_4_mc13_8    339.4   190.8 (1.78x)
put_h264_qpel_4_mc20_8    126.3   116.8 (1.08x)
put_h264_qpel_4_mc21_8    468.9   376.1 (1.25x)
put_h264_qpel_4_mc22_8    330.1   274.4 (1.20x)
put_h264_qpel_4_mc23_8    468.9   376.1 (1.25x)
put_h264_qpel_4_mc30_8    163.3   126.3 (1.29x)
put_h264_qpel_4_mc31_8    339.1   191.1 (1.77x)
put_h264_qpel_4_mc32_8    533.6   348.4 (1.53x)
put_h264_qpel_4_mc33_8    339.4   181.8 (1.87x)
put_h264_qpel_8_mc00_8     98.6    33.6 (2.93x)     92.3    40.1 (2.30x)
put_h264_qpel_8_mc01_8    737.1   153.8 (4.79x)    738.1   144.3 (5.12x)
put_h264_qpel_8_mc02_8    663.1   135.3 (4.90x)    665.1   134.1 (4.96x)
put_h264_qpel_8_mc03_8    737.4   154.1 (4.79x)   1508.8   144.3 (10.46x)
put_h264_qpel_8_mc10_8    598.4   237.1 (2.52x)    592.3   227.6 (2.60x)
put_h264_qpel_8_mc11_8   1172.3   357.9 (3.28x)   1175.6   342.3 (3.43x)
put_h264_qpel_8_mc12_8   1867.1   589.1 (3.17x)   1863.1   561.1 (3.32x)
put_h264_qpel_8_mc13_8   1172.6   366.9 (3.20x)   1175.6   352.8 (3.33x)
put_h264_qpel_8_mc20_8    450.4   218.8 (2.06x)    446.3   206.8 (2.16x)
put_h264_qpel_8_mc21_8   1672.3   663.1 (2.52x)   1675.6   633.8 (2.64x)
put_h264_qpel_8_mc22_8   1144.6  1200.1 (0.95x)   1144.3   425.6 (2.69x)
put_h264_qpel_8_mc23_8   1672.6   672.4 (2.49x)   1665.3   634.1 (2.63x)
put_h264_qpel_8_mc30_8    598.6   237.3 (2.52x)    613.1   227.6 (2.69x)
put_h264_qpel_8_mc31_8   1172.3   376.1 (3.12x)   1175.6   352.6 (3.33x)
put_h264_qpel_8_mc32_8   1857.8   598.6 (3.10x)   1863.1   561.1 (3.32x)
put_h264_qpel_8_mc33_8   1172.3   376.1 (3.12x)   1175.6   352.8 (3.33x)
put_h264_qpel_16_mc00_8   320.6    61.4 (5.22x)    321.3    60.8 (5.28x)
put_h264_qpel_16_mc01_8  2774.3   339.1 (8.18x)   2759.1   279.8 (9.86x)
put_h264_qpel_16_mc02_8  2589.1   320.6 (8.08x)   2571.6   269.3 (9.55x)
put_h264_qpel_16_mc03_8  2774.3   339.4 (8.17x)   2738.1   290.1 (9.44x)
put_h264_qpel_16_mc10_8  2274.3   487.4 (4.67x)   2290.1   436.1 (5.25x)
put_h264_qpel_16_mc11_8  5237.1   792.9 (6.60x)   4529.8   685.8 (6.61x)
put_h264_qpel_16_mc12_8  7357.6  1255.8 (5.86x)   7352.8  1040.1 (7.07x)
put_h264_qpel_16_mc13_8  4579.9   792.9 (5.78x)   4571.6   686.1 (6.66x)
put_h264_qpel_16_mc20_8  1802.1   459.6 (3.92x)   1800.6   425.6 (4.23x)
put_h264_qpel_16_mc21_8  6644.6  2246.6 (2.96x)   6644.3  1196.6 (5.55x)
put_h264_qpel_16_mc22_8  4589.1   913.4 (5.02x)   4592.3   769.3 (5.97x)
put_h264_qpel_16_mc23_8  6644.6  1394.6 (4.76x)   6634.1  1196.6 (5.54x)
put_h264_qpel_16_mc30_8  2274.3   496.6 (4.58x)   2290.1   456.8 (5.01x)
put_h264_qpel_16_mc31_8  5255.6   802.1 (6.55x)   4550.8   706.8 (6.44x)
put_h264_qpel_16_mc32_8  7376.1  1265.1 (5.83x)   7352.8  1050.6 (7.00x)
put_h264_qpel_16_mc33_8  4579.9   802.1 (5.71x)   4561.1   696.3 (6.55x)

Signed-off-by: Niklas Haas <git@haasn.dev>
Signed-off-by: J. Dekker <jdek@itanimul.li>
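
The figures above are checkasm cycle counts (lower is better). Assuming a
native RISC-V build with checkasm enabled, comparable numbers can be
collected with something like `make checkasm` followed by
`tests/checkasm/checkasm --test=h264qpel --bench` (exact flag spelling per
the checkasm usage text of the tree in use).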
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2024 Niklas Haas
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "libavutil/riscv/asm.S"

.macro lx rd, addr
#if (__riscv_xlen == 32)
        lw      \rd, \addr
#elif (__riscv_xlen == 64)
        ld      \rd, \addr
#else
        lq      \rd, \addr
#endif
.endm

.macro sx rd, addr
#if (__riscv_xlen == 32)
        sw      \rd, \addr
#elif (__riscv_xlen == 64)
        sd      \rd, \addr
#else
        sq      \rd, \addr
#endif
.endm
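
/* lx/sx load and store one XLEN-sized (pointer-width) register; they are
 * used below to spill a0/a1 to the stack around the first pass of the
 * two-pass (diagonal) cases. */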

/* output is unclipped; clobbers v26-v31 plus t4 and t5 */
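/* Horizontal 6-tap H.264 lowpass filter: for each output element this
 * computes src[-2] - 5*src[-1] + 20*src[0] + 20*src[1] - 5*src[2] + src[3],
 * widened to 16 bits. Callers preload the constants t6 = 20 and a7 = -5.
 * The six shifted views of the source row are built from a single vector
 * load plus scalar loads folded in via vslide1up. */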
.macro lowpass_h vdst, src
        addi    t4, \src, 3
        lbu     t5, 2(\src)
        vle8.v  v31, (t4)
        lbu     t4, 1(\src)
        vslide1up.vx    v30, v31, t5
        lbu     t5, 0(\src)
        vslide1up.vx    v29, v30, t4
        lbu     t4, -1(\src)
        vslide1up.vx    v28, v29, t5
        lbu     t5, -2(\src)
        vslide1up.vx    v27, v28, t4
        vslide1up.vx    v26, v27, t5
        vwaddu.vv       \vdst, v26, v31
        vwmaccu.vx      \vdst, t6, v28
        vwmaccu.vx      \vdst, t6, v29
        vwmaccsu.vx     \vdst, a7, v27
        vwmaccsu.vx     \vdst, a7, v30
.endm

/* output is unclipped */
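/* Vertical 6-tap lowpass filter with the same 1, -5, 20, 20, -5, 1 taps,
 * applied across six preloaded rows. The signed variant serves the second
 * stage of the hv case, where the 16-bit intermediates produced by the
 * horizontal pass can be negative. */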
.macro lowpass_v vdst, vsrc0, vsrc1, vsrc2, vsrc3, vsrc4, vsrc5, signed=0
.if \signed
        vwadd.vv        \vdst, \vsrc0, \vsrc5
        vwmacc.vx       \vdst, t6, \vsrc2
        vwmacc.vx       \vdst, t6, \vsrc3
        vwmacc.vx       \vdst, a7, \vsrc1
        vwmacc.vx       \vdst, a7, \vsrc4
.else
        vwaddu.vv       \vdst, \vsrc0, \vsrc5
        vwmaccu.vx      \vdst, t6, \vsrc2
        vwmaccu.vx      \vdst, t6, \vsrc3
        vwmaccsu.vx     \vdst, a7, \vsrc1
        vwmaccsu.vx     \vdst, a7, \vsrc4
.endif
.endm
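
/* mc00 is the integer-pel position: a straight copy (put) or an average
 * with the existing destination (avg), four rows per iteration. vaaddu.vv
 * is the rounding averaging add; the entry points set vxrm to
 * round-to-nearest-up beforehand. */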
.macro qpel_mc00 op, dst, src, stride, size
func ff_\op\()_h264_qpel_pixels, zve32x
1:      add     t1, a2, a1
        add     t2, a2, t1
        add     t3, a2, t2
        vle8.v  v0, (a1)
        vle8.v  v1, (t1)
        vle8.v  v2, (t2)
        vle8.v  v3, (t3)
        addi    a4, a4, -4
        add     a1, a2, t3
        add     t1, a2, a0
        add     t2, a2, t1
        add     t3, a2, t2
.ifc \op, avg
        vle8.v  v4, (a0)
        vle8.v  v5, (t1)
        vle8.v  v6, (t2)
        vle8.v  v7, (t3)
        vaaddu.vv       v0, v0, v4
        vaaddu.vv       v1, v1, v5
        vaaddu.vv       v2, v2, v6
        vaaddu.vv       v3, v3, v7
.endif
        vse8.v  v0, (a0)
        vse8.v  v1, (t1)
        vse8.v  v2, (t2)
        vse8.v  v3, (t3)
        add     a0, a2, t3
        bnez    a4, 1b
        jr      t0
endfunc
.endm

qpel_mc00 put, a0, a1, a2, a4
qpel_mc00 avg, a0, a1, a2, a4
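
/* Register convention for the lowpass cores: a0 = dst, a2 = dst stride,
 * a1 = src, a3 = src stride, a4 = height (a multiple of 4), return address
 * in t0, constants t6 = 20 and a7 = -5. The _l2 variants additionally
 * average the filter output with a second source in a5 (stride a6), which
 * is how the quarter-pel positions are formed. */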
.macro qpel_lowpass op, ext, lmul, lmul2
func ff_\op\()_h264_qpel_h_lowpass_\lmul\ext, zve32x
1:      add     t1, a3, a1
        add     t2, a3, t1
        add     t3, a3, t2
        lowpass_h       v0, a1
        lowpass_h       v2, t1
        lowpass_h       v4, t2
        lowpass_h       v6, t3
        add     a1, a3, t3
        addi    a4, a4, -4
        vsetvli zero, zero, e16, \lmul2, ta, ma
        vmax.vx v0, v0, zero
        vmax.vx v2, v2, zero
        vmax.vx v4, v4, zero
        vmax.vx v6, v6, zero
        vsetvli zero, zero, e8, \lmul, ta, ma
        vnclipu.wi      v0, v0, 5
        vnclipu.wi      v2, v2, 5
        vnclipu.wi      v4, v4, 5
        vnclipu.wi      v6, v6, 5
.ifc \ext, _l2
        add     t1, a6, a5
        add     t2, a6, t1
        add     t3, a6, t2
        vle8.v  v8, (a5)
        vle8.v  v10, (t1)
        vle8.v  v12, (t2)
        vle8.v  v14, (t3)
        add     a5, a6, t3
        vaaddu.vv       v0, v0, v8
        vaaddu.vv       v2, v2, v10
        vaaddu.vv       v4, v4, v12
        vaaddu.vv       v6, v6, v14
.endif
        add     t1, a2, a0
        add     t2, a2, t1
        add     t3, a2, t2
.ifc \op, avg
        vle8.v  v1, (a0)
        vle8.v  v3, (t1)
        vle8.v  v5, (t2)
        vle8.v  v7, (t3)
        vaaddu.vv       v0, v0, v1
        vaaddu.vv       v2, v2, v3
        vaaddu.vv       v4, v4, v5
        vaaddu.vv       v6, v6, v7
.endif
        vse8.v  v0, (a0)
        vse8.v  v2, (t1)
        vse8.v  v4, (t2)
        vse8.v  v6, (t3)
        add     a0, a2, t3
        bnez    a4, 1b
        jr      t0
endfunc
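
/* The vertical filter keeps the last five source rows in v0-v4; each
 * iteration loads four more rows into v5-v8, emits four output rows, and
 * shifts the window down with vmv.v.v. */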
func ff_\op\()_h264_qpel_v_lowpass_\lmul\ext, zve32x
        sub     t1, a1, a3
        sub     t2, t1, a3
        vle8.v  v2, (a1)
        vle8.v  v1, (t1)
        vle8.v  v0, (t2)
        add     t1, a1, a3
        add     t2, t1, a3
        add     a1, t2, a3
        vle8.v  v3, (t1)
        vle8.v  v4, (t2)
1:      add     t1, a3, a1
        add     t2, a3, t1
        add     t3, a3, t2
        vle8.v  v5, (a1)
        vle8.v  v6, (t1)
        vle8.v  v7, (t2)
        vle8.v  v8, (t3)
        add     a1, a3, t3
        lowpass_v       v24, v0, v1, v2, v3, v4, v5
        lowpass_v       v26, v1, v2, v3, v4, v5, v6
        lowpass_v       v28, v2, v3, v4, v5, v6, v7
        lowpass_v       v30, v3, v4, v5, v6, v7, v8
        addi    a4, a4, -4
        vsetvli zero, zero, e16, \lmul2, ta, ma
        vmax.vx v24, v24, zero
        vmax.vx v26, v26, zero
        vmax.vx v28, v28, zero
        vmax.vx v30, v30, zero
        vsetvli zero, zero, e8, \lmul, ta, ma
        vnclipu.wi      v24, v24, 5
        vnclipu.wi      v26, v26, 5
        vnclipu.wi      v28, v28, 5
        vnclipu.wi      v30, v30, 5
.ifc \ext, _l2
        add     t1, a6, a5
        add     t2, a6, t1
        add     t3, a6, t2
        vle8.v  v9, (a5)
        vle8.v  v10, (t1)
        vle8.v  v11, (t2)
        vle8.v  v12, (t3)
        add     a5, a6, t3
        vaaddu.vv       v24, v24, v9
        vaaddu.vv       v26, v26, v10
        vaaddu.vv       v28, v28, v11
        vaaddu.vv       v30, v30, v12
.endif
        add     t1, a2, a0
        add     t2, a2, t1
        add     t3, a2, t2
.ifc \op, avg
        vle8.v  v9, (a0)
        vle8.v  v10, (t1)
        vle8.v  v11, (t2)
        vle8.v  v12, (t3)
        vaaddu.vv       v24, v24, v9
        vaaddu.vv       v26, v26, v10
        vaaddu.vv       v28, v28, v11
        vaaddu.vv       v30, v30, v12
.endif
        vse8.v  v24, (a0)
        vse8.v  v26, (t1)
        vse8.v  v28, (t2)
        vse8.v  v30, (t3)
        add     a0, a2, t3
        vmv.v.v v0, v4
        vmv.v.v v1, v5
        vmv.v.v v2, v6
        vmv.v.v v3, v7
        vmv.v.v v4, v8
        bnez    a4, 1b
        jr      t0
endfunc
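
/* Combined 2D filter: a horizontal pass producing unclipped 16-bit rows
 * (kept in the even registers v0-v8), then the signed vertical pass over
 * those. Both passes scale by 32, so a single vnclip.wi by 10 renormalizes;
 * vmax.vx clamps negative results to zero and the final vnclipu.wi 0
 * saturates to 8 bits. */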
func ff_\op\()_h264_qpel_hv_lowpass_\lmul\ext, zve32x
        sub     t1, a1, a3
        sub     t2, t1, a3
        lowpass_h       v4, a1
        lowpass_h       v2, t1
        lowpass_h       v0, t2
        add     t1, a1, a3
        add     t2, t1, a3
        add     a1, t2, a3
        lowpass_h       v6, t1
        lowpass_h       v8, t2
1:      add     t1, a3, a1
        add     t2, a3, t1
        add     t3, a3, t2
        lowpass_h       v10, a1
        lowpass_h       v12, t1
        lowpass_h       v14, t2
        lowpass_h       v16, t3
        vsetvli zero, zero, e16, \lmul2, ta, ma
        addi    a4, a4, -4
        lowpass_v       v20, v0, v2, v4, v6, v8, v10, signed=1
        lowpass_v       v24, v2, v4, v6, v8, v10, v12, signed=1
        lowpass_v       v28, v4, v6, v8, v10, v12, v14, signed=1
        vnclip.wi       v0, v20, 10
        lowpass_v       v20, v6, v8, v10, v12, v14, v16, signed=1
        vnclip.wi       v2, v24, 10
        vnclip.wi       v4, v28, 10
        vnclip.wi       v6, v20, 10
        vmax.vx v18, v0, zero
        vmax.vx v20, v2, zero
        vmax.vx v22, v4, zero
        vmax.vx v24, v6, zero
        vmv.v.v v0, v8
        vmv.v.v v2, v10
        vmv.v.v v4, v12
        vmv.v.v v6, v14
        vmv.v.v v8, v16
        add     a1, a3, t3
        vsetvli zero, zero, e8, \lmul, ta, ma
        vnclipu.wi      v18, v18, 0
        vnclipu.wi      v20, v20, 0
        vnclipu.wi      v22, v22, 0
        vnclipu.wi      v24, v24, 0
.ifc \ext, _l2
        add     t1, a6, a5
        add     t2, a6, t1
        add     t3, a6, t2
        vle8.v  v26, (a5)
        vle8.v  v27, (t1)
        vle8.v  v28, (t2)
        vle8.v  v29, (t3)
        add     a5, a6, t3
        vaaddu.vv       v18, v18, v26
        vaaddu.vv       v20, v20, v27
        vaaddu.vv       v22, v22, v28
        vaaddu.vv       v24, v24, v29
.endif
        add     t1, a2, a0
        add     t2, a2, t1
        add     t3, a2, t2
.ifc \op, avg
        vle8.v  v26, (a0)
        vle8.v  v27, (t1)
        vle8.v  v28, (t2)
        vle8.v  v29, (t3)
        vaaddu.vv       v18, v18, v26
        vaaddu.vv       v20, v20, v27
        vaaddu.vv       v22, v22, v28
        vaaddu.vv       v24, v24, v29
.endif
        vse8.v  v18, (a0)
        vse8.v  v20, (t1)
        vse8.v  v22, (t2)
        vse8.v  v24, (t3)
        add     a0, a2, t3
        bnez    a4, 1b
        jr      t0
endfunc
.endm

/* Note: We could possibly specialize for the width 8 / width 4 cases by
 * loading 32 bit integers, but this makes the convolutions more complicated
 * to implement, so it's not necessarily any faster. */

.macro h264_qpel lmul, lmul2
        qpel_lowpass    put,    , \lmul, \lmul2
        qpel_lowpass    put, _l2, \lmul, \lmul2
        qpel_lowpass    avg,    , \lmul, \lmul2
        qpel_lowpass    avg, _l2, \lmul, \lmul2
.endm

h264_qpel m1,  m2
h264_qpel mf2, m1
h264_qpel mf4, mf2
h264_qpel mf8, mf4
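
/* Each pairing names the e8 register grouping and the doubled e16 grouping
 * used for the widened intermediates. Instantiating m1 down to mf8 lets the
 * entry points below pick a group size that matches the block width on the
 * target VLEN. */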

.macro h264_qpel_1pass op, case, lmul, size, ext=rvv, dir, offset
func ff_\op\()_h264_qpel\size\()_\case\()_\ext, zve32x
        lpad    0
        vsetivli        zero, \size, e8, \lmul, ta, ma
        csrwi   vxrm, 0
        li      a4, \size
        li      t6, 20
        li      a7, -5
        mv      a3, a2
        mv      t0, ra
.ifnb \offset
.ifc \dir, v
        add     a5, a1, \offset
.else
        addi    a5, a1, \offset
.endif
        mv      a6, a3
        j       ff_\op\()_h264_qpel_\dir\()_lowpass_\lmul\()_l2
.else
        j       ff_\op\()_h264_qpel_\dir\()_lowpass_\lmul\()
.endif
endfunc
.endm
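
/* The diagonal cases take two passes: the first (always a put) writes either
 * straight into the destination (put) or into a temporary block on the stack
 * (avg); the second finishes through the corresponding _l2 lowpass, which
 * averages with the first pass output. */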
.macro h264_qpel_2pass op, case, lmul, size, ext=rvv, dir1, dir2, off1=0, off2
func ff_\op\()_h264_qpel\size\()_\case\()_\ext, zve32x
        lpad    0
        vsetivli        zero, \size, e8, \lmul, ta, ma
        csrwi   vxrm, 0
        addi    sp, sp, (-(__riscv_xlen >> 2))
        li      a4, \size
        li      t6, 20
        li      a7, -5
        sx      a0, 0(sp)
        sx      a1, (__riscv_xlen >> 3)(sp)
.ifc \off1, a2
        add     a1, a1, \off1
.elseif \off1
        addi    a1, a1, \off1
.endif
        mv      a3, a2
.ifc \op, avg
        // Use temporary array on stack for the first pass
        addi    a0, sp, -(\size * \size)
        li      a2, \size
.endif
        jal     t0, ff_put_h264_qpel_\dir1\()_lowpass_\lmul
        lx      a0, 0(sp)
        lx      a1, (__riscv_xlen >> 3)(sp)
.ifc \op, put
        // Directly reuse the first pass output buffer
        mv      a5, a0
        mv      a6, a2
.else
        addi    a5, sp, -(\size * \size)
        li      a6, \size
        mv      a2, a3
.endif
.ifnb \off2
        addi    a1, a1, \off2
.endif
        li      a4, \size
        mv      t0, ra
        addi    sp, sp, (__riscv_xlen >> 2)
        j       ff_\op\()_h264_qpel_\dir2\()_lowpass_\lmul\()_l2
endfunc
.endm
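
/* mcXY: X and Y are the quarter-pel phases in x and y. mc00 is a plain
 * copy; mc20, mc02 and mc22 need a single filter pass; the remaining
 * quarter-pel positions average a filter result with either a nearby
 * integer sample (via the offset argument) or a second filter pass. */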
.macro ff_h264_qpel_fns op, lmul, size, ext=rvv
func ff_\op\()_h264_qpel\size\()_mc00_\ext, zve32x
        lpad    0
        vsetivli        zero, \size, e8, \lmul, ta, ma
        csrwi   vxrm, 0
        li      a4, \size
        mv      t0, ra
        j       ff_\op\()_h264_qpel_pixels
endfunc

h264_qpel_1pass \op, mc20, \lmul, \size, \ext, h
h264_qpel_1pass \op, mc02, \lmul, \size, \ext, v
h264_qpel_1pass \op, mc10, \lmul, \size, \ext, h, 0
h264_qpel_1pass \op, mc30, \lmul, \size, \ext, h, 1
h264_qpel_1pass \op, mc01, \lmul, \size, \ext, v, zero
h264_qpel_1pass \op, mc03, \lmul, \size, \ext, v, a2
h264_qpel_1pass \op, mc22, \lmul, \size, \ext, hv

h264_qpel_2pass \op, mc11, \lmul, \size, \ext, h, v
h264_qpel_2pass \op, mc21, \lmul, \size, \ext, h, hv
h264_qpel_2pass \op, mc12, \lmul, \size, \ext, v, hv
h264_qpel_2pass \op, mc31, \lmul, \size, \ext, h, v, off2=1
h264_qpel_2pass \op, mc13, \lmul, \size, \ext, h, v, a2
h264_qpel_2pass \op, mc33, \lmul, \size, \ext, h, v, a2, 1
h264_qpel_2pass \op, mc23, \lmul, \size, \ext, h, hv, a2
h264_qpel_2pass \op, mc32, \lmul, \size, \ext, v, hv, 1
.endm
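
/* The rvv256 entry points are used when the runtime VLEN is at least 256
 * bits, where a fractional grouping (mf2 for 16 pixels, mf4 for 8) already
 * spans the block width. The width-4 rvv256 variants stay disabled: per the
 * commit message they showed no speedup over the VLEN 128 code on available
 * hardware. */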

ff_h264_qpel_fns put, mf2, 16, rvv256
ff_h264_qpel_fns put, mf4, 8,  rvv256
/* ff_h264_qpel_fns put, mf8, 4, rvv256 */

ff_h264_qpel_fns avg, mf2, 16, rvv256
ff_h264_qpel_fns avg, mf4, 8,  rvv256
/* ff_h264_qpel_fns avg, mf8, 4, rvv256 */

ff_h264_qpel_fns put, m1,  16, rvv
ff_h264_qpel_fns put, mf2, 8,  rvv
ff_h264_qpel_fns put, mf4, 4,  rvv

ff_h264_qpel_fns avg, m1,  16, rvv
ff_h264_qpel_fns avg, mf2, 8,  rvv
ff_h264_qpel_fns avg, mf4, 4,  rvv