
Merge commit '0b9a237b2386ff84a6f99716bd58fa27a1b767e7'

* commit '0b9a237b2386ff84a6f99716bd58fa27a1b767e7':
  hevc: Add NEON 4x4 and 8x8 IDCT

[15:12:59] <@ubitux> hevc_idct_4x4_8_c: 389.1
[15:13:00] <@ubitux> hevc_idct_4x4_8_neon: 126.6
[15:13:02] <@ubitux> our ^
[15:13:06] <@ubitux> hevc_idct_4x4_8_c: 389.3
[15:13:08] <@ubitux> hevc_idct_4x4_8_neon: 107.8
[15:13:10] <@ubitux> hevc_idct_4x4_10_c: 418.6
[15:13:12] <@ubitux> hevc_idct_4x4_10_neon: 108.1
[15:13:14] <@ubitux> libav ^
[15:13:30] <@ubitux> so yeah, we can probably trash our versions here

Merged-by: James Almer <jamrial@gmail.com>
James Almer 2017-10-24 19:10:22 -03:00
commit c0683dce89
6 changed files with 217 additions and 237 deletions
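
For scale, assuming the figures quoted in the IRC log are per-call cycle counts from a checkasm-style benchmark (comparing FFmpeg's existing NEON code, "our", with the libav version being merged), the implied speedups over the C IDCT are roughly:

    hevc_idct_4x4_8  (FFmpeg NEON): 389.1 / 126.6 ~ 3.1x
    hevc_idct_4x4_8  (libav NEON):  389.3 / 107.8 ~ 3.6x
    hevc_idct_4x4_10 (libav NEON):  418.6 / 108.1 ~ 3.9x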

libavcodec/arm/hevcdsp_arm.h

@@ -21,6 +21,6 @@
 #include "libavcodec/hevcdsp.h"
 
-void ff_hevcdsp_init_neon(HEVCDSPContext *c, const int bit_depth);
+void ff_hevc_dsp_init_neon(HEVCDSPContext *c, const int bit_depth);
 
 #endif /* AVCODEC_ARM_HEVCDSP_ARM_H */

libavcodec/arm/hevcdsp_idct_neon.S

@@ -1,5 +1,7 @@
 /*
+ * ARM NEON optimised IDCT functions for HEVC decoding
  * Copyright (c) 2014 Seppo Tomperi <seppo.tomperi@vtt.fi>
+ * Copyright (c) 2017 Alexandra Hájková
  *
  * This file is part of FFmpeg.
  *
@@ -19,7 +21,13 @@
  */
 
 #include "libavutil/arm/asm.S"
 #include "neon.S"
 
+const trans, align=4
+        .short 64, 83, 64, 36
+        .short 89, 75, 50, 18
+        .short 90, 87, 80, 70
+        .short 57, 43, 25, 9
+endconst
 
 function ff_hevc_idct_4x4_dc_neon_8, export=1
         ldrsh r1, [r0]
@@ -168,30 +176,6 @@ function ff_hevc_add_residual_32x32_neon_8, export=1
         bx lr
 endfunc
 
-.macro transpose_16b_8x8 r0, r1, r2, r3, r4, r5, r6, r7
-        vtrn.64 \r0, \r4
-        vtrn.64 \r1, \r5
-        vtrn.64 \r2, \r6
-        vtrn.64 \r3, \r7
-        vtrn.32 \r0, \r2
-        vtrn.32 \r1, \r3
-        vtrn.32 \r4, \r6
-        vtrn.32 \r5, \r7
-        vtrn.16 \r0, \r1
-        vtrn.16 \r2, \r3
-        vtrn.16 \r4, \r5
-        vtrn.16 \r6, \r7
-.endm
-
-// in 4 q regs
-// output 8 d regs
-.macro transpose_16b_4x4 r0, r1, r2, r3
-        vtrn.32 \r0, \r2
-        vtrn.32 \r1, \r3
-        vtrn.16 \r0, \r1
-        vtrn.16 \r2, \r3
-.endm
-
 /* uses registers q2 - q9 for temp values */
 /* TODO: reorder */
 .macro tr4_luma_shift r0, r1, r2, r3, shift
@@ -225,67 +209,6 @@ endfunc
         vqrshrn.s32 \r3, q5, \shift
 .endm
 
-/* uses registers q2 - q6 for temp values */
-.macro tr4 r0, r1, r2, r3
-        vmull.s16 q4, \r1, d0[0]    // 83 * src1
-        vmull.s16 q6, \r1, d0[1]    // 36 * src1
-        vshll.s16 q2, \r0, #6       // 64 * src0
-        vshll.s16 q3, \r2, #6       // 64 * src2
-        vadd.s32  q5, q2, q3        // 64 * (src0 + src2)      e0
-        vsub.s32  q2, q2, q3        // 64 * (src0 - src2)      e1
-        vmlal.s16 q4, \r3, d0[1]    // 83 * src1 + 36 * src3   o0
-        vmlsl.s16 q6, \r3, d0[0]    // 36 * src1 - 83 * src3   o1
-        vsub.s32  q3, q5, q4        // e0 - o0
-        vadd.s32  q4, q5, q4        // e0 + o0
-        vadd.s32  q5, q2, q6        // e1 + o1
-        vsub.s32  q6, q2, q6        // e1 - o1
-.endm
-
-.macro tr4_shift r0, r1, r2, r3, shift
-        vmull.s16 q4, \r1, d0[0]    // 83 * src1
-        vmull.s16 q6, \r1, d0[1]    // 36 * src1
-        vshll.s16 q2, \r0, #6       // 64 * src0
-        vshll.s16 q3, \r2, #6       // 64 * src2
-        vadd.s32  q5, q2, q3        // 64 * (src0 + src2)      e0
-        vsub.s32  q2, q2, q3        // 64 * (src0 - src2)      e1
-        vmlal.s16 q4, \r3, d0[1]    // 83 * src1 + 36 * src3   o0
-        vmlsl.s16 q6, \r3, d0[0]    // 36 * src1 - 83 * src3   o1
-        vsub.s32  q3, q5, q4        // e0 - o0
-        vadd.s32  q4, q5, q4        // e0 + o0
-        vadd.s32  q5, q2, q6        // e1 + o1
-        vsub.s32  q6, q2, q6        // e1 - o1
-        vqrshrn.s32 \r0, q4, \shift
-        vqrshrn.s32 \r1, q5, \shift
-        vqrshrn.s32 \r2, q6, \shift
-        vqrshrn.s32 \r3, q3, \shift
-.endm
-
-function ff_hevc_transform_4x4_neon_8, export=1
-        vpush   {d8-d15}
-        vld1.16 {q14, q15}, [r0]    // coeffs
-        ldr     r3, =0x00240053     // 36 and 83
-        vmov.32 d0[0], r3
-        tr4_shift d28, d29, d30, d31, #7
-        vtrn.16 d28, d29
-        vtrn.16 d30, d31
-        vtrn.32 q14, q15
-        tr4_shift d28, d29, d30, d31, #12
-        vtrn.16 d28, d29
-        vtrn.16 d30, d31
-        vtrn.32 q14, q15
-        vst1.16 {q14, q15}, [r0]
-        vpop    {d8-d15}
-        bx lr
-endfunc
-
 function ff_hevc_transform_luma_4x4_neon_8, export=1
         vpush   {d8-d15}
         vld1.16 {q14, q15}, [r0]    // coeffs
@@ -312,154 +235,201 @@ function ff_hevc_transform_luma_4x4_neon_8, export=1
         bx lr
 endfunc
 
-.macro tr8_begin in0, in1, in2, in3
-        vmull.s16 q7, \in0, d1[1]   // 89 * src1
-        vmull.s16 q8, \in0, d1[0]   // 75 * src1
-        vmull.s16 q9, \in0, d1[3]   // 50 * src1
-        vmull.s16 q10, \in0, d1[2]  // 18 * src1
-
-        vmlal.s16 q7, \in1, d1[0]   // 75 * src3
-        vmlsl.s16 q8, \in1, d1[2]   //-18 * src3
-        vmlsl.s16 q9, \in1, d1[1]   //-89 * src3
-        vmlsl.s16 q10, \in1, d1[3]  //-50 * src3
-
-        vmlal.s16 q7, \in2, d1[3]   // 50 * src5
-        vmlsl.s16 q8, \in2, d1[1]   //-89 * src5
-        vmlal.s16 q9, \in2, d1[2]   // 18 * src5
-        vmlal.s16 q10, \in2, d1[0]  // 75 * src5
-
-        vmlal.s16 q7, \in3, d1[2]   // 18 * src7
-        vmlsl.s16 q8, \in3, d1[3]   //-50 * src7
-        vmlal.s16 q9, \in3, d1[0]   // 75 * src7
-        vmlsl.s16 q10, \in3, d1[1]  //-89 * src7
-.endm
-
-.macro tr8_end shift
-        vadd.s32 q1, q4, q7         // e_8[0] + o_8[0], dst[0]
-        vsub.s32 q4, q4, q7         // e_8[0] - o_8[0], dst[7]
-
-        vadd.s32 q2, q5, q8         // e_8[1] + o_8[1], dst[1]
-        vsub.s32 q5, q5, q8         // e_8[1] - o_8[1], dst[6]
-
-        vadd.s32 q11, q6, q9        // e_8[2] + o_8[2], dst[2]
-        vsub.s32 q6, q6, q9         // e_8[2] - o_8[2], dst[5]
-
-        vadd.s32 q12, q3, q10       // e_8[3] + o_8[3], dst[3]
-        vsub.s32 q3, q3, q10        // e_8[3] - o_8[3], dst[4]
-        vqrshrn.s32 d2, q1, \shift
-        vqrshrn.s32 d3, q2, \shift
-        vqrshrn.s32 d4, q11, \shift
-        vqrshrn.s32 d5, q12, \shift
-        vqrshrn.s32 d6, q3, \shift
-        vqrshrn.s32 d7, q6, \shift
-        vqrshrn.s32 d9, q4, \shift
-        vqrshrn.s32 d8, q5, \shift
-.endm
-
-function ff_hevc_transform_8x8_neon_8, export=1
-        push    {r4-r8}
-        vpush   {d8-d15}
-        mov     r5, #16
-
-        adr     r3, tr4f
-        vld1.16 {d0, d1}, [r3]
-
-        // left half
-        vld1.16 {d24}, [r0], r5
-        vld1.16 {d25}, [r0], r5
-        vld1.16 {d26}, [r0], r5
-        vld1.16 {d27}, [r0], r5
-        vld1.16 {d28}, [r0], r5
-        vld1.16 {d29}, [r0], r5
-        vld1.16 {d30}, [r0], r5
-        vld1.16 {d31}, [r0], r5
-        sub     r0, #128
-        tr8_begin d25, d27, d29, d31
-        tr4     d24, d26, d28, d30
-        tr8_end #7
-        vst1.16 {d2}, [r0], r5
-        vst1.16 {d3}, [r0], r5
-        vst1.16 {d4}, [r0], r5
-        vst1.16 {d5}, [r0], r5
-        vst1.16 {d6}, [r0], r5
-        vst1.16 {d7}, [r0], r5
-        vst1.16 {d8}, [r0], r5
-        vst1.16 {d9}, [r0], r5
-        sub     r0, #128
-        //skip right half if col_limit in r1 is less than 4
-        cmp     r1, #4
-        blt     1f
-        //right half
-        add     r0, #8
-        vld1.16 {d24}, [r0], r5
-        vld1.16 {d25}, [r0], r5
-        vld1.16 {d26}, [r0], r5
-        vld1.16 {d27}, [r0], r5
-        vld1.16 {d28}, [r0], r5
-        vld1.16 {d29}, [r0], r5
-        vld1.16 {d30}, [r0], r5
-        vld1.16 {d31}, [r0], r5
-        sub     r0, #128
-        tr8_begin d25, d27, d29, d31
-        tr4     d24, d26, d28, d30
-        tr8_end #7
-        vst1.16 {d2}, [r0], r5
-        vst1.16 {d3}, [r0], r5
-        vst1.16 {d4}, [r0], r5
-        vst1.16 {d5}, [r0], r5
-        vst1.16 {d6}, [r0], r5
-        vst1.16 {d7}, [r0], r5
-        vst1.16 {d8}, [r0], r5
-        vst1.16 {d9}, [r0], r5
-        sub     r0, #136
-1:
-        // top half
-        vldm    r0, {q12-q15}       // coeffs
-        transpose_16b_4x4 d24, d26, d28, d30
-        transpose_16b_4x4 d25, d27, d29, d31
-        tr8_begin d26, d30, d27, d31
-        tr4     d24, d28, d25, d29
-        tr8_end #12
-        transpose_16b_4x4 d2, d3, d4, d5
-        transpose_16b_4x4 d6, d7, d8, d9
-        vswp    d7, d5
-        vswp    d7, d8
-        vswp    d3, d6
-        vswp    d6, d4
-        vstm    r0!, {q1-q4}
-
-        // bottom half
-        vldm    r0, {q12-q15}       // coeffs
-        transpose_16b_4x4 d24, d26, d28, d30
-        transpose_16b_4x4 d25, d27, d29, d31
-        tr8_begin d26, d30, d27, d31
-        tr4     d24, d28, d25, d29
-        tr8_end #12
-        transpose_16b_4x4 d2, d3, d4, d5
-        transpose_16b_4x4 d6, d7, d8, d9
-        vswp    d7, d5
-        vswp    d7, d8
-        vswp    d3, d6
-        vswp    d6, d4
-        //vstm  r0, {q1-q4}
-        vst1.16 {q1-q2}, [r0]
-        add     r0, #32
-        vst1.16 {q3-q4}, [r0]
-        sub     r0, #32
-        vpop    {d8-d15}
-        pop     {r4-r8}
-        bx lr
-endfunc
-
-.align 4
-tr4f:
-.word 0x00240053  // 36 and d1[0] = 83
-.word 0x00000000
-tr8f:
-.word 0x0059004b  // 89, d0[0] = 75
-.word 0x00320012  // 50, d0[2] = 18
-tr16:
-.word 0x005a0057  // 90, d2[0] = 87
-.word 0x00500046  // 80, d2[2] = 70
-.word 0x0039002b  // 57, d2[0] = 43
-.word 0x00190009  // 25, d2[2] = 9
-
+.macro sum_sub out, in, c, op
+  .ifc \op, +
+        vmlal.s16 \out, \in, \c
+  .else
+        vmlsl.s16 \out, \in, \c
+  .endif
+.endm
+
+.macro tr_4x4 in0, in1, in2, in3, out0, out1, out2, out3, shift, tmp0, tmp1, tmp2, tmp3, tmp4
+        vshll.s16   \tmp0, \in0, #6
+        vmull.s16   \tmp2, \in1, d4[1]
+        vmov        \tmp1, \tmp0
+        vmull.s16   \tmp3, \in1, d4[3]
+        vmlal.s16   \tmp0, \in2, d4[0]      @e0
+        vmlsl.s16   \tmp1, \in2, d4[0]      @e1
+        vmlal.s16   \tmp2, \in3, d4[3]      @o0
+        vmlsl.s16   \tmp3, \in3, d4[1]      @o1
+
+        vadd.s32    \tmp4, \tmp0, \tmp2
+        vsub.s32    \tmp0, \tmp0, \tmp2
+        vadd.s32    \tmp2, \tmp1, \tmp3
+        vsub.s32    \tmp1, \tmp1, \tmp3
+        vqrshrn.s32 \out0, \tmp4, #\shift
+        vqrshrn.s32 \out3, \tmp0, #\shift
+        vqrshrn.s32 \out1, \tmp2, #\shift
+        vqrshrn.s32 \out2, \tmp1, #\shift
+.endm
+
+.macro tr_4x4_8 in0, in1, in2, in3, out0, out1, out2, out3, tmp0, tmp1, tmp2, tmp3
+        vshll.s16   \tmp0, \in0, #6
+        vld1.s16    {\in0}, [r1, :64]!
+        vmov        \tmp1, \tmp0
+        vmull.s16   \tmp2, \in1, \in0[1]
+        vmull.s16   \tmp3, \in1, \in0[3]
+        vmlal.s16   \tmp0, \in2, \in0[0]    @e0
+        vmlsl.s16   \tmp1, \in2, \in0[0]    @e1
+        vmlal.s16   \tmp2, \in3, \in0[3]    @o0
+        vmlsl.s16   \tmp3, \in3, \in0[1]    @o1
+
+        vld1.s16    {\in0}, [r1, :64]
+
+        vadd.s32    \out0, \tmp0, \tmp2
+        vadd.s32    \out1, \tmp1, \tmp3
+        vsub.s32    \out2, \tmp1, \tmp3
+        vsub.s32    \out3, \tmp0, \tmp2
+
+        sub         r1, r1, #8
+.endm
+
+@ Do a 4x4 transpose, using q registers for the subtransposes that don't
+@ need to address the indiviudal d registers.
+@ r0,r1 == rq0, r2,r3 == rq1
+.macro transpose_4x4 rq0, rq1, r0, r1, r2, r3
+        vtrn.32     \rq0, \rq1
+        vtrn.16     \r0, \r1
+        vtrn.16     \r2, \r3
+.endm
+
+.macro idct_4x4 bitdepth
+function ff_hevc_idct_4x4_\bitdepth\()_neon, export=1
+@r0 - coeffs
+        vld1.s16    {q0-q1}, [r0, :128]
+
+        movrel      r1, trans
+        vld1.s16    {d4}, [r1, :64]
+
+        tr_4x4      d0, d1, d2, d3, d16, d17, d18, d19, 7, q10, q11, q12, q13, q0
+        transpose_4x4 q8, q9, d16, d17, d18, d19
+
+        tr_4x4      d16, d17, d18, d19, d0, d1, d2, d3, 20 - \bitdepth, q10, q11, q12, q13, q0
+        transpose_4x4 q0, q1, d0, d1, d2, d3
+        vst1.s16    {d0-d3}, [r0, :128]
+        bx lr
+endfunc
+.endm
+
+.macro transpose8_4x4 r0, r1, r2, r3
+        vtrn.16     \r0, \r1
+        vtrn.16     \r2, \r3
+        vtrn.32     \r0, \r2
+        vtrn.32     \r1, \r3
+.endm
+
+.macro transpose_8x8 r0, r1, r2, r3, r4, r5, r6, r7, l0, l1, l2, l3, l4, l5, l6, l7
+        transpose8_4x4 \r0, \r1, \r2, \r3
+        transpose8_4x4 \r4, \r5, \r6, \r7
+
+        transpose8_4x4 \l0, \l1, \l2, \l3
+        transpose8_4x4 \l4, \l5, \l6, \l7
+.endm
+
+.macro tr_8x4 shift, in0, in1, in2, in3, in4, in5, in6, in7
+        tr_4x4_8    \in0, \in2, \in4, \in6, q8, q9, q10, q11, q12, q13, q14, q15
+
+        vmull.s16   q14, \in1, \in0[2]
+        vmull.s16   q12, \in1, \in0[0]
+        vmull.s16   q13, \in1, \in0[1]
+        sum_sub     q14, \in3, \in0[0], -
+        sum_sub     q12, \in3, \in0[1], +
+        sum_sub     q13, \in3, \in0[3], -
+
+        sum_sub     q14, \in5, \in0[3], +
+        sum_sub     q12, \in5, \in0[2], +
+        sum_sub     q13, \in5, \in0[0], -
+
+        sum_sub     q14, \in7, \in0[1], +
+        sum_sub     q12, \in7, \in0[3], +
+        sum_sub     q13, \in7, \in0[2], -
+
+        vadd.s32    q15, q10, q14
+        vsub.s32    q10, q10, q14
+        vqrshrn.s32 \in2, q15, \shift
+
+        vmull.s16   q15, \in1, \in0[3]
+        sum_sub     q15, \in3, \in0[2], -
+        sum_sub     q15, \in5, \in0[1], +
+        sum_sub     q15, \in7, \in0[0], -
+
+        vqrshrn.s32 \in5, q10, \shift
+
+        vadd.s32    q10, q8, q12
+        vsub.s32    q8, q8, q12
+        vadd.s32    q12, q9, q13
+        vsub.s32    q9, q9, q13
+        vadd.s32    q14, q11, q15
+        vsub.s32    q11, q11, q15
+
+        vqrshrn.s32 \in0, q10, \shift
+        vqrshrn.s32 \in7, q8, \shift
+        vqrshrn.s32 \in1, q12, \shift
+        vqrshrn.s32 \in6, q9, \shift
+        vqrshrn.s32 \in3, q14, \shift
+        vqrshrn.s32 \in4, q11, \shift
+.endm
+
+.macro idct_8x8 bitdepth
+function ff_hevc_idct_8x8_\bitdepth\()_neon, export=1
+@r0 - coeffs
+        vpush       {q4-q7}
+
+        mov         r1, r0
+        mov         r2, #64
+        add         r3, r0, #32
+        vld1.s16    {q0-q1}, [r1,:128], r2
+        vld1.s16    {q2-q3}, [r3,:128], r2
+        vld1.s16    {q4-q5}, [r1,:128], r2
+        vld1.s16    {q6-q7}, [r3,:128], r2
+
+        movrel      r1, trans
+
+        tr_8x4      7, d0, d2, d4, d6, d8, d10, d12, d14
+        tr_8x4      7, d1, d3, d5, d7, d9, d11, d13, d15
+
+        @ Transpose each 4x4 block, and swap how d4-d7 and d8-d11 are used.
+        @ Layout before:
+        @ d0  d1
+        @ d2  d3
+        @ d4  d5
+        @ d6  d7
+        @ d8  d9
+        @ d10 d11
+        @ d12 d13
+        @ d14 d15
+        transpose_8x8 d0, d2, d4, d6, d8, d10, d12, d14, d1, d3, d5, d7, d9, d11, d13, d15
+        @ Now the layout is:
+        @ d0  d8
+        @ d2  d10
+        @ d4  d12
+        @ d6  d14
+        @ d1  d9
+        @ d3  d11
+        @ d5  d13
+        @ d7  d15
+
+        tr_8x4      20 - \bitdepth, d0, d2, d4, d6, d1, d3, d5, d7
+        vswp        d0, d8
+        tr_8x4      20 - \bitdepth, d0, d10, d12, d14, d9, d11, d13, d15
+        vswp        d0, d8
+
+        transpose_8x8 d0, d2, d4, d6, d8, d10, d12, d14, d1, d3, d5, d7, d9, d11, d13, d15
+
+        mov         r1, r0
+        mov         r2, #64
+        add         r3, r0, #32
+        vst1.s16    {q0-q1}, [r1,:128], r2
+        vst1.s16    {q2-q3}, [r3,:128], r2
+        vst1.s16    {q4-q5}, [r1,:128], r2
+        vst1.s16    {q6-q7}, [r3,:128], r2
+
+        vpop        {q4-q7}
+        bx lr
+endfunc
+.endm
+
+idct_4x4 8
+idct_4x4 10
+
+idct_8x8 8
+idct_8x8 10
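
For reference, the arithmetic that the trans table and the two-pass idct_4x4 macro above carry out can be modelled in scalar C as below. This is only an illustrative sketch, not FFmpeg's C reference code: the helper names clip16 and tr4_pass are invented here, while the even/odd factors 64, 83 and 36 and the behaviour of vqrshrn.s32 (rounding shift plus saturation to 16 bits) are taken from the assembly and its comments.

#include <stdint.h>

/* Saturate a 32-bit intermediate to the int16_t coefficient range. */
static int16_t clip16(int32_t v)
{
    return v < -32768 ? -32768 : v > 32767 ? 32767 : (int16_t)v;
}

/* One 4-point IDCT pass: even/odd butterfly with rounding shift,
 * mirroring what tr_4x4 does per column (or per row). */
static void tr4_pass(const int16_t src[4], int16_t dst[4], int shift)
{
    int32_t e0  = 64 * (src[0] + src[2]);     /* even part */
    int32_t e1  = 64 * (src[0] - src[2]);
    int32_t o0  = 83 * src[1] + 36 * src[3];  /* odd part  */
    int32_t o1  = 36 * src[1] - 83 * src[3];
    int32_t rnd = 1 << (shift - 1);

    dst[0] = clip16((e0 + o0 + rnd) >> shift);
    dst[1] = clip16((e1 + o1 + rnd) >> shift);
    dst[2] = clip16((e1 - o1 + rnd) >> shift);
    dst[3] = clip16((e0 - o0 + rnd) >> shift);
}

A full 4x4 inverse transform runs this pass over the columns with shift 7, transposes, then runs it over the rows with shift 20 - bitdepth (12 for 8-bit, 10 for 10-bit), which matches the tr_4x4 / transpose_4x4 / tr_4x4 / transpose_4x4 sequence in the idct_4x4 macro.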

libavcodec/arm/hevcdsp_init_arm.c

@@ -19,14 +19,16 @@
  */
 
 #include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
 #include "libavutil/arm/cpu.h"
 #include "libavcodec/hevcdsp.h"
 #include "hevcdsp_arm.h"
 
-av_cold void ff_hevcdsp_init_arm(HEVCDSPContext *c, const int bit_depth)
+av_cold void ff_hevc_dsp_init_arm(HEVCDSPContext *c, const int bit_depth)
 {
     int cpu_flags = av_get_cpu_flags();
 
     if (have_neon(cpu_flags))
-        ff_hevcdsp_init_neon(c, bit_depth);
+        ff_hevc_dsp_init_neon(c, bit_depth);
 }

libavcodec/arm/hevcdsp_init_neon.c

@@ -27,8 +27,10 @@ void ff_hevc_v_loop_filter_luma_neon(uint8_t *_pix, ptrdiff_t _stride, int _beta
 void ff_hevc_h_loop_filter_luma_neon(uint8_t *_pix, ptrdiff_t _stride, int _beta, int *_tc, uint8_t *_no_p, uint8_t *_no_q);
 void ff_hevc_v_loop_filter_chroma_neon(uint8_t *_pix, ptrdiff_t _stride, int *_tc, uint8_t *_no_p, uint8_t *_no_q);
 void ff_hevc_h_loop_filter_chroma_neon(uint8_t *_pix, ptrdiff_t _stride, int *_tc, uint8_t *_no_p, uint8_t *_no_q);
-void ff_hevc_transform_4x4_neon_8(int16_t *coeffs, int col_limit);
-void ff_hevc_transform_8x8_neon_8(int16_t *coeffs, int col_limit);
+void ff_hevc_idct_4x4_8_neon(int16_t *coeffs, int col_limit);
+void ff_hevc_idct_8x8_8_neon(int16_t *coeffs, int col_limit);
+void ff_hevc_idct_4x4_10_neon(int16_t *coeffs, int col_limit);
+void ff_hevc_idct_8x8_10_neon(int16_t *coeffs, int col_limit);
 void ff_hevc_idct_4x4_dc_neon_8(int16_t *coeffs);
 void ff_hevc_idct_8x8_dc_neon_8(int16_t *coeffs);
 void ff_hevc_idct_16x16_dc_neon_8(int16_t *coeffs);

@@ -142,7 +144,7 @@ void ff_hevc_put_qpel_bi_neon_wrapper(uint8_t *dst, ptrdiff_t dststride, uint8_t
     put_hevc_qpel_uw_neon[my][mx](dst, dststride, src, srcstride, width, height, src2, MAX_PB_SIZE);
 }
 
-av_cold void ff_hevcdsp_init_neon(HEVCDSPContext *c, const int bit_depth)
+av_cold void ff_hevc_dsp_init_neon(HEVCDSPContext *c, const int bit_depth)
 {
     if (bit_depth == 8) {
         int x;

@@ -150,8 +152,8 @@ av_cold void ff_hevcdsp_init_neon(HEVCDSPContext *c, const int bit_depth)
         c->hevc_h_loop_filter_luma = ff_hevc_h_loop_filter_luma_neon;
         c->hevc_v_loop_filter_chroma = ff_hevc_v_loop_filter_chroma_neon;
         c->hevc_h_loop_filter_chroma = ff_hevc_h_loop_filter_chroma_neon;
-        c->idct[0] = ff_hevc_transform_4x4_neon_8;
-        c->idct[1] = ff_hevc_transform_8x8_neon_8;
+        c->idct[0] = ff_hevc_idct_4x4_8_neon;
+        c->idct[1] = ff_hevc_idct_8x8_8_neon;
         c->idct_dc[0] = ff_hevc_idct_4x4_dc_neon_8;
         c->idct_dc[1] = ff_hevc_idct_8x8_dc_neon_8;
         c->idct_dc[2] = ff_hevc_idct_16x16_dc_neon_8;

@@ -221,4 +223,9 @@ av_cold void ff_hevcdsp_init_neon(HEVCDSPContext *c, const int bit_depth)
         c->put_hevc_qpel_uni[8][0][0] = ff_hevc_put_qpel_uw_pixels_w48_neon_8;
         c->put_hevc_qpel_uni[9][0][0] = ff_hevc_put_qpel_uw_pixels_w64_neon_8;
     }
+
+    if (bit_depth == 10) {
+        c->idct[0] = ff_hevc_idct_4x4_10_neon;
+        c->idct[1] = ff_hevc_idct_8x8_10_neon;
+    }
 }
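
A minimal sketch of how these pointers end up being exercised, assuming a build inside the FFmpeg source tree; the decoder itself indexes the table by transform size (roughly c->idct[log2_size - 2]), and the coefficient buffer here is a made-up example:

#include "libavcodec/hevcdsp.h"

/* Hypothetical caller: fill the DSP table and run the 4x4 IDCT in place. */
static void run_idct_4x4(int bit_depth)
{
    HEVCDSPContext dsp;
    int16_t coeffs[4 * 4] = { 64 };      /* a single DC coefficient, rest zero */

    ff_hevc_dsp_init(&dsp, bit_depth);   /* selects the NEON IDCT on ARM+NEON */
    dsp.idct[0](coeffs, 4);              /* idct[0] == 4x4, col_limit = 4     */
}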

libavcodec/hevcdsp.c

@@ -257,12 +257,12 @@ int i = 0;
         break;
     }
 
+    if (ARCH_ARM)
+        ff_hevc_dsp_init_arm(hevcdsp, bit_depth);
     if (ARCH_PPC)
         ff_hevc_dsp_init_ppc(hevcdsp, bit_depth);
     if (ARCH_X86)
         ff_hevc_dsp_init_x86(hevcdsp, bit_depth);
-    if (ARCH_ARM)
-        ff_hevcdsp_init_arm(hevcdsp, bit_depth);
     if (ARCH_MIPS)
         ff_hevc_dsp_init_mips(hevcdsp, bit_depth);
 }

libavcodec/hevcdsp.h

@@ -127,8 +127,9 @@ void ff_hevc_dsp_init(HEVCDSPContext *hpc, int bit_depth);
 extern const int8_t ff_hevc_epel_filters[7][4];
 extern const int8_t ff_hevc_qpel_filters[3][16];
 
+void ff_hevc_dsp_init_arm(HEVCDSPContext *c, const int bit_depth);
 void ff_hevc_dsp_init_ppc(HEVCDSPContext *c, const int bit_depth);
 void ff_hevc_dsp_init_x86(HEVCDSPContext *c, const int bit_depth);
-void ff_hevcdsp_init_arm(HEVCDSPContext *c, const int bit_depth);
 void ff_hevc_dsp_init_mips(HEVCDSPContext *c, const int bit_depth);
 
 #endif /* AVCODEC_HEVCDSP_H */