1
0
mirror of https://github.com/FFmpeg/FFmpeg.git synced 2024-12-23 12:43:46 +02:00

arm: use HAVE*_INLINE/EXTERNAL macros for conditional compilation

These macros reflect the actual capabilities required here.

Signed-off-by: Mans Rullgard <mans@mansr.com>
This commit is contained in:
Mans Rullgard 2012-12-01 16:41:39 +00:00
parent 7f2b3dcabd
commit a7831d509f
10 changed files with 21 additions and 21 deletions

View File

@@ -23,7 +23,7 @@
#include "config.h"
#if HAVE_NEON && HAVE_INLINE_ASM
#if HAVE_NEON_INLINE
#define VMUL2 VMUL2
static inline float *VMUL2(float *dst, const float *v, unsigned idx,
@@ -138,6 +138,6 @@ static inline float *VMUL4S(float *dst, const float *v, unsigned idx,
return dst;
}
#endif /* HAVE_NEON && HAVE_INLINE_ASM */
#endif /* HAVE_NEON_INLINE */
#endif /* AVCODEC_ARM_AAC_H */

View File

@@ -25,7 +25,7 @@
#include "config.h"
#include "libavutil/intmath.h"
#if HAVE_ARMV6 && HAVE_INLINE_ASM && AV_GCC_VERSION_AT_LEAST(4,4)
#if HAVE_ARMV6_INLINE && AV_GCC_VERSION_AT_LEAST(4,4)
#define decode_blockcodes decode_blockcodes
static inline int decode_blockcodes(int code1, int code2, int levels,
@@ -79,7 +79,7 @@ static inline int decode_blockcodes(int code1, int code2, int levels,
#endif
#if HAVE_NEON && HAVE_INLINE_ASM && HAVE_ASM_MOD_Y
#if HAVE_NEON_INLINE && HAVE_ASM_MOD_Y
#define int8x8_fmul_int32 int8x8_fmul_int32
static inline void int8x8_fmul_int32(float *dst, const int8_t *src, int scale)

View File

@@ -22,7 +22,7 @@
#include "config.h"
#include "libavutil/arm/asm.S"
#if HAVE_ARMV5TE
#if HAVE_ARMV5TE_EXTERNAL
function ff_prefetch_arm, export=1
subs r2, r2, #1
pld [r0]

View File

@@ -28,7 +28,7 @@
#if HAVE_INLINE_ASM
#if HAVE_ARMV6
#if HAVE_ARMV6_INLINE
#define MULH MULH
static inline av_const int MULH(int a, int b)
{
@@ -50,7 +50,7 @@ static av_always_inline av_const int FASTDIV(int a, int b)
return r;
}
#else /* HAVE_ARMV6 */
#else /* HAVE_ARMV6_INLINE */
#define FASTDIV FASTDIV
static av_always_inline av_const int FASTDIV(int a, int b)
@@ -64,7 +64,7 @@ static av_always_inline av_const int FASTDIV(int a, int b)
#define MLS64(d, a, b) MAC64(d, -(a), b)
#if HAVE_ARMV5TE
#if HAVE_ARMV5TE_INLINE
/* signed 16x16 -> 32 multiply add accumulate */
# define MAC16(rt, ra, rb) \

View File

@@ -37,7 +37,7 @@
# define U(x)
#endif
#if HAVE_ARMV6 && HAVE_INLINE_ASM
#if HAVE_ARMV6_INLINE
#define vp56_rac_get_prob vp56_rac_get_prob_armv6
static inline int vp56_rac_get_prob_armv6(VP56RangeCoder *c, int pr)

View File

@@ -21,7 +21,7 @@
#include "config.h"
#if HAVE_ARMV6
#if HAVE_ARMV6_EXTERNAL
#define decode_block_coeffs_internal ff_decode_block_coeffs_armv6
int ff_decode_block_coeffs_armv6(VP56RangeCoder *rc, DCTELEM block[16],
uint8_t probs[8][3][NUM_DCT_TOKENS-1],

View File

@@ -118,7 +118,7 @@ function ff_vp8_luma_dc_wht_armv6, export=1
usub16 r4, r4, r6 @ block[0,1][3]
usub16 r5, r5, r12 @ block[2,3][3]
#if HAVE_ARMV6T2
#if HAVE_ARMV6T2_EXTERNAL
sbfx r6, r8, #3, #13
sbfx r12, r7, #3, #13
sbfx r1, r9, #3, #13
@@ -151,7 +151,7 @@ function ff_vp8_luma_dc_wht_armv6, export=1
strh r4, [r0], #32
asr r10, r5, #19 @ block[3][3]
#if HAVE_ARMV6T2
#if HAVE_ARMV6T2_EXTERNAL
sbfx r2, r2, #3, #13
sbfx lr, lr, #3, #13
sbfx r3, r3, #3, #13
@@ -284,7 +284,7 @@ function ff_vp8_idct_add_armv6, export=1
sxth r12, r12
ldr r9, [r0, r2]
sxth r1, r1
#if HAVE_ARMV6T2
#if HAVE_ARMV6T2_EXTERNAL
sbfx r7, r7, #3, #13
sbfx r10, r10, #3, #13
#else

View File

@@ -89,7 +89,7 @@ ELF .size \name, . - \name
\name:
.endm
#if !HAVE_ARMV6T2
#if !HAVE_ARMV6T2_EXTERNAL
.macro movw rd, val
mov \rd, \val & 255
orr \rd, \val & ~255
@@ -97,7 +97,7 @@ ELF .size \name, . - \name
#endif
.macro mov32 rd, val
#if HAVE_ARMV6T2
#if HAVE_ARMV6T2_EXTERNAL
movw \rd, #(\val) & 0xffff
.if (\val) >> 16
movt \rd, #(\val) >> 16
@@ -146,7 +146,7 @@ T ldr \rd, [\rd]
.macro movrel rd, val
#if CONFIG_PIC
ldpic \rd, \val
#elif HAVE_ARMV6T2 && !defined(__APPLE__)
#elif HAVE_ARMV6T2_EXTERNAL && !defined(__APPLE__)
movw \rd, #:lower16:\val
movt \rd, #:upper16:\val
#else

View File

@@ -35,7 +35,7 @@ static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
#elif HAVE_INLINE_ASM
#if HAVE_ARMV6
#if HAVE_ARMV6_INLINE
#define av_bswap16 av_bswap16
static av_always_inline av_const unsigned av_bswap16(unsigned x)
{
@@ -48,7 +48,7 @@ static av_always_inline av_const unsigned av_bswap16(unsigned x)
#define av_bswap32 av_bswap32
static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
{
#if HAVE_ARMV6
#if HAVE_ARMV6_INLINE
__asm__("rev %0, %0" : "+r"(x));
#else
uint32_t t;
@@ -57,7 +57,7 @@ static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
"mov %0, %0, ror #8 \n\t"
"eor %0, %0, %1, lsr #8 \n\t"
: "+r"(x), "=&r"(t));
#endif /* HAVE_ARMV6 */
#endif /* HAVE_ARMV6_INLINE */
return x;
}
#endif /* !AV_GCC_VERSION_AT_LEAST(4,5) */

View File

@@ -28,7 +28,7 @@
#if HAVE_INLINE_ASM
#if HAVE_ARMV6
#if HAVE_ARMV6_INLINE
#define av_clip_uint8 av_clip_uint8_arm
static av_always_inline av_const unsigned av_clip_uint8_arm(int a)
@@ -86,7 +86,7 @@ static av_always_inline int av_sat_dadd32_arm(int a, int b)
return r;
}
#endif /* HAVE_ARMV6 */
#endif /* HAVE_ARMV6_INLINE */
#if HAVE_ASM_MOD_Q