FFmpeg/libavutil/mem_internal.h
Timo Rothenpieler 7945d30e91 avutil/mem: limit alignment to maximum simd align
FFmpeg has instances of DECLARE_ALIGNED(32, ...) in a lot of structs,
which then end up heap-allocated.
Declaring any variable in a struct, or tree of structs, to be 32-byte
aligned allows the compiler to assume that the entire struct itself is
also 32-byte aligned.

This might make the compiler emit code which straight up crashes or
misbehaves in other ways, and in at least one instance it is now
documented to do exactly that (see ticket 10549 on trac).
The issue there is that an unrelated variable in SingleChannelElement is
declared to have an alignment of 32 bytes. So if the compiler does a copy
in decode_cpe() with AVX instructions, but FFmpeg is built with
--disable-avx, this results in a crash, since lavu's heap allocations are
then only 16-byte aligned.

Mind you, even if the compiler does not emit AVX instructions, the code
is still invalid and could misbehave; it just happens not to. Declaring
any variable in a struct with a 32-byte alignment promises 32-byte
alignment of the whole struct to the compiler.

This patch limits the maximum alignment to the maximum possible SIMD
alignment according to configure.
While not perfect, it at the very least gets rid of a lot of UB by
matching the maximum DECLARE_ALIGNED value with the alignment of the heap
allocations done by lavu.
2024-02-27 19:41:09 +01:00
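
A minimal sketch of the hazard described above (hypothetical struct and
function names; FFmpeg's real case involves SingleChannelElement and
decode_cpe() in the AAC decoder):

    #include <stdint.h>

    typedef struct Example {
        /* One over-aligned member (what DECLARE_ALIGNED(32, ...) expands
         * to on GCC/Clang) promises 32-byte alignment of the whole struct. */
        int16_t coeffs[1024] __attribute__ ((aligned (32)));
        int other_state;
    } Example;

    void copy_example(Example *dst, const Example *src)
    {
        /* The compiler may lower this struct copy to aligned 32-byte vector
         * moves (e.g. vmovdqa). If dst or src points at heap memory that is
         * only 16-byte aligned, such a move faults. */
        *dst = *src;
    }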

/*
* Copyright (c) 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

#ifndef AVUTIL_MEM_INTERNAL_H
#define AVUTIL_MEM_INTERNAL_H

#include "config.h"

#include <stdint.h>

#include "attributes.h"
#include "macros.h"
#include "mem.h"
#include "version.h"

/**
* @def DECLARE_ALIGNED(n,t,v)
* Declare a variable that is aligned in memory.
*
* @code{.c}
* DECLARE_ALIGNED(16, uint16_t, aligned_int) = 42;
* DECLARE_ALIGNED(32, uint8_t, aligned_array)[128];
*
* // The default-alignment equivalent would be
* uint16_t aligned_int = 42;
* uint8_t aligned_array[128];
* @endcode
*
* @param n Minimum alignment in bytes
* @param t Type of the variable (or array element)
* @param v Name of the variable
*/
/**
* @def DECLARE_ASM_ALIGNED(n,t,v)
* Declare an aligned variable appropriate for use in inline assembly code.
*
* @code{.c}
* DECLARE_ASM_ALIGNED(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008);
* @endcode
*
* @param n Minimum alignment in bytes
* @param t Type of the variable (or array element)
* @param v Name of the variable
*/
/**
* @def DECLARE_ASM_CONST(n,t,v)
* Declare a static constant aligned variable appropriate for use in inline
* assembly code.
*
* @code{.c}
* DECLARE_ASM_CONST(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008);
* @endcode
*
* @param n Minimum alignment in bytes
* @param t Type of the variable (or array element)
* @param v Name of the variable
*/
#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C)
    #define DECLARE_ALIGNED_T(n,t,v)    t __attribute__ ((aligned (n))) v
    #define DECLARE_ASM_ALIGNED(n,t,v)  t __attribute__ ((aligned (n))) v
    #define DECLARE_ASM_CONST(n,t,v)    const t __attribute__ ((aligned (n))) v
#elif defined(__DJGPP__)
    #define DECLARE_ALIGNED_T(n,t,v)    t __attribute__ ((aligned (FFMIN(n, 16)))) v
    #define DECLARE_ASM_ALIGNED(n,t,v)  t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v
    #define DECLARE_ASM_CONST(n,t,v)    static const t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v
#elif defined(__GNUC__) || defined(__clang__)
    #define DECLARE_ALIGNED_T(n,t,v)    t __attribute__ ((aligned (n))) v
    #define DECLARE_ASM_ALIGNED(n,t,v)  t av_used __attribute__ ((aligned (n))) v
    #define DECLARE_ASM_CONST(n,t,v)    static const t av_used __attribute__ ((aligned (n))) v
#elif defined(_MSC_VER)
    #define DECLARE_ALIGNED_T(n,t,v)    __declspec(align(n)) t v
    #define DECLARE_ASM_ALIGNED(n,t,v)  __declspec(align(n)) t v
    #define DECLARE_ASM_CONST(n,t,v)    __declspec(align(n)) static const t v
#else
    #define DECLARE_ALIGNED_T(n,t,v)    t v
    #define DECLARE_ASM_ALIGNED(n,t,v)  t v
    #define DECLARE_ASM_CONST(n,t,v)    static const t v
#endif

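/* Illustrative expansion (not part of the original header): with GCC/Clang,
 * the documented usage
 *     DECLARE_ASM_CONST(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008);
 * becomes
 *     static const uint64_t av_used __attribute__ ((aligned (16))) pw_08 =
 *         UINT64_C(0x0008000800080008);
 * so the constant both survives elimination as seemingly unreferenced
 * (av_used) and is safe to load with aligned 16-byte SIMD moves. */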
#if HAVE_SIMD_ALIGN_64
#define ALIGN_64 64
#define ALIGN_32 32
#elif HAVE_SIMD_ALIGN_32
#define ALIGN_64 32
#define ALIGN_32 32
#else
#define ALIGN_64 16
#define ALIGN_32 16
#endif

#define DECLARE_ALIGNED(n,t,v)      DECLARE_ALIGNED_V(n,t,v)

// Macro needs to be double-wrapped in order to expand
// possible other macros being passed for n.
#define DECLARE_ALIGNED_V(n,t,v)    DECLARE_ALIGNED_##n(t,v)

#define DECLARE_ALIGNED_4(t,v)      DECLARE_ALIGNED_T(       4, t, v)
#define DECLARE_ALIGNED_8(t,v)      DECLARE_ALIGNED_T(       8, t, v)
#define DECLARE_ALIGNED_16(t,v)     DECLARE_ALIGNED_T(      16, t, v)
#define DECLARE_ALIGNED_32(t,v)     DECLARE_ALIGNED_T(ALIGN_32, t, v)
#define DECLARE_ALIGNED_64(t,v)     DECLARE_ALIGNED_T(ALIGN_64, t, v)
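
/* Illustrative expansion (not part of the original header): with GCC/Clang,
 *     DECLARE_ALIGNED(32, float, buf)[8];
 * goes through DECLARE_ALIGNED_32() to
 *     float __attribute__ ((aligned (ALIGN_32))) buf[8];
 * On a build whose configure found no 32-byte SIMD (e.g. --disable-avx),
 * ALIGN_32 is 16, so the requested alignment is capped to match the
 * alignment of lavu's heap allocations, as the commit message above
 * describes. */
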
// Some broken preprocessors need a second expansion
// to be forced to tokenize __VA_ARGS__
#define E1(x) x
#define LOCAL_ALIGNED_A(a, t, v, s, o, ...)             \
    uint8_t la_##v[sizeof(t s o) + (a)];                \
    t (*v) o = (void *)FFALIGN((uintptr_t)la_##v, a)

#define LOCAL_ALIGNED_D(a, t, v, s, o, ...)             \
    DECLARE_ALIGNED(a, t, la_##v) s o;                  \
    t (*v) o = la_##v
#define LOCAL_ALIGNED(a, t, v, ...) LOCAL_ALIGNED_##a(t, v, __VA_ARGS__)
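
/* Usage sketch (illustrative, not part of the original header):
 *     LOCAL_ALIGNED_16(int16_t, block, [2], [64]);
 * declares int16_t (*block)[64] pointing at 16-byte-aligned storage for an
 * int16_t[2][64]. With HAVE_LOCAL_ALIGNED, LOCAL_ALIGNED_D() declares the
 * backing array directly via DECLARE_ALIGNED(); otherwise LOCAL_ALIGNED_A()
 * over-allocates a raw byte buffer and rounds the pointer up with
 * FFALIGN(). */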
#if HAVE_LOCAL_ALIGNED
# define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_D(4, t, v, __VA_ARGS__,,))
#else
# define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_A(4, t, v, __VA_ARGS__,,))
#endif
#if HAVE_LOCAL_ALIGNED
# define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,))
#else
# define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_A(8, t, v, __VA_ARGS__,,))
#endif
#if HAVE_LOCAL_ALIGNED
# define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,))
#else
# define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_A(16, t, v, __VA_ARGS__,,))
#endif
#if HAVE_LOCAL_ALIGNED
# define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_D(32, t, v, __VA_ARGS__,,))
#else
# define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_A(32, t, v, __VA_ARGS__,,))
#endif
#endif /* AVUTIL_MEM_INTERNAL_H */