
checkasm: hevc_pel: Use helpers for checking for writes out of bounds

This allows catching whether the functions write outside of the
designated rectangle, and if run with "checkasm -v", it also
prints out on which side of the rectangle the overwrite occurred.

Signed-off-by: Martin Storsjö <martin@martin.st>
Martin Storsjö
2025-03-26 00:09:07 +02:00
parent f6cd9bfa82
commit 4d4b301e4a
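
For context, the idea behind the PIXEL_RECT()/CLEAR_PIXEL_RECT()/checkasm_check_pixel_padded()
helpers is to surround the active width x height destination area with a guard band, fill the
whole allocation with a known sentinel, and verify after the call that no guard byte changed.
The standalone sketch below only illustrates that technique; PAD, clear_rect, check_padding
and broken_copy are hypothetical names, not the checkasm API used by this commit.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAD 8 /* guard band, in bytes, on every side of the active area */

    /* Fill the whole allocation (active area plus guard band) with a sentinel. */
    static void clear_rect(uint8_t *buf, int stride, int h)
    {
        memset(buf, 0xAA, stride * (h + 2 * PAD));
    }

    /* Return 0 if no guard byte was modified, otherwise report the side. */
    static int check_padding(const uint8_t *buf, int stride, int w, int h)
    {
        const uint8_t *rect = buf + PAD * stride + PAD; /* top-left of the w x h area */
        for (int y = -PAD; y < h + PAD; y++) {
            for (int x = -PAD; x < w + PAD; x++) {
                if (x >= 0 && x < w && y >= 0 && y < h)
                    continue; /* inside the rectangle; writes are allowed here */
                if (rect[y * stride + x] != 0xAA) {
                    fprintf(stderr, "guard band overwritten %s the rectangle at (%d, %d)\n",
                            y < 0 ? "above" : y >= h ? "below" :
                            x < 0 ? "left of" : "right of", x, y);
                    return 1;
                }
            }
        }
        return 0;
    }

    /* A deliberately faulty "DSP" routine: writes one column too many. */
    static void broken_copy(uint8_t *dst, int stride, int w, int h)
    {
        for (int y = 0; y < h; y++)
            memset(dst + y * stride, 0x55, w + 1);
    }

    int main(void)
    {
        enum { W = 16, H = 16, STRIDE = W + 2 * PAD };
        uint8_t buf[STRIDE * (H + 2 * PAD)];
        uint8_t *rect = buf + PAD * STRIDE + PAD;

        clear_rect(buf, STRIDE, H);
        broken_copy(rect, STRIDE, W, H);
        return check_padding(buf, STRIDE, W, H); /* non-zero: right-side overwrite caught */
    }

The real helpers report the failing side in the same spirit when checkasm is run with "-v",
as described in the commit message above.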

tests/checkasm/hevc_pel.c

@@ -44,9 +44,14 @@ static const int offsets[] = {0, 255, -1 };
uint32_t r = rnd() & mask; \
AV_WN32A(buf0 + k, r); \
AV_WN32A(buf1 + k, r); \
if (k >= BUF_SIZE) \
continue; \
r = rnd(); \
} \
} while (0)
#define randomize_buffers_dst() \
do { \
int k; \
for (k = 0; k < BUF_SIZE; k += 4) { \
uint32_t r = rnd(); \
AV_WN32A(dst0 + k, r); \
AV_WN32A(dst1 + k, r); \
} \
@@ -100,6 +105,7 @@ static void checkasm_check_hevc_qpel(void)
"put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
int16_t *dstw0 = (int16_t *) dst0, *dstw1 = (int16_t *) dst1;
randomize_buffers();
randomize_buffers_dst();
call_ref(dstw0, src0, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
call_new(dstw1, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
checkasm_check(int16_t, dstw0, MAX_PB_SIZE * sizeof(int16_t),
@@ -118,8 +124,8 @@ static void checkasm_check_hevc_qpel_uni(void)
{
LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE + SRC_EXTRA]);
LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE + SRC_EXTRA]);
LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
PIXEL_RECT(dst0, 64, 64);
PIXEL_RECT(dst1, 64, 64);
HEVCDSPContext h;
int size, bit_depth, i, j;
@@ -143,16 +149,18 @@ static void checkasm_check_hevc_qpel_uni(void)
if (check_func(h.put_hevc_qpel_uni[size][j][i],
"put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
randomize_buffers();
call_ref(dst0, sizes[size] * SIZEOF_PIXEL,
CLEAR_PIXEL_RECT(dst0);
CLEAR_PIXEL_RECT(dst1);
call_ref(dst0, dst0_stride,
src0, sizes[size] * SIZEOF_PIXEL,
sizes[size], i, j, sizes[size]);
call_new(dst1, sizes[size] * SIZEOF_PIXEL,
call_new(dst1, dst1_stride,
src1, sizes[size] * SIZEOF_PIXEL,
sizes[size], i, j, sizes[size]);
checkasm_check_pixel(dst0, sizes[size] * SIZEOF_PIXEL,
dst1, sizes[size] * SIZEOF_PIXEL,
size[sizes], size[sizes], "dst");
bench_new(dst1, sizes[size] * SIZEOF_PIXEL,
checkasm_check_pixel_padded(dst0, dst0_stride,
dst1, dst1_stride,
size[sizes], size[sizes], "dst");
bench_new(dst1, dst1_stride,
src1, sizes[size] * SIZEOF_PIXEL,
sizes[size], i, j, sizes[size]);
}
@@ -167,8 +175,8 @@ static void checkasm_check_hevc_qpel_uni_w(void)
{
LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE + SRC_EXTRA]);
LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE + SRC_EXTRA]);
LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
PIXEL_RECT(dst0, 64, 64);
PIXEL_RECT(dst1, 64, 64);
HEVCDSPContext h;
int size, bit_depth, i, j;
@@ -196,16 +204,18 @@ static void checkasm_check_hevc_qpel_uni_w(void)
for (wx = weights; *wx >= 0; wx++) {
for (ox = offsets; *ox >= 0; ox++) {
randomize_buffers();
call_ref(dst0, sizes[size] * SIZEOF_PIXEL,
CLEAR_PIXEL_RECT(dst0);
CLEAR_PIXEL_RECT(dst1);
call_ref(dst0, dst0_stride,
src0, sizes[size] * SIZEOF_PIXEL,
sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
call_new(dst1, sizes[size] * SIZEOF_PIXEL,
call_new(dst1, dst1_stride,
src1, sizes[size] * SIZEOF_PIXEL,
sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
checkasm_check_pixel(dst0, sizes[size] * SIZEOF_PIXEL,
dst1, sizes[size] * SIZEOF_PIXEL,
size[sizes], size[sizes], "dst");
bench_new(dst1, sizes[size] * SIZEOF_PIXEL,
checkasm_check_pixel_padded(dst0, dst0_stride,
dst1, dst1_stride,
size[sizes], size[sizes], "dst");
bench_new(dst1, dst1_stride,
src1, sizes[size] * SIZEOF_PIXEL,
sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
}
@@ -223,8 +233,8 @@ static void checkasm_check_hevc_qpel_bi(void)
{
LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE + SRC_EXTRA]);
LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE + SRC_EXTRA]);
LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
PIXEL_RECT(dst0, 64, 64);
PIXEL_RECT(dst1, 64, 64);
LOCAL_ALIGNED_32(int16_t, ref0, [BUF_SIZE]);
LOCAL_ALIGNED_32(int16_t, ref1, [BUF_SIZE]);
@@ -251,16 +261,18 @@ static void checkasm_check_hevc_qpel_bi(void)
if (check_func(h.put_hevc_qpel_bi[size][j][i],
"put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
randomize_buffers_ref();
call_ref(dst0, sizes[size] * SIZEOF_PIXEL,
CLEAR_PIXEL_RECT(dst0);
CLEAR_PIXEL_RECT(dst1);
call_ref(dst0, dst0_stride,
src0, sizes[size] * SIZEOF_PIXEL,
ref0, sizes[size], i, j, sizes[size]);
call_new(dst1, sizes[size] * SIZEOF_PIXEL,
call_new(dst1, dst1_stride,
src1, sizes[size] * SIZEOF_PIXEL,
ref1, sizes[size], i, j, sizes[size]);
checkasm_check_pixel(dst0, sizes[size] * SIZEOF_PIXEL,
dst1, sizes[size] * SIZEOF_PIXEL,
size[sizes], size[sizes], "dst");
bench_new(dst1, sizes[size] * SIZEOF_PIXEL,
checkasm_check_pixel_padded(dst0, dst0_stride,
dst1, dst1_stride,
size[sizes], size[sizes], "dst");
bench_new(dst1, dst1_stride,
src1, sizes[size] * SIZEOF_PIXEL,
ref1, sizes[size], i, j, sizes[size]);
}
@@ -275,8 +287,8 @@ static void checkasm_check_hevc_qpel_bi_w(void)
{
LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE + SRC_EXTRA]);
LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE + SRC_EXTRA]);
LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
PIXEL_RECT(dst0, 64, 64);
PIXEL_RECT(dst1, 64, 64);
LOCAL_ALIGNED_32(int16_t, ref0, [BUF_SIZE]);
LOCAL_ALIGNED_32(int16_t, ref1, [BUF_SIZE]);
@@ -308,16 +320,18 @@ static void checkasm_check_hevc_qpel_bi_w(void)
for (wx = weights; *wx >= 0; wx++) {
for (ox = offsets; *ox >= 0; ox++) {
randomize_buffers_ref();
call_ref(dst0, sizes[size] * SIZEOF_PIXEL,
CLEAR_PIXEL_RECT(dst0);
CLEAR_PIXEL_RECT(dst1);
call_ref(dst0, dst0_stride,
src0, sizes[size] * SIZEOF_PIXEL,
ref0, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
call_new(dst1, sizes[size] * SIZEOF_PIXEL,
call_new(dst1, dst1_stride,
src1, sizes[size] * SIZEOF_PIXEL,
ref1, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
checkasm_check_pixel(dst0, sizes[size] * SIZEOF_PIXEL,
dst1, sizes[size] * SIZEOF_PIXEL,
size[sizes], size[sizes], "dst");
bench_new(dst1, sizes[size] * SIZEOF_PIXEL,
checkasm_check_pixel_padded(dst0, dst0_stride,
dst1, dst1_stride,
size[sizes], size[sizes], "dst");
bench_new(dst1, dst1_stride,
src1, sizes[size] * SIZEOF_PIXEL,
ref1, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
}
@@ -364,6 +378,7 @@ static void checkasm_check_hevc_epel(void)
"put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
int16_t *dstw0 = (int16_t *) dst0, *dstw1 = (int16_t *) dst1;
randomize_buffers();
randomize_buffers_dst();
call_ref(dstw0, src0, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
call_new(dstw1, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
checkasm_check(int16_t, dstw0, MAX_PB_SIZE * sizeof(int16_t),
@@ -382,8 +397,8 @@ static void checkasm_check_hevc_epel_uni(void)
{
LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
PIXEL_RECT(dst0, 64, 64);
PIXEL_RECT(dst1, 64, 64);
HEVCDSPContext h;
int size, bit_depth, i, j;
@@ -407,16 +422,18 @@ static void checkasm_check_hevc_epel_uni(void)
if (check_func(h.put_hevc_epel_uni[size][j][i],
"put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
randomize_buffers();
call_ref(dst0, sizes[size] * SIZEOF_PIXEL,
CLEAR_PIXEL_RECT(dst0);
CLEAR_PIXEL_RECT(dst1);
call_ref(dst0, dst0_stride,
src0, sizes[size] * SIZEOF_PIXEL,
sizes[size], i, j, sizes[size]);
call_new(dst1, sizes[size] * SIZEOF_PIXEL,
call_new(dst1, dst1_stride,
src1, sizes[size] * SIZEOF_PIXEL,
sizes[size], i, j, sizes[size]);
checkasm_check_pixel(dst0, sizes[size] * SIZEOF_PIXEL,
dst1, sizes[size] * SIZEOF_PIXEL,
size[sizes], size[sizes], "dst");
bench_new(dst1, sizes[size] * SIZEOF_PIXEL,
checkasm_check_pixel_padded(dst0, dst0_stride,
dst1, dst1_stride,
size[sizes], size[sizes], "dst");
bench_new(dst1, dst1_stride,
src1, sizes[size] * SIZEOF_PIXEL,
sizes[size], i, j, sizes[size]);
}
@@ -431,8 +448,8 @@ static void checkasm_check_hevc_epel_uni_w(void)
{
LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
PIXEL_RECT(dst0, 64, 64);
PIXEL_RECT(dst1, 64, 64);
HEVCDSPContext h;
int size, bit_depth, i, j;
@@ -460,16 +477,18 @@ static void checkasm_check_hevc_epel_uni_w(void)
for (wx = weights; *wx >= 0; wx++) {
for (ox = offsets; *ox >= 0; ox++) {
randomize_buffers();
call_ref(dst0, sizes[size] * SIZEOF_PIXEL,
CLEAR_PIXEL_RECT(dst0);
CLEAR_PIXEL_RECT(dst1);
call_ref(dst0, dst0_stride,
src0, sizes[size] * SIZEOF_PIXEL,
sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
call_new(dst1, sizes[size] * SIZEOF_PIXEL,
call_new(dst1, dst1_stride,
src1, sizes[size] * SIZEOF_PIXEL,
sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
checkasm_check_pixel(dst0, sizes[size] * SIZEOF_PIXEL,
dst1, sizes[size] * SIZEOF_PIXEL,
size[sizes], size[sizes], "dst");
bench_new(dst1, sizes[size] * SIZEOF_PIXEL,
checkasm_check_pixel_padded(dst0, dst0_stride,
dst1, dst1_stride,
size[sizes], size[sizes], "dst");
bench_new(dst1, dst1_stride,
src1, sizes[size] * SIZEOF_PIXEL,
sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
}
@@ -487,8 +506,8 @@ static void checkasm_check_hevc_epel_bi(void)
{
LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
PIXEL_RECT(dst0, 64, 64);
PIXEL_RECT(dst1, 64, 64);
LOCAL_ALIGNED_32(int16_t, ref0, [BUF_SIZE]);
LOCAL_ALIGNED_32(int16_t, ref1, [BUF_SIZE]);
@@ -515,16 +534,18 @@ static void checkasm_check_hevc_epel_bi(void)
if (check_func(h.put_hevc_epel_bi[size][j][i],
"put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
randomize_buffers_ref();
call_ref(dst0, sizes[size] * SIZEOF_PIXEL,
CLEAR_PIXEL_RECT(dst0);
CLEAR_PIXEL_RECT(dst1);
call_ref(dst0, dst0_stride,
src0, sizes[size] * SIZEOF_PIXEL,
ref0, sizes[size], i, j, sizes[size]);
call_new(dst1, sizes[size] * SIZEOF_PIXEL,
call_new(dst1, dst1_stride,
src1, sizes[size] * SIZEOF_PIXEL,
ref1, sizes[size], i, j, sizes[size]);
checkasm_check_pixel(dst0, sizes[size] * SIZEOF_PIXEL,
dst1, sizes[size] * SIZEOF_PIXEL,
size[sizes], size[sizes], "dst");
bench_new(dst1, sizes[size] * SIZEOF_PIXEL,
checkasm_check_pixel_padded(dst0, dst0_stride,
dst1, dst1_stride,
size[sizes], size[sizes], "dst");
bench_new(dst1, dst1_stride,
src1, sizes[size] * SIZEOF_PIXEL,
ref1, sizes[size], i, j, sizes[size]);
}
@@ -539,8 +560,8 @@ static void checkasm_check_hevc_epel_bi_w(void)
{
LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
PIXEL_RECT(dst0, 64, 64);
PIXEL_RECT(dst1, 64, 64);
LOCAL_ALIGNED_32(int16_t, ref0, [BUF_SIZE]);
LOCAL_ALIGNED_32(int16_t, ref1, [BUF_SIZE]);
@@ -572,16 +593,18 @@ static void checkasm_check_hevc_epel_bi_w(void)
for (wx = weights; *wx >= 0; wx++) {
for (ox = offsets; *ox >= 0; ox++) {
randomize_buffers_ref();
call_ref(dst0, sizes[size] * SIZEOF_PIXEL,
CLEAR_PIXEL_RECT(dst0);
CLEAR_PIXEL_RECT(dst1);
call_ref(dst0, dst0_stride,
src0, sizes[size] * SIZEOF_PIXEL,
ref0, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
call_new(dst1, sizes[size] * SIZEOF_PIXEL,
call_new(dst1, dst1_stride,
src1, sizes[size] * SIZEOF_PIXEL,
ref1, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
checkasm_check_pixel(dst0, sizes[size] * SIZEOF_PIXEL,
dst1, sizes[size] * SIZEOF_PIXEL,
size[sizes], size[sizes], "dst");
bench_new(dst1, sizes[size] * SIZEOF_PIXEL,
checkasm_check_pixel_padded(dst0, dst0_stride,
dst1, dst1_stride,
size[sizes], size[sizes], "dst");
bench_new(dst1, dst1_stride,
src1, sizes[size] * SIZEOF_PIXEL,
ref1, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
}