1
0
mirror of https://github.com/FFmpeg/FFmpeg.git synced 2024-12-23 12:43:46 +02:00

checkasm: add VP9 loopfilter tests.

The randomize_buffer() implementation assures that "most of the time",
we'll do a good mix of wide16/wide8/hev/regular/no filters for complete
code coverage. However, this is not mathematically assured because that
would make the code either much more complex, or much less random.
This commit is contained in:
Ronald S. Bultje 2015-09-17 11:58:10 -04:00
parent 784792788b
commit b074367405

View File

@@ -94,6 +94,152 @@ static void check_ipred(void)
#undef randomize_buffers
/* Store pixel value c at (column a, row b) of buf0, using the jstride
 * set up by the enclosing randomize_buffers() expansion. Handles both
 * 8-bit (byte) and high-bitdepth (uint16_t) pixel layouts via
 * SIZEOF_PIXEL. NOTE: c may be evaluated more than once conceptually;
 * callers below only pass it side-effecting expressions once per use. */
#define setpx(a,b,c) \
do { \
if (SIZEOF_PIXEL == 1) { \
buf0[(a) + (b) * jstride] = c; \
} else { \
((uint16_t *)buf0)[(a) + (b) * jstride] = c; \
} \
} while (0)
#define setdx(a,b,c,d) setpx(a,b,c-(d)+(rnd()%((d)*2+1)))
#define setsx(a,b,c,d) setdx(a,b,c,(d) << (bit_depth - 8))
/* Fill the 8 lines crossing the filtered edge in buf0 with pixel data
 * engineered to hit the different loopfilter paths:
 *   lines 0-1: values within flat16 limits  -> wide16 filter path,
 *   lines 2-3: flat inner 8, random outer   -> wide8 (flat8) path,
 *   lines 4-5: within E/I limits            -> regular/hev path,
 *   lines 6-7: fully random                 -> filter mostly disabled.
 * bidx selects the E[]/I[]/F[] threshold set, lineoff is the line offset
 * of the edge within the block, str is the stride used for jstride.
 * dir selects horizontal vs vertical edge orientation (swaps i/j strides).
 * As the commit message notes, path coverage is probabilistic, not
 * guaranteed. Expansion relies on local names dir, bit_depth, E, I, F
 * and buf0 being in scope at the call site. */
#define randomize_buffers(bidx, lineoff, str) \
do { \
uint32_t mask = (1 << bit_depth) - 1; \
int off = dir ? lineoff : lineoff * 16; \
int istride = dir ? 1 : 16; \
int jstride = dir ? str : 1; \
int i, j; \
for (i = 0; i < 2; i++) /* flat16 */ { \
int idx = off + i * istride, p0, q0; \
setpx(idx, 0, q0 = rnd() & mask); \
setsx(idx, -1, p0 = q0, E[bidx] >> 2); \
for (j = 1; j < 8; j++) { \
setsx(idx, -1 - j, p0, F[bidx]); \
setsx(idx, j, q0, F[bidx]); \
} \
} \
for (i = 2; i < 4; i++) /* flat8 */ { \
int idx = off + i * istride, p0, q0; \
setpx(idx, 0, q0 = rnd() & mask); \
setsx(idx, -1, p0 = q0, E[bidx] >> 2); \
for (j = 1; j < 4; j++) { \
setsx(idx, -1 - j, p0, F[bidx]); \
setsx(idx, j, q0, F[bidx]); \
} \
for (j = 4; j < 8; j++) { \
setpx(idx, -1 - j, rnd() & mask); \
setpx(idx, j, rnd() & mask); \
} \
} \
for (i = 4; i < 6; i++) /* regular */ { \
int idx = off + i * istride, p2, p1, p0, q0, q1, q2; \
setpx(idx, 0, q0 = rnd() & mask); \
setsx(idx, 1, q1 = q0, I[bidx]); \
setsx(idx, 2, q2 = q1, I[bidx]); \
setsx(idx, 3, q2, I[bidx]); \
setsx(idx, -1, p0 = q0, E[bidx] >> 2); \
setsx(idx, -2, p1 = p0, I[bidx]); \
setsx(idx, -3, p2 = p1, I[bidx]); \
setsx(idx, -4, p2, I[bidx]); \
for (j = 4; j < 8; j++) { \
setpx(idx, -1 - j, rnd() & mask); \
setpx(idx, j, rnd() & mask); \
} \
} \
for (i = 6; i < 8; i++) /* off */ { \
int idx = off + i * istride; \
for (j = 0; j < 8; j++) { \
setpx(idx, -1 - j, rnd() & mask); \
setpx(idx, j, rnd() & mask); \
} \
} \
} while (0)
/**
 * Test the VP9 loopfilter DSP functions against the C reference:
 * the 8-pixel 4/8/16-wide filters, the 16-pixel wide16 filter, and the
 * mix2 variants, for both edge directions (h/v) at 8/10/12 bpp.
 * Input buffers are randomized via randomize_buffers() so that (with
 * high probability) every filter-strength path gets exercised.
 */
static void check_loopfilter(void)
{
    LOCAL_ALIGNED_32(uint8_t, base0, [32 + 16 * 16 * 2]);
    LOCAL_ALIGNED_32(uint8_t, base1, [32 + 16 * 16 * 2]);
    VP9DSPContext dsp;
    int dir, wd, wd2, bit_depth;
    static const char *const dir_name[2] = { "h", "v" };
    /* Per-buffer-half thresholds: E = edge limit, I = inner limit,
     * H = hev threshold, F = flatness jitter used by randomize_buffers(). */
    int E[2] = { 20, 28 }, I[2] = { 10, 16 }, H[2] = { 7, 11 }, F[2] = { 1, 1 };

    declare_func(void, uint8_t *dst, ptrdiff_t stride, int E, int I, int H);

    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
        ff_vp9dsp_init(&dsp, bit_depth, 0);

        for (dir = 0; dir < 2; dir++) {
            uint8_t *buf0, *buf1;
            /* midoff places the filtered edge in the middle of the buffer;
             * the aligned variant keeps buf0/buf1 32-byte aligned. */
            int midoff = (dir ? 8 * 8 : 8) * SIZEOF_PIXEL;
            int midoff_aligned = (dir ? 8 * 8 : 16) * SIZEOF_PIXEL;

            buf0 = base0 + midoff_aligned;
            buf1 = base1 + midoff_aligned;

            for (wd = 0; wd < 3; wd++) {
                // 4/8/16wd_8px
                if (check_func(dsp.loop_filter_8[wd][dir],
                               "vp9_loop_filter_%s_%d_8_%dbpp",
                               dir_name[dir], 4 << wd, bit_depth)) {
                    randomize_buffers(0, 0, 8);
                    memcpy(buf1 - midoff, buf0 - midoff,
                           16 * 8 * SIZEOF_PIXEL);
                    call_ref(buf0, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]);
                    call_new(buf1, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]);
                    if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 8 * SIZEOF_PIXEL))
                        fail();
                    bench_new(buf1, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]);
                }
            }

            /* The 16-pixel tests use a larger (16x16) region. */
            midoff = (dir ? 16 * 8 : 8) * SIZEOF_PIXEL;
            midoff_aligned = (dir ? 16 * 8 : 16) * SIZEOF_PIXEL;

            // 16wd_16px loopfilter
            if (check_func(dsp.loop_filter_16[dir],
                           "vp9_loop_filter_%s_16_16_%dbpp",
                           dir_name[dir], bit_depth)) {
                randomize_buffers(0, 0, 16);
                randomize_buffers(0, 8, 16);
                memcpy(buf1 - midoff, buf0 - midoff, 16 * 16 * SIZEOF_PIXEL);
                call_ref(buf0, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]);
                call_new(buf1, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]);
                if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16 * SIZEOF_PIXEL))
                    fail();
                bench_new(buf1, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]);
            }

            for (wd = 0; wd < 2; wd++) {
                for (wd2 = 0; wd2 < 2; wd2++) {
                    // mix2 loopfilter
                    if (check_func(dsp.loop_filter_mix2[wd][wd2][dir],
                                   "vp9_loop_filter_mix2_%s_%d%d_16_%dbpp",
                                   dir_name[dir], 4 << wd, 4 << wd2, bit_depth)) {
                        randomize_buffers(0, 0, 16);
                        randomize_buffers(1, 8, 16);
                        memcpy(buf1 - midoff, buf0 - midoff, 16 * 16 * SIZEOF_PIXEL);
/* mix2 packs the two halves' thresholds into one int: low byte = first
 * half, high byte = second half. */
#define M(a) ((a[1] << 8) | a[0])
                        call_ref(buf0, 16 * SIZEOF_PIXEL, M(E), M(I), M(H));
                        call_new(buf1, 16 * SIZEOF_PIXEL, M(E), M(I), M(H));
                        if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16 * SIZEOF_PIXEL))
                            fail();
                        bench_new(buf1, 16 * SIZEOF_PIXEL, M(E), M(I), M(H));
#undef M
                    }
                }
            }
        }
    }
    report("loopfilter");
}
/* Tear down the loopfilter test helpers before the MC test section. */
#undef setsx
#undef setpx
#undef setdx
#undef randomize_buffers
/* Buffer sizing for the motion-compensation tests; `size` is presumably
 * the block size local to check_mc() (defined outside this chunk) --
 * these macros are only valid where such a variable is in scope. */
#define DST_BUF_SIZE (size * size * SIZEOF_PIXEL)
#define SRC_BUF_STRIDE 72
#define SRC_BUF_SIZE ((size + 7) * SRC_BUF_STRIDE * SIZEOF_PIXEL)
@@ -187,5 +333,6 @@ static void check_mc(void)
/* checkasm entry point for the VP9 DSP tests: runs each test group
 * (intra prediction, loopfilter, motion compensation) in turn. */
void checkasm_check_vp9dsp(void)
{
check_ipred();
check_loopfilter();
check_mc();
}