diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index 097cab8285..0a581070a2 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -1018,7 +1018,7 @@ void ff_put_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
 
 void ff_avg_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
 {
-    avg_pixels8_mmx(dst, src, stride, 8);
+    ff_avg_pixels8_mmx(dst, src, stride, 8);
 }
 
 void ff_put_cavs_qpel16_mc00_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
diff --git a/libavcodec/x86/dsputil_mmx.h b/libavcodec/x86/dsputil_mmx.h
index df0c85ad61..eb23377cb3 100644
--- a/libavcodec/x86/dsputil_mmx.h
+++ b/libavcodec/x86/dsputil_mmx.h
@@ -156,6 +156,8 @@ void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels, int line_s
 void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                                       int line_size);
 
+void ff_avg_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
+                        ptrdiff_t line_size, int h);
 void ff_put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
                         ptrdiff_t line_size, int h);
 void ff_put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
diff --git a/libavcodec/x86/fpel_mmx.c b/libavcodec/x86/fpel_mmx.c
index bb8b788024..c42791426e 100644
--- a/libavcodec/x86/fpel_mmx.c
+++ b/libavcodec/x86/fpel_mmx.c
@@ -29,6 +29,27 @@
 
 #if HAVE_MMX_INLINE
 
+// in case more speed is needed - unrolling would certainly help
+void ff_avg_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
+                        ptrdiff_t line_size, int h)
+{
+    MOVQ_BFE(mm6);
+    JUMPALIGN();
+    do {
+        __asm__ volatile(
+             "movq  %0, %%mm0       \n\t"
+             "movq  %1, %%mm1       \n\t"
+             PAVGB_MMX(%%mm0, %%mm1, %%mm2, %%mm6)
+             "movq  %%mm2, %0       \n\t"
+             :"+m"(*block)
+             :"m"(*pixels)
+             :"memory");
+        pixels += line_size;
+        block += line_size;
+    }
+    while (--h);
+}
+
 void ff_put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
                         ptrdiff_t line_size, int h)
 {
diff --git a/libavcodec/x86/hpeldsp_init.c b/libavcodec/x86/hpeldsp_init.c
index b0a87c8c9f..bc2a08f10e 100644
--- a/libavcodec/x86/hpeldsp_init.c
+++ b/libavcodec/x86/hpeldsp_init.c
@@ -74,6 +74,7 @@ void ff_avg_pixels8_xy2_mmxext(uint8_t *block, const uint8_t *pixels,
 void ff_avg_pixels8_xy2_3dnow(uint8_t *block, const uint8_t *pixels,
                               ptrdiff_t line_size, int h);
 
+#define avg_pixels8_mmx         ff_avg_pixels8_mmx
 #define put_pixels8_mmx         ff_put_pixels8_mmx
 #define put_pixels16_mmx        ff_put_pixels16_mmx
 #define put_no_rnd_pixels8_mmx  ff_put_pixels8_mmx
diff --git a/libavcodec/x86/rnd_template.c b/libavcodec/x86/rnd_template.c
index 2ff77d7579..08e8593a20 100644
--- a/libavcodec/x86/rnd_template.c
+++ b/libavcodec/x86/rnd_template.c
@@ -92,28 +92,6 @@ static void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, ptrdiff
 }
 
 // avg_pixels
-#ifndef NO_RND
-// in case more speed is needed - unroling would certainly help
-static void DEF(avg, pixels8)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
-{
-    MOVQ_BFE(mm6);
-    JUMPALIGN();
-    do {
-        __asm__ volatile(
-             "movq  %0, %%mm0       \n\t"
-             "movq  %1, %%mm1       \n\t"
-             OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6)
-             "movq  %%mm2, %0       \n\t"
-             :"+m"(*block)
-             :"m"(*pixels)
-             :"memory");
-        pixels += line_size;
-        block += line_size;
-    }
-    while (--h);
-}
-#endif /* NO_RND */
-
 static void DEF(avg, pixels16)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
 {
     MOVQ_BFE(mm6);
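
Note on the moved code (editorial, not part of the patch): MOVQ_BFE fills mm6 with the byte pattern 0xFE, which PAVGB_MMX uses to compute the rounded per-byte average (a + b + 1) >> 1 of the eight pixels loaded by each movq. The removed copy in rnd_template.c sat inside #ifndef NO_RND, where OP_AVG expands to exactly this rounding variant, so hardcoding PAVGB_MMX at the new location preserves behavior. For reference, a scalar C sketch of the same operation might look like the following (avg_pixels8_scalar is a hypothetical name, purely illustrative):

#include <stddef.h>
#include <stdint.h>

/* Illustrative scalar equivalent of ff_avg_pixels8_mmx(): average an
 * 8-pixel-wide block with the source pixels, rounding halves up, for
 * h rows spaced line_size bytes apart. */
static void avg_pixels8_scalar(uint8_t *block, const uint8_t *pixels,
                               ptrdiff_t line_size, int h)
{
    do {
        for (int i = 0; i < 8; i++)
            block[i] = (block[i] + pixels[i] + 1) >> 1; /* (a + b + 1) / 2 */
        pixels += line_size;
        block  += line_size;
    } while (--h);
}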