From 0f598457089023f86829b34fef582f3810c491b2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Martin=20Storsj=C3=B6?= <martin@martin.st>
Date: Wed, 3 Apr 2013 04:26:14 -0700
Subject: [PATCH 1/2] bfin: Make vp3 functions static
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Martin Storsjö <martin@martin.st>
---
 libavcodec/bfin/vp3_bfin.c | 8 ++++----
 libavcodec/bfin/vp3_bfin.h | 2 --
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/libavcodec/bfin/vp3_bfin.c b/libavcodec/bfin/vp3_bfin.c
index ee45e3cf71..ca6702b317 100644
--- a/libavcodec/bfin/vp3_bfin.c
+++ b/libavcodec/bfin/vp3_bfin.c
@@ -28,7 +28,7 @@
 #include "vp3_bfin.h"
 
 /* Intra iDCT offset 128 */
-void ff_bfin_vp3_idct_put (uint8_t *dest, int line_size, int16_t *block)
+static void bfin_vp3_idct_put(uint8_t *dest, int line_size, int16_t *block)
 {
     uint8_t *cm = ff_cropTbl + MAX_NEG_CROP + 128;
     int i,j;
@@ -43,7 +43,7 @@ void ff_bfin_vp3_idct_put (uint8_t *dest, int line_size, int16_t *block)
 }
 
 /* Inter iDCT */
-void ff_bfin_vp3_idct_add (uint8_t *dest, int line_size, int16_t *block)
+static void bfin_vp3_idct_add(uint8_t *dest, int line_size, int16_t *block)
 {
     ff_bfin_vp3_idct (block);
     ff_bfin_add_pixels_clamped (block, dest, line_size);
@@ -54,7 +54,7 @@ void ff_bfin_vp3_idct_add (uint8_t *dest, int line_size, int16_t *block)
 av_cold void ff_vp3dsp_init_bfin(VP3DSPContext *c, int flags)
 {
     if (!(flags & CODEC_FLAG_BITEXACT)) {
-        c->idct_add = ff_bfin_vp3_idct_add;
-        c->idct_put = ff_bfin_vp3_idct_put;
+        c->idct_add = bfin_vp3_idct_add;
+        c->idct_put = bfin_vp3_idct_put;
     }
 }
diff --git a/libavcodec/bfin/vp3_bfin.h b/libavcodec/bfin/vp3_bfin.h
index 08b050e4c0..5a2c5a40f1 100644
--- a/libavcodec/bfin/vp3_bfin.h
+++ b/libavcodec/bfin/vp3_bfin.h
@@ -23,7 +23,5 @@
 #include <stdint.h>
 
 void ff_bfin_vp3_idct(int16_t *block);
-void ff_bfin_vp3_idct_put(uint8_t *dest, int line_size, int16_t *block);
-void ff_bfin_vp3_idct_add(uint8_t *dest, int line_size, int16_t *block);
 
 #endif /* AVCODEC_BFIN_VP3_BFIN_H */

From 610b18e2e3d8ef5eca3e78f33a0625689b8d2bb9 Mon Sep 17 00:00:00 2001
From: "Ronald S. Bultje" <rsbultje@gmail.com>
Date: Fri, 8 Mar 2013 20:40:16 -0800
Subject: [PATCH 2/2] x86: qpel: Move fullpel and l2 functions to a separate file
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This way, they can be shared between mpeg4qpel and h264qpel without
requiring either one to be compiled unconditionally.

Signed-off-by: Martin Storsjö <martin@martin.st>
---
 libavcodec/x86/Makefile    |   1 +
 libavcodec/x86/dsputil.asm | 192 ----------------------------
 libavcodec/x86/hpeldsp.asm |  41 ------
 libavcodec/x86/qpel.asm    | 256 +++++++++++++++++++++++++++++++++++++
 4 files changed, 257 insertions(+), 233 deletions(-)
 create mode 100644 libavcodec/x86/qpel.asm

diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile
index a759e6e9e0..cc60f2f0b3 100644
--- a/libavcodec/x86/Makefile
+++ b/libavcodec/x86/Makefile
@@ -76,3 +76,4 @@ YASM-OBJS += x86/dsputil.o \
              x86/fmtconvert.o \
              x86/hpeldsp.o \
              x86/mpeg4qpel.o \
+             x86/qpel.o \
diff --git a/libavcodec/x86/dsputil.asm b/libavcodec/x86/dsputil.asm
index 078f58c8d4..5d73ff8ee4 100644
--- a/libavcodec/x86/dsputil.asm
+++ b/libavcodec/x86/dsputil.asm
@@ -648,195 +648,3 @@ BSWAP32_BUF
 
 INIT_XMM ssse3
 BSWAP32_BUF
-
-
-; FIXME: All of the code below should be put back in h264_qpel_8bit.asm.
-; Unfortunately it is unconditionally used from dsputil_mmx.c since 71155d7 ..
- -%macro op_avgh 3 - movh %3, %2 - pavgb %1, %3 - movh %2, %1 -%endmacro - -%macro op_avg 2 - pavgb %1, %2 - mova %2, %1 -%endmacro - -%macro op_puth 2-3 - movh %2, %1 -%endmacro - -%macro op_put 2 - mova %2, %1 -%endmacro - -; void pixels4_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -%macro PIXELS4_L2 1 -%define OP op_%1h -cglobal %1_pixels4_l2, 6,6 - movsxdifnidn r3, r3d - movsxdifnidn r4, r4d - test r5d, 1 - je .loop - movd m0, [r1] - movd m1, [r2] - add r1, r4 - add r2, 4 - pavgb m0, m1 - OP m0, [r0], m3 - add r0, r3 - dec r5d -.loop: - mova m0, [r1] - mova m1, [r1+r4] - lea r1, [r1+2*r4] - pavgb m0, [r2] - pavgb m1, [r2+4] - OP m0, [r0], m3 - OP m1, [r0+r3], m3 - lea r0, [r0+2*r3] - mova m0, [r1] - mova m1, [r1+r4] - lea r1, [r1+2*r4] - pavgb m0, [r2+8] - pavgb m1, [r2+12] - OP m0, [r0], m3 - OP m1, [r0+r3], m3 - lea r0, [r0+2*r3] - add r2, 16 - sub r5d, 4 - jne .loop - REP_RET -%endmacro - -INIT_MMX mmxext -PIXELS4_L2 put -PIXELS4_L2 avg - -; void pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -%macro PIXELS8_L2 1 -%define OP op_%1 -cglobal %1_pixels8_l2, 6,6 - movsxdifnidn r3, r3d - movsxdifnidn r4, r4d - test r5d, 1 - je .loop - mova m0, [r1] - mova m1, [r2] - add r1, r4 - add r2, 8 - pavgb m0, m1 - OP m0, [r0] - add r0, r3 - dec r5d -.loop: - mova m0, [r1] - mova m1, [r1+r4] - lea r1, [r1+2*r4] - pavgb m0, [r2] - pavgb m1, [r2+8] - OP m0, [r0] - OP m1, [r0+r3] - lea r0, [r0+2*r3] - mova m0, [r1] - mova m1, [r1+r4] - lea r1, [r1+2*r4] - pavgb m0, [r2+16] - pavgb m1, [r2+24] - OP m0, [r0] - OP m1, [r0+r3] - lea r0, [r0+2*r3] - add r2, 32 - sub r5d, 4 - jne .loop - REP_RET -%endmacro - -INIT_MMX mmxext -PIXELS8_L2 put -PIXELS8_L2 avg - -; void pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -%macro PIXELS16_L2 1 -%define OP op_%1 -cglobal %1_pixels16_l2, 6,6 - movsxdifnidn r3, r3d - movsxdifnidn r4, r4d - test r5d, 1 - je .loop - mova m0, [r1] - mova m1, [r1+8] - pavgb m0, [r2] - pavgb m1, [r2+8] - add r1, r4 - add r2, 16 - OP m0, [r0] - OP m1, [r0+8] - add r0, r3 - dec r5d -.loop: - mova m0, [r1] - mova m1, [r1+8] - add r1, r4 - pavgb m0, [r2] - pavgb m1, [r2+8] - OP m0, [r0] - OP m1, [r0+8] - add r0, r3 - mova m0, [r1] - mova m1, [r1+8] - add r1, r4 - pavgb m0, [r2+16] - pavgb m1, [r2+24] - OP m0, [r0] - OP m1, [r0+8] - add r0, r3 - add r2, 32 - sub r5d, 2 - jne .loop - REP_RET -%endmacro - -INIT_MMX mmxext -PIXELS16_L2 put -PIXELS16_L2 avg - -INIT_MMX mmxext -; void pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h) -%macro PIXELS48 2 -%if %2 == 4 -%define OP movh -%else -%define OP mova -%endif -cglobal %1_pixels%2, 4,5 - movsxdifnidn r2, r2d - lea r4, [r2*3] -.loop: - OP m0, [r1] - OP m1, [r1+r2] - OP m2, [r1+r2*2] - OP m3, [r1+r4] - lea r1, [r1+r2*4] -%ifidn %1, avg - pavgb m0, [r0] - pavgb m1, [r0+r2] - pavgb m2, [r0+r2*2] - pavgb m3, [r0+r4] -%endif - OP [r0], m0 - OP [r0+r2], m1 - OP [r0+r2*2], m2 - OP [r0+r4], m3 - sub r3d, 4 - lea r0, [r0+r2*4] - jne .loop - RET -%endmacro - -PIXELS48 put, 4 -PIXELS48 avg, 4 -PIXELS48 put, 8 -PIXELS48 avg, 8 diff --git a/libavcodec/x86/hpeldsp.asm b/libavcodec/x86/hpeldsp.asm index d38186c857..ec04d99e77 100644 --- a/libavcodec/x86/hpeldsp.asm +++ b/libavcodec/x86/hpeldsp.asm @@ -452,44 +452,3 @@ INIT_MMX mmxext AVG_PIXELS8_XY2 INIT_MMX 3dnow AVG_PIXELS8_XY2 - -INIT_XMM sse2 -; void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h) 
-cglobal put_pixels16, 4,5,4 - lea r4, [r2*3] -.loop: - movu m0, [r1] - movu m1, [r1+r2] - movu m2, [r1+r2*2] - movu m3, [r1+r4] - lea r1, [r1+r2*4] - mova [r0], m0 - mova [r0+r2], m1 - mova [r0+r2*2], m2 - mova [r0+r4], m3 - sub r3d, 4 - lea r0, [r0+r2*4] - jnz .loop - REP_RET - -; void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h) -cglobal avg_pixels16, 4,5,4 - lea r4, [r2*3] -.loop: - movu m0, [r1] - movu m1, [r1+r2] - movu m2, [r1+r2*2] - movu m3, [r1+r4] - lea r1, [r1+r2*4] - pavgb m0, [r0] - pavgb m1, [r0+r2] - pavgb m2, [r0+r2*2] - pavgb m3, [r0+r4] - mova [r0], m0 - mova [r0+r2], m1 - mova [r0+r2*2], m2 - mova [r0+r4], m3 - sub r3d, 4 - lea r0, [r0+r2*4] - jnz .loop - REP_RET diff --git a/libavcodec/x86/qpel.asm b/libavcodec/x86/qpel.asm new file mode 100644 index 0000000000..ba97e2aec8 --- /dev/null +++ b/libavcodec/x86/qpel.asm @@ -0,0 +1,256 @@ +;****************************************************************************** +;* MMX optimized DSP utils +;* Copyright (c) 2008 Loren Merritt +;* Copyright (c) 2003-2013 Michael Niedermayer +;* Copyright (c) 2013 Daniel Kang +;* +;* This file is part of Libav. +;* +;* Libav is free software; you can redistribute it and/or +;* modify it under the terms of the GNU Lesser General Public +;* License as published by the Free Software Foundation; either +;* version 2.1 of the License, or (at your option) any later version. +;* +;* Libav is distributed in the hope that it will be useful, +;* but WITHOUT ANY WARRANTY; without even the implied warranty of +;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +;* Lesser General Public License for more details. +;* +;* You should have received a copy of the GNU Lesser General Public +;* License along with Libav; if not, write to the Free Software +;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +;****************************************************************************** + +%include "libavutil/x86/x86util.asm" + +SECTION .text + +%macro op_avgh 3 + movh %3, %2 + pavgb %1, %3 + movh %2, %1 +%endmacro + +%macro op_avg 2 + pavgb %1, %2 + mova %2, %1 +%endmacro + +%macro op_puth 2-3 + movh %2, %1 +%endmacro + +%macro op_put 2 + mova %2, %1 +%endmacro + +; void pixels4_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +%macro PIXELS4_L2 1 +%define OP op_%1h +cglobal %1_pixels4_l2, 6,6 + movsxdifnidn r3, r3d + movsxdifnidn r4, r4d + test r5d, 1 + je .loop + movd m0, [r1] + movd m1, [r2] + add r1, r4 + add r2, 4 + pavgb m0, m1 + OP m0, [r0], m3 + add r0, r3 + dec r5d +.loop: + mova m0, [r1] + mova m1, [r1+r4] + lea r1, [r1+2*r4] + pavgb m0, [r2] + pavgb m1, [r2+4] + OP m0, [r0], m3 + OP m1, [r0+r3], m3 + lea r0, [r0+2*r3] + mova m0, [r1] + mova m1, [r1+r4] + lea r1, [r1+2*r4] + pavgb m0, [r2+8] + pavgb m1, [r2+12] + OP m0, [r0], m3 + OP m1, [r0+r3], m3 + lea r0, [r0+2*r3] + add r2, 16 + sub r5d, 4 + jne .loop + REP_RET +%endmacro + +INIT_MMX mmxext +PIXELS4_L2 put +PIXELS4_L2 avg + +; void pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +%macro PIXELS8_L2 1 +%define OP op_%1 +cglobal %1_pixels8_l2, 6,6 + movsxdifnidn r3, r3d + movsxdifnidn r4, r4d + test r5d, 1 + je .loop + mova m0, [r1] + mova m1, [r2] + add r1, r4 + add r2, 8 + pavgb m0, m1 + OP m0, [r0] + add r0, r3 + dec r5d +.loop: + mova m0, [r1] + mova m1, [r1+r4] + lea r1, [r1+2*r4] + pavgb m0, [r2] + pavgb m1, [r2+8] + OP m0, [r0] + OP m1, [r0+r3] + lea r0, 
[r0+2*r3] + mova m0, [r1] + mova m1, [r1+r4] + lea r1, [r1+2*r4] + pavgb m0, [r2+16] + pavgb m1, [r2+24] + OP m0, [r0] + OP m1, [r0+r3] + lea r0, [r0+2*r3] + add r2, 32 + sub r5d, 4 + jne .loop + REP_RET +%endmacro + +INIT_MMX mmxext +PIXELS8_L2 put +PIXELS8_L2 avg + +; void pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +%macro PIXELS16_L2 1 +%define OP op_%1 +cglobal %1_pixels16_l2, 6,6 + movsxdifnidn r3, r3d + movsxdifnidn r4, r4d + test r5d, 1 + je .loop + mova m0, [r1] + mova m1, [r1+8] + pavgb m0, [r2] + pavgb m1, [r2+8] + add r1, r4 + add r2, 16 + OP m0, [r0] + OP m1, [r0+8] + add r0, r3 + dec r5d +.loop: + mova m0, [r1] + mova m1, [r1+8] + add r1, r4 + pavgb m0, [r2] + pavgb m1, [r2+8] + OP m0, [r0] + OP m1, [r0+8] + add r0, r3 + mova m0, [r1] + mova m1, [r1+8] + add r1, r4 + pavgb m0, [r2+16] + pavgb m1, [r2+24] + OP m0, [r0] + OP m1, [r0+8] + add r0, r3 + add r2, 32 + sub r5d, 2 + jne .loop + REP_RET +%endmacro + +INIT_MMX mmxext +PIXELS16_L2 put +PIXELS16_L2 avg + +INIT_MMX mmxext +; void pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h) +%macro PIXELS48 2 +%if %2 == 4 +%define OP movh +%else +%define OP mova +%endif +cglobal %1_pixels%2, 4,5 + movsxdifnidn r2, r2d + lea r4, [r2*3] +.loop: + OP m0, [r1] + OP m1, [r1+r2] + OP m2, [r1+r2*2] + OP m3, [r1+r4] + lea r1, [r1+r2*4] +%ifidn %1, avg + pavgb m0, [r0] + pavgb m1, [r0+r2] + pavgb m2, [r0+r2*2] + pavgb m3, [r0+r4] +%endif + OP [r0], m0 + OP [r0+r2], m1 + OP [r0+r2*2], m2 + OP [r0+r4], m3 + sub r3d, 4 + lea r0, [r0+r2*4] + jne .loop + RET +%endmacro + +PIXELS48 put, 4 +PIXELS48 avg, 4 +PIXELS48 put, 8 +PIXELS48 avg, 8 + + +INIT_XMM sse2 +; void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h) +cglobal put_pixels16, 4,5,4 + lea r4, [r2*3] +.loop: + movu m0, [r1] + movu m1, [r1+r2] + movu m2, [r1+r2*2] + movu m3, [r1+r4] + lea r1, [r1+r2*4] + mova [r0], m0 + mova [r0+r2], m1 + mova [r0+r2*2], m2 + mova [r0+r4], m3 + sub r3d, 4 + lea r0, [r0+r2*4] + jnz .loop + REP_RET + +; void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h) +cglobal avg_pixels16, 4,5,4 + lea r4, [r2*3] +.loop: + movu m0, [r1] + movu m1, [r1+r2] + movu m2, [r1+r2*2] + movu m3, [r1+r4] + lea r1, [r1+r2*4] + pavgb m0, [r0] + pavgb m1, [r0+r2] + pavgb m2, [r0+r2*2] + pavgb m3, [r0+r4] + mova [r0], m0 + mova [r0+r2], m1 + mova [r0+r2*2], m2 + mova [r0+r4], m3 + sub r3d, 4 + lea r0, [r0+r2*4] + jnz .loop + REP_RET
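
Reviewer's note (not part of either patch): as a cross-check while reading
the code moved into qpel.asm, here is a minimal C model of what the 8-wide
"put" l2 routine computes. It is a sketch only; the _c-suffixed name is
hypothetical, and the prototype simply mirrors the signature given in the
asm comments ("void pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1,
uint8_t *src2, int dstStride, int src1Stride, int h)"). The avg variants
do the same, then pavgb the result once more against the bytes already in
dst; the pixels4 and pixels16 versions differ essentially in block width
and in how far src2 advances per row.

#include <stdint.h>

/* C model of put_pixels8_l2: dst = rounded byte average of src1 and src2.
 * pavgb computes (a + b + 1) >> 1. src1 walks src1Stride per row; src2 is
 * a packed buffer that advances by the block width (the asm adds 32 to r2
 * every four rows, i.e. 8 bytes per row). The asm peels one row up front
 * when h is odd and then unrolls by four; the output is the same. */
static void put_pixels8_l2_c(uint8_t *dst, const uint8_t *src1,
                             const uint8_t *src2, int dstStride,
                             int src1Stride, int h)
{
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < 8; j++)
            dst[j] = (src1[j] + src2[j] + 1) >> 1;
        dst  += dstStride;
        src1 += src1Stride;
        src2 += 8;
    }
}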