/*
 * Provenance: mirror of https://github.com/FFmpeg/FFmpeg.git
 * snapshot synced 2024-12-18, commit a51279bbde.
 *
 * Commit message: x64 always has MMX, MMXEXT, SSE and SSE2, and this means
 * that some functions for MMX, MMXEXT and 3dnow are always overridden by
 * other functions (unless one e.g. explicitly disables SSE2) for x64. So
 * given that the only systems that benefit from these functions are truly
 * ancient 32-bit x86s, they are removed.
 * Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
 */
/*
 * SIMD-optimized halfpel functions are compiled twice for rnd/no_rnd
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2003-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 * mostly rewritten by Michael Niedermayer <michaelni@gmx.at>
 * and improved by Zdenek Kabelac <kabi@users.sf.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stddef.h>
#include <stdint.h>

#include "inline_asm.h"

// put_pixels
av_unused STATIC void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels,
|
|
ptrdiff_t line_size, int h)
|
|
{
|
|
MOVQ_ZERO(mm7);
|
|
SET_RND(mm6); // =2 for rnd and =1 for no_rnd version
|
|
__asm__ volatile(
|
|
"movq (%1), %%mm0 \n\t"
|
|
"movq 1(%1), %%mm4 \n\t"
|
|
"movq %%mm0, %%mm1 \n\t"
|
|
"movq %%mm4, %%mm5 \n\t"
|
|
"punpcklbw %%mm7, %%mm0 \n\t"
|
|
"punpcklbw %%mm7, %%mm4 \n\t"
|
|
"punpckhbw %%mm7, %%mm1 \n\t"
|
|
"punpckhbw %%mm7, %%mm5 \n\t"
|
|
"paddusw %%mm0, %%mm4 \n\t"
|
|
"paddusw %%mm1, %%mm5 \n\t"
|
|
"xor %%"FF_REG_a", %%"FF_REG_a" \n\t"
|
|
"add %3, %1 \n\t"
|
|
".p2align 3 \n\t"
|
|
"1: \n\t"
|
|
"movq (%1, %%"FF_REG_a"), %%mm0 \n\t"
|
|
"movq 1(%1, %%"FF_REG_a"), %%mm2 \n\t"
|
|
"movq %%mm0, %%mm1 \n\t"
|
|
"movq %%mm2, %%mm3 \n\t"
|
|
"punpcklbw %%mm7, %%mm0 \n\t"
|
|
"punpcklbw %%mm7, %%mm2 \n\t"
|
|
"punpckhbw %%mm7, %%mm1 \n\t"
|
|
"punpckhbw %%mm7, %%mm3 \n\t"
|
|
"paddusw %%mm2, %%mm0 \n\t"
|
|
"paddusw %%mm3, %%mm1 \n\t"
|
|
"paddusw %%mm6, %%mm4 \n\t"
|
|
"paddusw %%mm6, %%mm5 \n\t"
|
|
"paddusw %%mm0, %%mm4 \n\t"
|
|
"paddusw %%mm1, %%mm5 \n\t"
|
|
"psrlw $2, %%mm4 \n\t"
|
|
"psrlw $2, %%mm5 \n\t"
|
|
"packuswb %%mm5, %%mm4 \n\t"
|
|
"movq %%mm4, (%2, %%"FF_REG_a") \n\t"
|
|
"add %3, %%"FF_REG_a" \n\t"
|
|
|
|
"movq (%1, %%"FF_REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3
|
|
"movq 1(%1, %%"FF_REG_a"), %%mm4 \n\t"
|
|
"movq %%mm2, %%mm3 \n\t"
|
|
"movq %%mm4, %%mm5 \n\t"
|
|
"punpcklbw %%mm7, %%mm2 \n\t"
|
|
"punpcklbw %%mm7, %%mm4 \n\t"
|
|
"punpckhbw %%mm7, %%mm3 \n\t"
|
|
"punpckhbw %%mm7, %%mm5 \n\t"
|
|
"paddusw %%mm2, %%mm4 \n\t"
|
|
"paddusw %%mm3, %%mm5 \n\t"
|
|
"paddusw %%mm6, %%mm0 \n\t"
|
|
"paddusw %%mm6, %%mm1 \n\t"
|
|
"paddusw %%mm4, %%mm0 \n\t"
|
|
"paddusw %%mm5, %%mm1 \n\t"
|
|
"psrlw $2, %%mm0 \n\t"
|
|
"psrlw $2, %%mm1 \n\t"
|
|
"packuswb %%mm1, %%mm0 \n\t"
|
|
"movq %%mm0, (%2, %%"FF_REG_a") \n\t"
|
|
"add %3, %%"FF_REG_a" \n\t"
|
|
|
|
"subl $2, %0 \n\t"
|
|
"jnz 1b \n\t"
|
|
:"+g"(h), "+S"(pixels)
|
|
:"D"(block), "r"((x86_reg)line_size)
|
|
:FF_REG_a, "memory");
|
|
}
#ifndef NO_AVG
|
|
// avg_pixels
|
|
// this routine is 'slightly' suboptimal but mostly unused
|
|
av_unused STATIC void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels,
|
|
ptrdiff_t line_size, int h)
|
|
{
|
|
MOVQ_ZERO(mm7);
|
|
SET_RND(mm6); // =2 for rnd and =1 for no_rnd version
|
|
__asm__ volatile(
|
|
"movq (%1), %%mm0 \n\t"
|
|
"movq 1(%1), %%mm4 \n\t"
|
|
"movq %%mm0, %%mm1 \n\t"
|
|
"movq %%mm4, %%mm5 \n\t"
|
|
"punpcklbw %%mm7, %%mm0 \n\t"
|
|
"punpcklbw %%mm7, %%mm4 \n\t"
|
|
"punpckhbw %%mm7, %%mm1 \n\t"
|
|
"punpckhbw %%mm7, %%mm5 \n\t"
|
|
"paddusw %%mm0, %%mm4 \n\t"
|
|
"paddusw %%mm1, %%mm5 \n\t"
|
|
"xor %%"FF_REG_a", %%"FF_REG_a" \n\t"
|
|
"add %3, %1 \n\t"
|
|
".p2align 3 \n\t"
|
|
"1: \n\t"
|
|
"movq (%1, %%"FF_REG_a"), %%mm0 \n\t"
|
|
"movq 1(%1, %%"FF_REG_a"), %%mm2 \n\t"
|
|
"movq %%mm0, %%mm1 \n\t"
|
|
"movq %%mm2, %%mm3 \n\t"
|
|
"punpcklbw %%mm7, %%mm0 \n\t"
|
|
"punpcklbw %%mm7, %%mm2 \n\t"
|
|
"punpckhbw %%mm7, %%mm1 \n\t"
|
|
"punpckhbw %%mm7, %%mm3 \n\t"
|
|
"paddusw %%mm2, %%mm0 \n\t"
|
|
"paddusw %%mm3, %%mm1 \n\t"
|
|
"paddusw %%mm6, %%mm4 \n\t"
|
|
"paddusw %%mm6, %%mm5 \n\t"
|
|
"paddusw %%mm0, %%mm4 \n\t"
|
|
"paddusw %%mm1, %%mm5 \n\t"
|
|
"psrlw $2, %%mm4 \n\t"
|
|
"psrlw $2, %%mm5 \n\t"
|
|
"movq (%2, %%"FF_REG_a"), %%mm3 \n\t"
|
|
"packuswb %%mm5, %%mm4 \n\t"
|
|
"pcmpeqd %%mm2, %%mm2 \n\t"
|
|
"paddb %%mm2, %%mm2 \n\t"
|
|
PAVGB_MMX(%%mm3, %%mm4, %%mm5, %%mm2)
|
|
"movq %%mm5, (%2, %%"FF_REG_a") \n\t"
|
|
"add %3, %%"FF_REG_a" \n\t"
|
|
|
|
"movq (%1, %%"FF_REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3
|
|
"movq 1(%1, %%"FF_REG_a"), %%mm4 \n\t"
|
|
"movq %%mm2, %%mm3 \n\t"
|
|
"movq %%mm4, %%mm5 \n\t"
|
|
"punpcklbw %%mm7, %%mm2 \n\t"
|
|
"punpcklbw %%mm7, %%mm4 \n\t"
|
|
"punpckhbw %%mm7, %%mm3 \n\t"
|
|
"punpckhbw %%mm7, %%mm5 \n\t"
|
|
"paddusw %%mm2, %%mm4 \n\t"
|
|
"paddusw %%mm3, %%mm5 \n\t"
|
|
"paddusw %%mm6, %%mm0 \n\t"
|
|
"paddusw %%mm6, %%mm1 \n\t"
|
|
"paddusw %%mm4, %%mm0 \n\t"
|
|
"paddusw %%mm5, %%mm1 \n\t"
|
|
"psrlw $2, %%mm0 \n\t"
|
|
"psrlw $2, %%mm1 \n\t"
|
|
"movq (%2, %%"FF_REG_a"), %%mm3 \n\t"
|
|
"packuswb %%mm1, %%mm0 \n\t"
|
|
"pcmpeqd %%mm2, %%mm2 \n\t"
|
|
"paddb %%mm2, %%mm2 \n\t"
|
|
PAVGB_MMX(%%mm3, %%mm0, %%mm1, %%mm2)
|
|
"movq %%mm1, (%2, %%"FF_REG_a") \n\t"
|
|
"add %3, %%"FF_REG_a" \n\t"
|
|
|
|
"subl $2, %0 \n\t"
|
|
"jnz 1b \n\t"
|
|
:"+g"(h), "+S"(pixels)
|
|
:"D"(block), "r"((x86_reg)line_size)
|
|
:FF_REG_a, "memory");
|
|
}
|
|
#endif