2001-07-22 17:18:56 +03:00
|
|
|
/*
|
|
|
|
* MMX optimized DSP utils
|
2009-01-19 17:46:40 +02:00
|
|
|
* Copyright (c) 2000, 2001 Fabrice Bellard
|
2004-01-10 18:04:55 +02:00
|
|
|
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
|
2001-07-22 17:18:56 +03:00
|
|
|
*
|
2006-10-07 18:30:46 +03:00
|
|
|
* This file is part of FFmpeg.
|
|
|
|
*
|
|
|
|
* FFmpeg is free software; you can redistribute it and/or
|
2002-05-26 01:45:33 +03:00
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
2006-10-07 18:30:46 +03:00
|
|
|
* version 2.1 of the License, or (at your option) any later version.
|
2001-07-22 17:18:56 +03:00
|
|
|
*
|
2006-10-07 18:30:46 +03:00
|
|
|
* FFmpeg is distributed in the hope that it will be useful,
|
2001-07-22 17:18:56 +03:00
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
2002-05-26 01:45:33 +03:00
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
2001-07-22 17:18:56 +03:00
|
|
|
*
|
2002-05-26 01:45:33 +03:00
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
2006-10-07 18:30:46 +03:00
|
|
|
* License along with FFmpeg; if not, write to the Free Software
|
2006-01-13 00:43:26 +02:00
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
2001-07-22 17:18:56 +03:00
|
|
|
*
|
|
|
|
* MMX optimization by Nick Kurshev <nickols_k@mail.ru>
|
|
|
|
*/
|
|
|
|
|
2013-05-08 02:45:39 +03:00
|
|
|
#include "config.h"
|
2013-05-14 11:55:28 +03:00
|
|
|
#include "libavutil/avassert.h"
|
2010-09-04 12:59:08 +03:00
|
|
|
#include "libavutil/cpu.h"
|
2012-08-08 15:51:52 +03:00
|
|
|
#include "libavutil/x86/asm.h"
|
2013-02-11 02:59:21 +03:00
|
|
|
#include "libavcodec/videodsp.h"
|
2013-05-08 01:50:17 +03:00
|
|
|
#include "constants.h"
|
2013-05-08 02:52:57 +03:00
|
|
|
#include "dsputil_x86.h"
|
2011-10-06 18:57:17 +03:00
|
|
|
#include "diracdsp_mmx.h"
|
2001-07-22 17:18:56 +03:00
|
|
|
|
2012-07-23 00:14:20 +03:00
|
|
|
#if HAVE_INLINE_ASM
|
|
|
|
|
2013-01-20 03:02:29 +03:00
|
|
|
/**
 * Store an 8x8 block of 16-bit coefficients into 'pixels' as bytes,
 * clamping each value to 0..255 (packuswb performs unsigned saturation).
 * Each asm statement below converts four rows (32 coefficients); the
 * block is processed in two halves.
 */
void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;

    /* read the pixels */
    p   = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile (
        /* Load rows 0..3 (64 bytes) from p.  Note the explicit "r"(p)
         * operand with offset addressing: using an "m" operand with a
         * literal prefix like "movq 8%3" is unsafe, since the compiler
         * may fold a constant displacement into the operand. */
        "movq        (%3), %%mm0        \n\t"
        "movq       8(%3), %%mm1        \n\t"
        "movq      16(%3), %%mm2        \n\t"
        "movq      24(%3), %%mm3        \n\t"
        "movq      32(%3), %%mm4        \n\t"
        "movq      40(%3), %%mm5        \n\t"
        "movq      48(%3), %%mm6        \n\t"
        "movq      56(%3), %%mm7        \n\t"
        /* saturate words to unsigned bytes, 8 pixels per register */
        "packuswb   %%mm1, %%mm0        \n\t"
        "packuswb   %%mm3, %%mm2        \n\t"
        "packuswb   %%mm5, %%mm4        \n\t"
        "packuswb   %%mm7, %%mm6        \n\t"
        /* store four destination rows at pix + {0,1,2,3} * line_size */
        "movq       %%mm0, (%0)         \n\t"
        "movq       %%mm2, (%0, %1)     \n\t"
        "movq       %%mm4, (%0, %1, 2)  \n\t"
        "movq       %%mm6, (%0, %2)     \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
           "r"(p)
        : "memory");
    pix += line_size * 4;
    p   += 32;

    // if here would be an exact copy of the code above
    // compiler would generate some very strange code
    // thus using "r"
    __asm__ volatile (
        /* rows 4..7, same pattern as above */
        "movq        (%3), %%mm0        \n\t"
        "movq       8(%3), %%mm1        \n\t"
        "movq      16(%3), %%mm2        \n\t"
        "movq      24(%3), %%mm3        \n\t"
        "movq      32(%3), %%mm4        \n\t"
        "movq      40(%3), %%mm5        \n\t"
        "movq      48(%3), %%mm6        \n\t"
        "movq      56(%3), %%mm7        \n\t"
        "packuswb   %%mm1, %%mm0        \n\t"
        "packuswb   %%mm3, %%mm2        \n\t"
        "packuswb   %%mm5, %%mm4        \n\t"
        "packuswb   %%mm7, %%mm6        \n\t"
        "movq       %%mm0, (%0)         \n\t"
        "movq       %%mm2, (%0, %1)     \n\t"
        "movq       %%mm4, (%0, %1, 2)  \n\t"
        "movq       %%mm6, (%0, %2)     \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
        : "memory");
}
|
|
|
|
|
2012-03-16 20:42:01 +03:00
|
|
|
/* Emit asm for one half (four rows) of an 8x8 block: pack 16-bit
 * coefficients to signed bytes with saturation (packsswb), then add the
 * 0x80 bias held in mm0 so the signed -128..127 range maps to unsigned
 * 0..255.  %0 = dst pointer, %1 = 3*stride, %2 = block, %3 = stride. */
#define put_signed_pixels_clamped_mmx_half(off)             \
    "movq          "#off"(%2), %%mm1    \n\t"               \
    "movq     16 + "#off"(%2), %%mm2    \n\t"               \
    "movq     32 + "#off"(%2), %%mm3    \n\t"               \
    "movq     48 + "#off"(%2), %%mm4    \n\t"               \
    "packsswb  8 + "#off"(%2), %%mm1    \n\t"               \
    "packsswb 24 + "#off"(%2), %%mm2    \n\t"               \
    "packsswb 40 + "#off"(%2), %%mm3    \n\t"               \
    "packsswb 56 + "#off"(%2), %%mm4    \n\t"               \
    "paddb         %%mm0, %%mm1         \n\t"               \
    "paddb         %%mm0, %%mm2         \n\t"               \
    "paddb         %%mm0, %%mm3         \n\t"               \
    "paddb         %%mm0, %%mm4         \n\t"               \
    "movq          %%mm1, (%0)          \n\t"               \
    "movq          %%mm2, (%0, %3)      \n\t"               \
    "movq          %%mm3, (%0, %3, 2)   \n\t"               \
    "movq          %%mm4, (%0, %1)      \n\t"

/**
 * Store an 8x8 block of 16-bit coefficients as bytes, clamping to the
 * signed range and re-biasing by +128 (see macro above).
 */
void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;  /* set to 3 * line_skip by the lea below */

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0 \n\t"  /* bias: 0x80 in each byte */
        "lea         (%3, %3, 2), %1    \n\t"  /* line_skip3 = 3 * stride */
        put_signed_pixels_clamped_mmx_half(0)
        "lea         (%0, %3, 4), %0    \n\t"  /* advance to rows 4..7 */
        put_signed_pixels_clamped_mmx_half(64)
        : "+&r"(pixels), "=&r"(line_skip3)
        : "r"(block), "r"(line_skip)
        : "memory");
}
|
|
|
|
|
2013-01-20 03:02:29 +03:00
|
|
|
/**
 * Add an 8x8 block of 16-bit coefficients to the existing pixels,
 * saturating the sums to 0..255 (paddsw for the word additions,
 * packuswb for the final clamp).  Two rows per asm statement, four
 * loop iterations.
 */
void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p   = block;
    pix = pixels;
    MOVQ_ZERO(mm7);  /* mm7 = 0, used below to widen bytes to words */
    i = 4;
    do {
        __asm__ volatile (
            "movq        (%2), %%mm0    \n\t"  /* row 0 coeffs, words 0-3 */
            "movq       8(%2), %%mm1    \n\t"  /* row 0 coeffs, words 4-7 */
            "movq      16(%2), %%mm2    \n\t"  /* row 1 coeffs, words 0-3 */
            "movq      24(%2), %%mm3    \n\t"  /* row 1 coeffs, words 4-7 */
            "movq          %0, %%mm4    \n\t"  /* row 0 pixels */
            "movq          %1, %%mm6    \n\t"  /* row 1 pixels */
            "movq       %%mm4, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm4    \n\t"  /* low 4 pixels -> words */
            "punpckhbw  %%mm7, %%mm5    \n\t"  /* high 4 pixels -> words */
            "paddsw     %%mm4, %%mm0    \n\t"
            "paddsw     %%mm5, %%mm1    \n\t"
            "movq       %%mm6, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm6    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm6, %%mm2    \n\t"
            "paddsw     %%mm5, %%mm3    \n\t"
            "packuswb   %%mm1, %%mm0    \n\t"  /* clamp row 0 to 0..255 */
            "packuswb   %%mm3, %%mm2    \n\t"  /* clamp row 1 to 0..255 */
            "movq       %%mm0, %0       \n\t"
            "movq       %%mm2, %1       \n\t"
            : "+m"(*pix), "+m"(*(pix + line_size))
            : "r"(p)
            : "memory");
        pix += line_size * 2;
        p   += 16;
    } while (--i);
}
|
|
|
|
|
2012-03-16 20:42:01 +03:00
|
|
|
#define CLEAR_BLOCKS(name, n) \
|
2013-05-08 02:45:39 +03:00
|
|
|
void name(int16_t *blocks) \
|
2012-03-16 20:42:01 +03:00
|
|
|
{ \
|
|
|
|
__asm__ volatile ( \
|
|
|
|
"pxor %%mm7, %%mm7 \n\t" \
|
|
|
|
"mov %1, %%"REG_a" \n\t" \
|
|
|
|
"1: \n\t" \
|
|
|
|
"movq %%mm7, (%0, %%"REG_a") \n\t" \
|
|
|
|
"movq %%mm7, 8(%0, %%"REG_a") \n\t" \
|
|
|
|
"movq %%mm7, 16(%0, %%"REG_a") \n\t" \
|
|
|
|
"movq %%mm7, 24(%0, %%"REG_a") \n\t" \
|
|
|
|
"add $32, %%"REG_a" \n\t" \
|
|
|
|
"js 1b \n\t" \
|
|
|
|
:: "r"(((uint8_t *)blocks) + 128 * n), \
|
|
|
|
"i"(-128 * n) \
|
|
|
|
: "%"REG_a \
|
|
|
|
); \
|
2008-12-10 23:35:17 +02:00
|
|
|
}
|
2013-05-08 02:45:39 +03:00
|
|
|
CLEAR_BLOCKS(ff_clear_blocks_mmx, 6)
|
|
|
|
CLEAR_BLOCKS(ff_clear_block_mmx, 1)
|
2008-12-10 23:35:17 +02:00
|
|
|
|
2013-05-08 02:45:39 +03:00
|
|
|
/**
 * Zero one 128-byte (64 x int16_t) DCT block with SSE stores.
 * 'block' must be 16-byte aligned (movaps faults on unaligned
 * addresses).
 */
void ff_clear_block_sse(int16_t *block)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0      \n"
        "movaps %%xmm0,    (%0)     \n"
        "movaps %%xmm0,  16(%0)     \n"
        "movaps %%xmm0,  32(%0)     \n"
        "movaps %%xmm0,  48(%0)     \n"
        "movaps %%xmm0,  64(%0)     \n"
        "movaps %%xmm0,  80(%0)     \n"
        "movaps %%xmm0,  96(%0)     \n"
        "movaps %%xmm0, 112(%0)     \n"
        :: "r"(block)
        : "memory"
    );
}
|
|
|
|
|
2013-05-08 02:45:39 +03:00
|
|
|
void ff_clear_blocks_sse(int16_t *blocks)
|
2012-03-06 15:00:42 +03:00
|
|
|
{
|
2012-03-16 20:42:01 +03:00
|
|
|
__asm__ volatile (
|
|
|
|
"xorps %%xmm0, %%xmm0 \n"
|
|
|
|
"mov %1, %%"REG_a" \n"
|
|
|
|
"1: \n"
|
|
|
|
"movaps %%xmm0, (%0, %%"REG_a") \n"
|
|
|
|
"movaps %%xmm0, 16(%0, %%"REG_a") \n"
|
|
|
|
"movaps %%xmm0, 32(%0, %%"REG_a") \n"
|
|
|
|
"movaps %%xmm0, 48(%0, %%"REG_a") \n"
|
|
|
|
"movaps %%xmm0, 64(%0, %%"REG_a") \n"
|
|
|
|
"movaps %%xmm0, 80(%0, %%"REG_a") \n"
|
|
|
|
"movaps %%xmm0, 96(%0, %%"REG_a") \n"
|
|
|
|
"movaps %%xmm0, 112(%0, %%"REG_a") \n"
|
|
|
|
"add $128, %%"REG_a" \n"
|
|
|
|
"js 1b \n"
|
|
|
|
:: "r"(((uint8_t *)blocks) + 128 * 6),
|
|
|
|
"i"(-128 * 6)
|
2009-06-16 20:33:57 +03:00
|
|
|
: "%"REG_a
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2013-05-08 02:45:39 +03:00
|
|
|
void ff_add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
|
2012-03-06 15:00:42 +03:00
|
|
|
{
|
|
|
|
x86_reg i = 0;
|
2012-03-16 20:42:01 +03:00
|
|
|
__asm__ volatile (
|
|
|
|
"jmp 2f \n\t"
|
2005-12-22 03:10:11 +02:00
|
|
|
"1: \n\t"
|
2012-03-16 20:42:01 +03:00
|
|
|
"movq (%1, %0), %%mm0 \n\t"
|
|
|
|
"movq (%2, %0), %%mm1 \n\t"
|
|
|
|
"paddb %%mm0, %%mm1 \n\t"
|
|
|
|
"movq %%mm1, (%2, %0) \n\t"
|
|
|
|
"movq 8(%1, %0), %%mm0 \n\t"
|
|
|
|
"movq 8(%2, %0), %%mm1 \n\t"
|
|
|
|
"paddb %%mm0, %%mm1 \n\t"
|
|
|
|
"movq %%mm1, 8(%2, %0) \n\t"
|
|
|
|
"add $16, %0 \n\t"
|
2008-06-22 10:05:40 +03:00
|
|
|
"2: \n\t"
|
2012-03-16 20:42:01 +03:00
|
|
|
"cmp %3, %0 \n\t"
|
|
|
|
"js 1b \n\t"
|
|
|
|
: "+r"(i)
|
|
|
|
: "r"(src), "r"(dst), "r"((x86_reg)w - 15)
|
2002-11-14 21:20:04 +02:00
|
|
|
);
|
2012-03-06 15:00:42 +03:00
|
|
|
for ( ; i < w; i++)
|
|
|
|
dst[i + 0] += src[i + 0];
|
2002-11-14 21:20:04 +02:00
|
|
|
}
|
|
|
|
|
2011-06-23 00:30:15 +03:00
|
|
|
#if HAVE_7REGS
|
2013-05-08 02:45:39 +03:00
|
|
|
/**
 * HuffYUV median prediction using cmov: for each x in [0, w),
 * dst[x] = diff[x] + median(l, t, l + t - tl) (low byte arithmetic),
 * where t = top[x], l is the previous output byte and tl the previous
 * top byte.  On return *left / *left_top hold the final l / tl so the
 * caller can continue on the next slice.
 * The w2 index runs from -w up to 0; %5/%6/%7 are the end pointers.
 */
void ff_add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
                                        const uint8_t *diff, int w,
                                        int *left, int *left_top)
{
    x86_reg w2 = -w;
    x86_reg x;
    int l  = *left     & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile (
        "mov          %7, %3            \n"
        "1:                             \n"
        "movzbl (%3, %4), %2            \n"  /* t = top[x] */
        "mov          %2, %k3           \n"
        "sub         %b1, %b3           \n"  /* t - tl */
        "add         %b0, %b3           \n"  /* l + t - tl */
        "mov          %2, %1            \n"  /* tl = t for next iteration */
        "cmp          %0, %2            \n"
        "cmovg        %0, %2            \n"  /* %2 = min(t, l) */
        "cmovg        %1, %0            \n"  /* %0 = max(t, l) */
        "cmp         %k3, %0            \n"
        "cmovg       %k3, %0            \n"  /* clamp against l + t - tl */
        "mov          %7, %3            \n"  /* reload top end pointer */
        "cmp          %2, %0            \n"
        "cmovl        %2, %0            \n"  /* median now in %0 */
        "add    (%6, %4), %b0           \n"  /* l = median + diff[x] */
        "mov         %b0, (%5, %4)      \n"  /* dst[x] = l */
        "inc          %4                \n"
        "jl           1b                \n"
        : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        : "r"(dst + w), "r"(diff + w), "rm"(top + w)
    );
    *left     = l;
    *left_top = tl;
}
|
|
|
|
#endif
|
2003-12-03 00:02:57 +02:00
|
|
|
|
2012-03-06 15:00:42 +03:00
|
|
|
/* Draw the edges of width 'w' of an image of size width, height
|
|
|
|
* this MMX version can only handle w == 8 || w == 16. */
|
2013-05-08 02:45:39 +03:00
|
|
|
/* Draw the edges of width 'w' of an image of size width, height.
 * Left/right borders are filled by replicating the first/last pixel of
 * each line; handles only w == 4, 8 or 16 (asserted below).  Top and
 * bottom borders are then replicated h lines outward for the sides
 * selected in 'sides' (EDGE_TOP / EDGE_BOTTOM).  'wrap' is the line
 * stride of 'buf'. */
void ff_draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                       int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if (w == 8) {
        __asm__ volatile (
            "1:                             \n\t"
            "movd            (%0), %%mm0    \n\t"
            "punpcklbw      %%mm0, %%mm0    \n\t"
            "punpcklwd      %%mm0, %%mm0    \n\t"
            "punpckldq      %%mm0, %%mm0    \n\t"  /* splat first pixel */
            "movq           %%mm0, -8(%0)   \n\t"  /* 8 left border bytes */
            "movq      -8(%0, %2), %%mm1    \n\t"
            "punpckhbw      %%mm1, %%mm1    \n\t"
            "punpckhwd      %%mm1, %%mm1    \n\t"
            "punpckhdq      %%mm1, %%mm1    \n\t"  /* splat last pixel */
            "movq           %%mm1, (%0, %2) \n\t"  /* 8 right border bytes */
            "add               %1, %0       \n\t"  /* next line */
            "cmp               %3, %0       \n\t"
            "jb                1b           \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    } else if (w == 16) {
        __asm__ volatile (
            "1:                             \n\t"
            "movd            (%0), %%mm0    \n\t"
            "punpcklbw      %%mm0, %%mm0    \n\t"
            "punpcklwd      %%mm0, %%mm0    \n\t"
            "punpckldq      %%mm0, %%mm0    \n\t"
            "movq           %%mm0, -8(%0)   \n\t"  /* 16 left border bytes */
            "movq           %%mm0, -16(%0)  \n\t"
            "movq      -8(%0, %2), %%mm1    \n\t"
            "punpckhbw      %%mm1, %%mm1    \n\t"
            "punpckhwd      %%mm1, %%mm1    \n\t"
            "punpckhdq      %%mm1, %%mm1    \n\t"
            "movq           %%mm1, (%0, %2) \n\t"  /* 16 right border bytes */
            "movq           %%mm1, 8(%0, %2)\n\t"
            "add               %1, %0       \n\t"
            "cmp               %3, %0       \n\t"
            "jb                1b           \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    } else {
        av_assert1(w == 4);
        __asm__ volatile (
            "1:                             \n\t"
            "movd            (%0), %%mm0    \n\t"
            "punpcklbw      %%mm0, %%mm0    \n\t"
            "punpcklwd      %%mm0, %%mm0    \n\t"
            "movd           %%mm0, -4(%0)   \n\t"  /* 4 left border bytes */
            "movd      -4(%0, %2), %%mm1    \n\t"
            "punpcklbw      %%mm1, %%mm1    \n\t"
            "punpckhwd      %%mm1, %%mm1    \n\t"
            "punpckhdq      %%mm1, %%mm1    \n\t"
            "movd           %%mm1, (%0, %2) \n\t"  /* 4 right border bytes */
            "add               %1, %0       \n\t"
            "cmp               %3, %0       \n\t"
            "jb                1b           \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides & EDGE_TOP) {
        for (i = 0; i < h; i += 4) {
            /* copy the first image line into 4 border lines above it */
            ptr = buf - (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
                  "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
                );
        }
    }

    if (sides & EDGE_BOTTOM) {
        for (i = 0; i < h; i += 4) {
            /* copy the last image line into 4 border lines below it */
            ptr = last_line + (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)last_line - (x86_reg)ptr - w),
                  "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
                  "r"(ptr + width + 2 * w)
                );
        }
    }
}
|
2003-01-05 17:57:10 +02:00
|
|
|
|
2012-10-06 23:36:49 +03:00
|
|
|
/* Signature of the edge-emulation helper used by gmc() when the source
 * block reaches outside the picture. */
typedef void emulated_edge_mc_func(uint8_t *dst, const uint8_t *src,
                                   ptrdiff_t linesize, int block_w, int block_h,
                                   int src_x, int src_y, int w, int h);

/**
 * Global motion compensation for one 8-pixel-wide block of height h:
 * for each destination pixel, sample src at an affine-transformed
 * position (parameters ox/oy, dxx/dxy/dyx/dyy in 16.16-style fixed
 * point scaled by 'shift') with bilinear interpolation and rounding
 * constant r.  Falls back to the C version (ff_gmc_c) for cases the
 * MMX path cannot handle; uses emu_edge_fn to build a padded copy when
 * the block touches the picture border.
 */
static av_always_inline void gmc(uint8_t *dst, uint8_t *src,
                                 int stride, int h, int ox, int oy,
                                 int dxx, int dxy, int dyx, int dyy,
                                 int shift, int r, int width, int height,
                                 emulated_edge_mc_func *emu_edge_fn)
{
    const int w    = 8;
    const int ix   = ox >> (16 + shift);   /* integer (full-pel) offsets */
    const int iy   = oy >> (16 + shift);
    const int oxs  = ox >> 4;              /* sub-pel parts, 12-bit scale */
    const int oys  = oy >> 4;
    const int dxxs = dxx >> 4;
    const int dxys = dxy >> 4;
    const int dyxs = dyx >> 4;
    const int dyys = dyy >> 4;
    const uint16_t r4[4]   = { r, r, r, r };
    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
    const uint64_t shift2  = 2 * shift;
#define MAX_STRIDE 4096U
#define MAX_H 8U
    uint8_t edge_buf[(MAX_H + 1) * MAX_STRIDE];
    int x, y;

    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
    const int dxh = dxy * (h - 1);
    const int dyw = dyx * (w - 1);
    /* unsigned compare doubles as a "negative or too large" test */
    int need_emu  = (unsigned)ix >= width  - w ||
                    (unsigned)iy >= height - h;

    if ( // non-constant fullpel offset (3% of blocks)
        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx | dxy | dyx | dyy) & 15
        || (need_emu && (h > MAX_H || stride > MAX_STRIDE))) {
        // FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
                 shift, r, width, height);
        return;
    }

    src += ix + iy * stride;
    if (need_emu) {
        emu_edge_fn(edge_buf, src, stride, w + 1, h + 1, ix, iy, width, height);
        src = edge_buf;
    }

    /* mm6 = interpolation scale s = 1 << shift in all four words;
     * mm7 = 0 (kept for byte->word unpacking below) */
    __asm__ volatile (
        "movd         %0, %%mm6         \n\t"
        "pxor      %%mm7, %%mm7         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        :: "r"(1<<shift)
    );

    /* process the block in 4-pixel-wide columns */
    for (x = 0; x < w; x += 4) {
        /* per-column fractional x/y positions, updated each row */
        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
                            oxs - dxys + dxxs * (x + 1),
                            oxs - dxys + dxxs * (x + 2),
                            oxs - dxys + dxxs * (x + 3) };
        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
                            oys - dyys + dyxs * (x + 1),
                            oys - dyys + dyxs * (x + 2),
                            oys - dyys + dyxs * (x + 3) };

        for (y = 0; y < h; y++) {
            /* advance the fixed-point positions by one row and extract
             * the fractional parts (top 4 bits) into mm4/mm5 */
            __asm__ volatile (
                "movq      %0, %%mm4    \n\t"
                "movq      %1, %%mm5    \n\t"
                "paddw     %2, %%mm4    \n\t"
                "paddw     %3, %%mm5    \n\t"
                "movq   %%mm4, %0       \n\t"
                "movq   %%mm5, %1       \n\t"
                "psrlw    $12, %%mm4    \n\t"
                "psrlw    $12, %%mm5    \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            /* bilinear blend of the four neighboring source pixels */
            __asm__ volatile (
                "movq      %%mm6, %%mm2 \n\t"
                "movq      %%mm6, %%mm1 \n\t"
                "psubw     %%mm4, %%mm2 \n\t"
                "psubw     %%mm5, %%mm1 \n\t"
                "movq      %%mm2, %%mm0 \n\t"
                "movq      %%mm4, %%mm3 \n\t"
                "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
                "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
                "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
                "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)

                "movd         %4, %%mm5 \n\t"
                "movd         %3, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
                "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy

                "movd         %2, %%mm5 \n\t"
                "movd         %1, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
                "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
                "paddw        %5, %%mm1 \n\t" // + rounding constant r
                "paddw     %%mm3, %%mm2 \n\t"
                "paddw     %%mm1, %%mm0 \n\t"
                "paddw     %%mm2, %%mm0 \n\t"

                "psrlw        %6, %%mm0 \n\t" // >> 2*shift
                "packuswb  %%mm0, %%mm0 \n\t"
                "movd      %%mm0, %0    \n\t"

                : "=m"(dst[x + y * stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride + 1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        src += 4 - h * stride;  /* back to the top, next 4-pixel column */
    }
}
|
|
|
|
|
2013-01-10 12:04:46 +03:00
|
|
|
#if CONFIG_VIDEODSP
|
2012-10-06 23:36:49 +03:00
|
|
|
#if HAVE_YASM
|
|
|
|
#if ARCH_X86_32
|
2013-05-14 11:55:28 +03:00
|
|
|
void ff_gmc_mmx(uint8_t *dst, uint8_t *src,
|
|
|
|
int stride, int h, int ox, int oy,
|
|
|
|
int dxx, int dxy, int dyx, int dyy,
|
|
|
|
int shift, int r, int width, int height)
|
2012-10-06 23:36:49 +03:00
|
|
|
{
|
|
|
|
gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
|
2012-12-21 19:18:43 +03:00
|
|
|
width, height, &ff_emulated_edge_mc_8);
|
2012-10-06 23:36:49 +03:00
|
|
|
}
|
|
|
|
#endif
|
2013-05-14 11:55:28 +03:00
|
|
|
void ff_gmc_sse(uint8_t *dst, uint8_t *src,
|
|
|
|
int stride, int h, int ox, int oy,
|
|
|
|
int dxx, int dxy, int dyx, int dyy,
|
|
|
|
int shift, int r, int width, int height)
|
2012-10-06 23:36:49 +03:00
|
|
|
{
|
|
|
|
gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
|
2012-12-21 19:18:43 +03:00
|
|
|
width, height, &ff_emulated_edge_mc_8);
|
2012-10-06 23:36:49 +03:00
|
|
|
}
|
|
|
|
#else
|
2013-05-14 11:55:28 +03:00
|
|
|
void ff_gmc_mmx(uint8_t *dst, uint8_t *src,
|
|
|
|
int stride, int h, int ox, int oy,
|
|
|
|
int dxx, int dxy, int dyx, int dyy,
|
|
|
|
int shift, int r, int width, int height)
|
2012-10-06 23:36:49 +03:00
|
|
|
{
|
|
|
|
gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
|
|
|
|
width, height, &ff_emulated_edge_mc_8);
|
|
|
|
}
|
|
|
|
#endif
|
2013-01-10 12:04:46 +03:00
|
|
|
#endif
|
2012-10-06 23:36:49 +03:00
|
|
|
|
2011-10-06 18:57:17 +03:00
|
|
|
#if CONFIG_DIRAC_DECODER
|
2013-01-27 15:49:57 +03:00
|
|
|
/* Generate ff_<OPNAME2>_dirac_pixels{8,16,32}_<EXT>() wrappers.
 * Heights that are not a multiple of 4 (h & 3) fall back to the C
 * implementation; otherwise the optimized <EXT> pixel kernel is
 * called on src[0].  The 32-pixel-wide variant is performed as two
 * adjacent 16-wide calls. */
#define DIRAC_PIXOP(OPNAME2, OPNAME, EXT)\
void ff_ ## OPNAME2 ## _dirac_pixels8_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    if (h&3)\
        ff_ ## OPNAME2 ## _dirac_pixels8_c(dst, src, stride, h);\
    else\
        OPNAME ## _pixels8_ ## EXT(dst, src[0], stride, h);\
}\
void ff_ ## OPNAME2 ## _dirac_pixels16_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    if (h&3)\
        ff_ ## OPNAME2 ## _dirac_pixels16_c(dst, src, stride, h);\
    else\
        OPNAME ## _pixels16_ ## EXT(dst, src[0], stride, h);\
}\
void ff_ ## OPNAME2 ## _dirac_pixels32_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    if (h&3) {\
        ff_ ## OPNAME2 ## _dirac_pixels32_c(dst, src, stride, h);\
    } else {\
        OPNAME ## _pixels16_ ## EXT(dst   , src[0]   , stride, h);\
        OPNAME ## _pixels16_ ## EXT(dst+16, src[0]+16, stride, h);\
    }\
}
|
|
|
|
|
2013-01-30 04:41:31 +03:00
|
|
|
#if HAVE_MMX_INLINE
|
2013-05-14 11:55:28 +03:00
|
|
|
/* Instantiate the MMX-inline Dirac wrappers; PIXELS16 provides the
 * static _mmxext averaging kernel the wrappers dispatch to. */
PIXELS16(static, ff_avg, , , _mmxext)
DIRAC_PIXOP(put, ff_put, mmx)
DIRAC_PIXOP(avg, ff_avg, mmx)
|
2013-01-30 04:41:31 +03:00
|
|
|
#endif
|
2011-10-06 18:57:17 +03:00
|
|
|
|
2012-11-26 15:59:41 +03:00
|
|
|
#if HAVE_YASM
|
2013-01-30 08:47:09 +03:00
|
|
|
/* mmxext variants of the avg Dirac wrappers */
DIRAC_PIXOP(avg, ff_avg, mmxext)
|
|
|
|
|
2011-10-06 18:57:17 +03:00
|
|
|
/**
 * Copy a 16-pixel-wide Dirac block; heights that are not a multiple
 * of four are delegated to the scalar C implementation.
 */
void ff_put_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3) {
        ff_put_dirac_pixels16_c(dst, src, stride, h);
        return;
    }
    ff_put_pixels16_sse2(dst, src[0], stride, h);
}
|
|
|
|
/**
 * Average a 16-pixel-wide Dirac block into dst; heights that are not
 * a multiple of four are delegated to the scalar C implementation.
 */
void ff_avg_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3) {
        ff_avg_dirac_pixels16_c(dst, src, stride, h);
        return;
    }
    ff_avg_pixels16_sse2(dst, src[0], stride, h);
}
|
|
|
|
/**
 * Copy a 32-pixel-wide Dirac block as two adjacent 16-wide SSE2 copies;
 * heights that are not a multiple of four use the scalar C fallback.
 */
void ff_put_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3) {
        ff_put_dirac_pixels32_c(dst, src, stride, h);
        return;
    }
    ff_put_pixels16_sse2(dst,      src[0],      stride, h);
    ff_put_pixels16_sse2(dst + 16, src[0] + 16, stride, h);
}
|
|
|
|
/**
 * Average a 32-pixel-wide Dirac block as two adjacent 16-wide SSE2
 * averages; heights not a multiple of four use the scalar C fallback.
 */
void ff_avg_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3) {
        ff_avg_dirac_pixels32_c(dst, src, stride, h);
        return;
    }
    ff_avg_pixels16_sse2(dst,      src[0],      stride, h);
    ff_avg_pixels16_sse2(dst + 16, src[0] + 16, stride, h);
}
|
|
|
|
#endif
|
2012-11-26 15:59:41 +03:00
|
|
|
#endif
|
2011-10-06 18:57:17 +03:00
|
|
|
|
2013-05-08 02:45:39 +03:00
|
|
|
/**
 * Clamp each float in src[0..len) to [min, max] and store into dst.
 * Processes 16 floats (64 bytes) per iteration, walking from the end
 * of the buffers down to offset 0.
 * NOTE(review): the indexing assumes len is a multiple of 16 and both
 * buffers are 16-byte aligned (movaps) — confirm against callers.
 */
void ff_vector_clipf_sse(float *dst, const float *src,
                         float min, float max, int len)
{
    x86_reg i = (len - 16) * 4;  /* byte offset of the last 16-float group */
    __asm__ volatile (
        "movss            %3, %%xmm4    \n\t"
        "movss            %4, %%xmm5    \n\t"
        "shufps $0, %%xmm4, %%xmm4      \n\t"  /* broadcast min to 4 lanes */
        "shufps $0, %%xmm5, %%xmm5      \n\t"  /* broadcast max to 4 lanes */
        "1:                             \n\t"
        "movaps   (%2, %0), %%xmm0      \n\t"  // 3/1 on intel
        "movaps 16(%2, %0), %%xmm1      \n\t"
        "movaps 32(%2, %0), %%xmm2      \n\t"
        "movaps 48(%2, %0), %%xmm3      \n\t"
        "maxps      %%xmm4, %%xmm0      \n\t"  /* lower clamp */
        "maxps      %%xmm4, %%xmm1      \n\t"
        "maxps      %%xmm4, %%xmm2      \n\t"
        "maxps      %%xmm4, %%xmm3      \n\t"
        "minps      %%xmm5, %%xmm0      \n\t"  /* upper clamp */
        "minps      %%xmm5, %%xmm1      \n\t"
        "minps      %%xmm5, %%xmm2      \n\t"
        "minps      %%xmm5, %%xmm3      \n\t"
        "movaps     %%xmm0,   (%1, %0)  \n\t"
        "movaps     %%xmm1, 16(%1, %0)  \n\t"
        "movaps     %%xmm2, 32(%1, %0)  \n\t"
        "movaps     %%xmm3, 48(%1, %0)  \n\t"
        "sub           $64, %0          \n\t"
        "jge            1b              \n\t"
        : "+&r"(i)
        : "r"(dst), "r"(src), "m"(min), "m"(max)
        : "memory"
    );
}
|
|
|
|
|
2012-07-23 00:14:20 +03:00
|
|
|
#endif /* HAVE_INLINE_ASM */
|