
Merge commit 'ca411fc1d34329cd17b28627f697e391ae52073f'

* commit 'ca411fc1d34329cd17b28627f697e391ae52073f':
  avcodec: Remove broken MMI optimizations

Conflicts:
	arch.mak
	configure
	libavcodec/avcodec.h
	libavcodec/mips/Makefile
	libavcodec/mips/dsputil_mmi.c
	libavcodec/mips/idct_mmi.c
	libavcodec/mips/mmi.h
	libavcodec/mips/mpegvideo_mmi.c
	libavcodec/options_table.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer, 2012-10-13 15:25:11 +02:00
commit 85fe70b64c
15 changed files with 10 additions and 807 deletions


@@ -70,7 +70,7 @@ config.h: .config
 SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS \
     ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
     ARMV5TE-OBJS ARMV6-OBJS ARMVFP-OBJS NEON-OBJS \
-    MMI-OBJS ALTIVEC-OBJS VIS-OBJS \
+    ALTIVEC-OBJS VIS-OBJS \
     MMX-OBJS YASM-OBJS \
     MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS MIPS32R2-OBJS \
     OBJS HOSTOBJS TESTOBJS


@@ -3,7 +3,6 @@ OBJS-$(HAVE_ARMV6)    += $(ARMV6-OBJS)     $(ARMV6-OBJS-yes)
 OBJS-$(HAVE_ARMVFP)    += $(ARMVFP-OBJS)    $(ARMVFP-OBJS-yes)
 OBJS-$(HAVE_NEON)      += $(NEON-OBJS)      $(NEON-OBJS-yes)
-OBJS-$(HAVE_MMI)       += $(MMI-OBJS)       $(MMI-OBJS-yes)
 OBJS-$(HAVE_MIPSFPU)   += $(MIPSFPU-OBJS)   $(MIPSFPU-OBJS-yes)
 OBJS-$(HAVE_MIPS32R2)  += $(MIPS32R2-OBJS)  $(MIPS32R2-OBJS-yes)
 OBJS-$(HAVE_MIPSDSPR1) += $(MIPSDSPR1-OBJS) $(MIPSDSPR1-OBJS-yes)

configure

@@ -289,7 +289,6 @@ Optimization options (experts only):
   --disable-armv6          disable armv6 optimizations
   --disable-armv6t2        disable armv6t2 optimizations
   --disable-armvfp         disable ARM VFP optimizations
-  --disable-mmi            disable MMI optimizations
   --disable-neon           disable NEON optimizations
   --disable-vis            disable VIS optimizations
   --disable-inline-asm     disable use of inline assembler
@@ -1235,7 +1234,6 @@ ARCH_EXT_LIST="
     armv6
     armv6t2
     armvfp
-    mmi
     neon
     ppc4xx
     vfpv3
@@ -1517,7 +1515,6 @@ mipsfpu_deps="mips"
 mips32r2_deps="mips"
 mipsdspr1_deps="mips"
 mipsdspr2_deps="mips"
-mmi_deps="mips"
 altivec_deps="ppc"
 ppc4xx_deps="ppc"
@@ -3359,7 +3356,6 @@ EOF
 elif enabled mips; then
     check_inline_asm loongson  '"dmult.g $1, $2, $3"'
-    enabled mmi       && check_inline_asm mmi       '"lq $2, 0($2)"'
     enabled mips32r2  && add_cflags "-mips32r2" && add_asflags "-mips32r2" &&
      check_inline_asm mips32r2 '"rotr $t0, $t1, 1"'
     enabled mipsdspr1 && add_cflags "-mdsp" && add_asflags "-mdsp" &&
@@ -4028,7 +4024,6 @@ if enabled arm; then
     echo "NEON enabled              ${neon-no}"
 fi
 if enabled mips; then
-    echo "MMI enabled               ${mmi-no}"
     echo "MIPS FPU enabled          ${mipsfpu-no}"
     echo "MIPS32R2 enabled          ${mips32r2-no}"
     echo "MIPS DSP R1 enabled       ${mipsdspr1-no}"


@@ -2788,7 +2788,9 @@ typedef struct AVCodecContext {
 #define FF_IDCT_SIMPLE        2
 #define FF_IDCT_SIMPLEMMX     3
 #define FF_IDCT_LIBMPEG2MMX   4
+#if FF_API_MMI
 #define FF_IDCT_MMI           5
+#endif
 #define FF_IDCT_ARM           7
 #define FF_IDCT_ALTIVEC       8
 #define FF_IDCT_SH4           9
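The FF_IDCT_MMI value is kept behind the new FF_API_MMI deprecation guard rather than deleted outright, so existing callers keep compiling until the next major bump. A minimal application-side sketch (illustrative, not code from this commit) of picking the IDCT in a way that works with and without the guard:

#include <libavcodec/avcodec.h>

/* Sketch: fall back to FF_IDCT_AUTO when the deprecated MMI value
 * has been compiled out of the public headers. */
static int choose_idct_algo(void)
{
#ifdef FF_IDCT_MMI
    return FF_IDCT_MMI;   /* still present while FF_API_MMI is enabled */
#else
    return FF_IDCT_AUTO;  /* MMI removed; let libavcodec decide */
#endif
}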


@@ -3162,7 +3162,6 @@ av_cold void ff_dsputil_init(DSPContext* c, AVCodecContext *avctx)
     if (HAVE_VIS)     ff_dsputil_init_vis   (c, avctx);
     if (ARCH_ALPHA)   ff_dsputil_init_alpha (c, avctx);
     if (ARCH_PPC)     ff_dsputil_init_ppc   (c, avctx);
-    if (HAVE_MMI)     ff_dsputil_init_mmi   (c, avctx);
     if (ARCH_SH4)     ff_dsputil_init_sh4   (c, avctx);
     if (ARCH_BFIN)    ff_dsputil_init_bfin  (c, avctx);
     if (HAVE_MIPSFPU) ff_dsputil_init_mips  (c, avctx);


@@ -610,7 +610,6 @@ static inline int get_penalty_factor(int lambda, int lambda2, int type){
 void ff_dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx);
 void ff_dsputil_init_arm(DSPContext* c, AVCodecContext *avctx);
 void ff_dsputil_init_bfin(DSPContext* c, AVCodecContext *avctx);
-void ff_dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx);
 void ff_dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx);
 void ff_dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx);
 void ff_dsputil_init_sh4(DSPContext* c, AVCodecContext *avctx);
@@ -619,7 +618,7 @@ void ff_dsputil_init_mips(DSPContext* c, AVCodecContext *avctx);
 void ff_dsputil_init_dwt(DSPContext *c);
-#if (ARCH_ARM && HAVE_NEON) || ARCH_PPC || HAVE_MMI || HAVE_MMX
+#if (ARCH_ARM && HAVE_NEON) || ARCH_PPC || HAVE_MMX
 #    define STRIDE_ALIGN 16
 #else
 #    define STRIDE_ALIGN 8


@@ -1,8 +1,3 @@
-MMI-OBJS                     += mips/dsputil_mmi.o \
-                                mips/idct_mmi.o    \
-MMI-OBJS-$(CONFIG_MPEGVIDEO) += mips/mpegvideo_mmi.o
 MIPSFPU-OBJS-$(CONFIG_AMRNB_DECODER) += mips/acelp_filters_mips.o \
                                         mips/celp_filters_mips.o  \
                                         mips/celp_math_mips.o     \


@@ -1,162 +0,0 @@
/*
* MMI optimized DSP utils
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* MMI optimization by Leon van Stuivenberg
* clear_blocks_mmi() by BroadQ
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/dsputil.h"
#include "mmi.h"
static void clear_blocks_mmi(DCTELEM * blocks)
{
__asm__ volatile(
".set noreorder \n"
"addiu $9, %0, 768 \n"
"nop \n"
"1: \n"
"sq $0, 0(%0) \n"
"move $8, %0 \n"
"addi %0, %0, 64 \n"
"sq $0, 16($8) \n"
"slt $10, %0, $9 \n"
"sq $0, 32($8) \n"
"bnez $10, 1b \n"
"sq $0, 48($8) \n"
".set reorder \n"
: "+r" (blocks) :: "$8", "$9", "memory" );
}
static void get_pixels_mmi(DCTELEM *block, const uint8_t *pixels, int line_size)
{
__asm__ volatile(
".set push \n\t"
".set mips3 \n\t"
"ld $8, 0(%0) \n\t"
"add %0, %0, %2 \n\t"
"ld $9, 0(%0) \n\t"
"add %0, %0, %2 \n\t"
"ld $10, 0(%0) \n\t"
"pextlb $8, $0, $8 \n\t"
"sq $8, 0(%1) \n\t"
"add %0, %0, %2 \n\t"
"ld $8, 0(%0) \n\t"
"pextlb $9, $0, $9 \n\t"
"sq $9, 16(%1) \n\t"
"add %0, %0, %2 \n\t"
"ld $9, 0(%0) \n\t"
"pextlb $10, $0, $10 \n\t"
"sq $10, 32(%1) \n\t"
"add %0, %0, %2 \n\t"
"ld $10, 0(%0) \n\t"
"pextlb $8, $0, $8 \n\t"
"sq $8, 48(%1) \n\t"
"add %0, %0, %2 \n\t"
"ld $8, 0(%0) \n\t"
"pextlb $9, $0, $9 \n\t"
"sq $9, 64(%1) \n\t"
"add %0, %0, %2 \n\t"
"ld $9, 0(%0) \n\t"
"pextlb $10, $0, $10 \n\t"
"sq $10, 80(%1) \n\t"
"pextlb $8, $0, $8 \n\t"
"sq $8, 96(%1) \n\t"
"pextlb $9, $0, $9 \n\t"
"sq $9, 112(%1) \n\t"
".set pop \n\t"
: "+r" (pixels) : "r" (block), "r" (line_size) : "$8", "$9", "$10", "memory" );
}
static void put_pixels8_mmi(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
__asm__ volatile(
".set push \n\t"
".set mips3 \n\t"
"1: \n\t"
"ldr $8, 0(%1) \n\t"
"addiu %2, %2, -1 \n\t"
"ldl $8, 7(%1) \n\t"
"add %1, %1, %3 \n\t"
"sd $8, 0(%0) \n\t"
"add %0, %0, %3 \n\t"
"bgtz %2, 1b \n\t"
".set pop \n\t"
: "+r" (block), "+r" (pixels), "+r" (h) : "r" (line_size)
: "$8", "memory" );
}
static void put_pixels16_mmi(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
__asm__ volatile (
".set push \n\t"
".set mips3 \n\t"
"1: \n\t"
"ldr $8, 0(%1) \n\t"
"add $11, %1, %3 \n\t"
"ldl $8, 7(%1) \n\t"
"add $10, %0, %3 \n\t"
"ldr $9, 8(%1) \n\t"
"ldl $9, 15(%1) \n\t"
"ldr $12, 0($11) \n\t"
"add %1, $11, %3 \n\t"
"ldl $12, 7($11) \n\t"
"pcpyld $8, $9, $8 \n\t"
"sq $8, 0(%0) \n\t"
"ldr $13, 8($11) \n\t"
"addiu %2, %2, -2 \n\t"
"ldl $13, 15($11) \n\t"
"add %0, $10, %3 \n\t"
"pcpyld $12, $13, $12 \n\t"
"sq $12, 0($10) \n\t"
"bgtz %2, 1b \n\t"
".set pop \n\t"
: "+r" (block), "+r" (pixels), "+r" (h) : "r" (line_size)
: "$8", "$9", "$10", "$11", "$12", "$13", "memory" );
}
void ff_dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx)
{
const int idct_algo= avctx->idct_algo;
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
if (!high_bit_depth) {
c->clear_blocks = clear_blocks_mmi;
c->put_pixels_tab[1][0] = put_pixels8_mmi;
c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmi;
c->put_pixels_tab[0][0] = put_pixels16_mmi;
c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmi;
c->get_pixels = get_pixels_mmi;
}
if (avctx->bits_per_raw_sample <= 8 &&
(idct_algo == FF_IDCT_AUTO || idct_algo == FF_IDCT_MMI)) {
c->idct_put= ff_mmi_idct_put;
c->idct_add= ff_mmi_idct_add;
c->idct = ff_mmi_idct;
c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
}
}
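For reference, the deleted routines have straightforward portable equivalents in libavcodec's generic C paths; a simplified sketch (not the actual FFmpeg fallback code) of what clear_blocks and put_pixels8 compute:

#include <stdint.h>
#include <string.h>

typedef int16_t DCTELEM;

/* Zero six 8x8 blocks of 16-bit coefficients (6 * 64 * 2 = 768 bytes),
 * which the sq-based loop above clears 64 bytes per iteration. */
static void clear_blocks_c(DCTELEM *blocks)
{
    memset(blocks, 0, 6 * 64 * sizeof(DCTELEM));
}

/* Copy an 8-pixel-wide block of h rows; the MMI version moves
 * 8 bytes per row with unaligned ldr/ldl loads and an sd store. */
static void put_pixels8_c(uint8_t *block, const uint8_t *pixels,
                          int line_size, int h)
{
    while (h--) {
        memcpy(block, pixels, 8);
        block  += line_size;
        pixels += line_size;
    }
}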


@@ -1,361 +0,0 @@
/*
* Originally provided by Intel at Application Note AP-922.
*
* Column code adapted from Peter Gubanov.
* Copyright (c) 2000-2001 Peter Gubanov <peter@elecard.net.ru>
* http://www.elecard.com/peter/idct.shtml
* rounding trick copyright (c) 2000 Michel Lespinasse <walken@zoy.org>
*
* MMI port and (c) 2002 by Leon van Stuivenberg
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/common.h"
#include "libavcodec/dsputil.h"
#include "mmi.h"
#define BITS_INV_ACC 5 // 4 or 5 for IEEE
#define SHIFT_INV_ROW (16 - BITS_INV_ACC)
#define SHIFT_INV_COL (1 + BITS_INV_ACC)
#define TG1 6518
#define TG2 13573
#define TG3 21895
#define CS4 23170
#define ROUNDER_0 0
#define ROUNDER_1 16
#define TAB_i_04 (32+0)
#define TAB_i_17 (32+64)
#define TAB_i_26 (32+128)
#define TAB_i_35 (32+192)
#define TG_1_16 (32+256+0)
#define TG_2_16 (32+256+16)
#define TG_3_16 (32+256+32)
#define COS_4_16 (32+256+48)
#define CLIPMAX (32+256+64+0)
static short consttable[] align16 = {
/* rounder 0*/ // assume SHIFT_INV_ROW == 11
0x3ff, 1, 0x3ff, 1, 0x3ff, 1, 0x3ff, 1,
/* rounder 1*/
0x3ff, 0, 0x3ff, 0, 0x3ff, 0, 0x3ff, 0,
/* row 0/4*/
16384, 21407, -16384, -21407, 22725, 19266, -22725, -12873,
8867, 16384, 8867, 16384, 4520, 12873, -4520, 19266,
16384, -8867, 16384, -8867, 12873, -22725, 19266, -22725,
21407, -16384, -21407, 16384, 19266, 4520, -12873, 4520,
/* row 1/7*/
22725, 29692, -22725, -29692, 31521, 26722, -31521, -17855,
12299, 22725, 12299, 22725, 6270, 17855, -6270, 26722,
22725, -12299, 22725, -12299, 17855, -31521, 26722, -31521,
29692, -22725, -29692, 22725, 26722, 6270, -17855, 6270,
/* row 2/6*/
21407, 27969, -21407, -27969, 29692, 25172, -29692, -16819,
11585, 21407, 11585, 21407, 5906, 16819, -5906, 25172,
21407, -11585, 21407, -11585, 16819, -29692, 25172, -29692,
27969, -21407, -27969, 21407, 25172, 5906, -16819, 5906,
/*row 3/5*/
19266, 25172, -19266, -25172, 26722, 22654, -26722, -15137,
10426, 19266, 10426, 19266, 5315, 15137, -5315, 22654,
19266, -10426, 19266, -10426, 15137, -26722, 22654, -26722,
25172, -19266, -25172, 19266, 22654, 5315, -15137, 5315,
/*column constants*/
TG1, TG1, TG1, TG1, TG1, TG1, TG1, TG1,
TG2, TG2, TG2, TG2, TG2, TG2, TG2, TG2,
TG3, TG3, TG3, TG3, TG3, TG3, TG3, TG3,
CS4, CS4, CS4, CS4, CS4, CS4, CS4, CS4,
/* clamp */
255, 255, 255, 255, 255, 255, 255, 255
};
#define DCT_8_INV_ROW1(blk, rowoff, taboff, rnd, outreg) { \
lq(blk, rowoff, $16); /* r16 = x7 x5 x3 x1 x6 x4 x2 x0 */ \
/*slot*/ \
lq($24, 0+taboff, $17); /* r17 = w */ \
/*delay slot $16*/ \
lq($24, 16+taboff, $18);/* r18 = w */ \
prevh($16, $2); /* r2 = x1 x3 x5 x7 x0 x2 x4 x6 */ \
lq($24, 32+taboff, $19);/* r19 = w */ \
phmadh($17, $16, $17); /* r17 = b1"b0'a1"a0' */ \
lq($24, 48+taboff, $20);/* r20 = w */ \
phmadh($18, $2, $18); /* r18 = b1'b0"a1'a0" */ \
phmadh($19, $16, $19); /* r19 = b3"b2'a3"a2' */ \
phmadh($20, $2, $20); /* r20 = b3'b2"a3'a2" */ \
paddw($17, $18, $17); /* r17 = (b1)(b0)(a1)(a0) */ \
paddw($19, $20, $19); /* r19 = (b3)(b2)(a3)(a2) */ \
pcpyld($19, $17, $18); /* r18 = (a3)(a2)(a1)(a0) */ \
pcpyud($17, $19, $20); /* r20 = (b3)(b2)(b1)(b0) */ \
paddw($18, rnd, $18); /* r18 = (a3)(a2)(a1)(a0) */\
paddw($18, $20, $17); /* r17 = ()()()(a0+b0) */ \
psubw($18, $20, $20); /* r20 = ()()()(a0-b0) */ \
psraw($17, SHIFT_INV_ROW, $17); /* r17 = (y3 y2 y1 y0) */ \
psraw($20, SHIFT_INV_ROW, $20); /* r20 = (y4 y5 y6 y7) */ \
ppach($20, $17, outreg);/* out = y4 y5 y6 y7 y3 y2 y1 y0 Note order */ \
\
prevh(outreg, $2); \
pcpyud($2, $2, $2); \
pcpyld($2, outreg, outreg); \
}
#define DCT_8_INV_COL8() \
\
lq($24, TG_3_16, $2); /* r2 = tn3 */ \
\
pmulth($11, $2, $17); /* r17 = x3 * tn3 (6420) */ \
psraw($17, 15, $17); \
pmfhl_uw($3); /* r3 = 7531 */ \
psraw($3, 15, $3); \
pinteh($3, $17, $17); /* r17 = x3 * tn3 */ \
psubh($17, $13, $17); /* r17 = tm35 */ \
\
pmulth($13, $2, $18); /* r18 = x5 * tn3 (6420) */ \
psraw($18, 15, $18); \
pmfhl_uw($3); /* r3 = 7531 */ \
psraw($3, 15, $3); \
pinteh($3, $18, $18); /* r18 = x5 * tn3 */ \
paddh($18, $11, $18); /* r18 = tp35 */ \
\
lq($24, TG_1_16, $2); /* r2 = tn1 */ \
\
pmulth($15, $2, $19); /* r19 = x7 * tn1 (6420) */ \
psraw($19, 15, $19); \
pmfhl_uw($3); /* r3 = 7531 */ \
psraw($3, 15, $3); \
pinteh($3, $19, $19); /* r19 = x7 * tn1 */ \
paddh($19, $9, $19); /* r19 = tp17 */ \
\
pmulth($9, $2, $20); /* r20 = x1 * tn1 (6420) */ \
psraw($20, 15, $20); \
pmfhl_uw($3); /* r3 = 7531 */ \
psraw($3, 15, $3); \
pinteh($3, $20, $20); /* r20 = x1 * tn1 */ \
psubh($20, $15, $20); /* r20 = tm17 */ \
\
psubh($19, $18, $3); /* r3 = t1 */ \
paddh($20, $17, $16); /* r16 = t2 */ \
psubh($20, $17, $23); /* r23 = b3 */ \
paddh($19, $18, $20); /* r20 = b0 */ \
\
lq($24, COS_4_16, $2); /* r2 = cs4 */ \
\
paddh($3, $16, $21); /* r21 = t1+t2 */ \
psubh($3, $16, $22); /* r22 = t1-t2 */ \
\
pmulth($21, $2, $21); /* r21 = cs4 * (t1+t2) 6420 */ \
psraw($21, 15, $21); \
pmfhl_uw($3); /* r3 = 7531 */ \
psraw($3, 15, $3); \
pinteh($3, $21, $21); /* r21 = b1 */ \
\
pmulth($22, $2, $22); /* r22 = cs4 * (t1-t2) 6420 */ \
psraw($22, 15, $22); \
pmfhl_uw($3); /* r3 = 7531 */ \
psraw($3, 15, $3); \
pinteh($3, $22, $22); /* r22 = b2 */ \
\
lq($24, TG_2_16, $2); /* r2 = tn2 */ \
\
pmulth($10, $2, $17); /* r17 = x2 * tn2 (6420) */ \
psraw($17, 15, $17); \
pmfhl_uw($3); /* r3 = 7531 */ \
psraw($3, 15, $3); \
pinteh($3, $17, $17); /* r17 = x3 * tn3 */ \
psubh($17, $14, $17); /* r17 = tm26 */ \
\
pmulth($14, $2, $18); /* r18 = x6 * tn2 (6420) */ \
psraw($18, 15, $18); \
pmfhl_uw($3); /* r3 = 7531 */ \
psraw($3, 15, $3); \
pinteh($3, $18, $18); /* r18 = x6 * tn2 */ \
paddh($18, $10, $18); /* r18 = tp26 */ \
\
paddh($8, $12, $2); /* r2 = tp04 */ \
psubh($8, $12, $3); /* r3 = tm04 */ \
\
paddh($2, $18, $16); /* r16 = a0 */ \
psubh($2, $18, $19); /* r19 = a3 */ \
psubh($3, $17, $18); /* r18 = a2 */ \
paddh($3, $17, $17); /* r17 = a1 */
#define DCT_8_INV_COL8_STORE(blk) \
\
paddh($16, $20, $2); /* y0 a0+b0 */ \
psubh($16, $20, $16); /* y7 a0-b0 */ \
psrah($2, SHIFT_INV_COL, $2); \
psrah($16, SHIFT_INV_COL, $16); \
sq($2, 0, blk); \
sq($16, 112, blk); \
\
paddh($17, $21, $3); /* y1 a1+b1 */ \
psubh($17, $21, $17); /* y6 a1-b1 */ \
psrah($3, SHIFT_INV_COL, $3); \
psrah($17, SHIFT_INV_COL, $17); \
sq($3, 16, blk); \
sq($17, 96, blk); \
\
paddh($18, $22, $2); /* y2 a2+b2 */ \
psubh($18, $22, $18); /* y5 a2-b2 */ \
psrah($2, SHIFT_INV_COL, $2); \
psrah($18, SHIFT_INV_COL, $18); \
sq($2, 32, blk); \
sq($18, 80, blk); \
\
paddh($19, $23, $3); /* y3 a3+b3 */ \
psubh($19, $23, $19); /* y4 a3-b3 */ \
psrah($3, SHIFT_INV_COL, $3); \
psrah($19, SHIFT_INV_COL, $19); \
sq($3, 48, blk); \
sq($19, 64, blk);
#define DCT_8_INV_COL8_PMS() \
paddh($16, $20, $2); /* y0 a0+b0 */ \
psubh($16, $20, $20); /* y7 a0-b0 */ \
psrah($2, SHIFT_INV_COL, $16); \
psrah($20, SHIFT_INV_COL, $20); \
\
paddh($17, $21, $3); /* y1 a1+b1 */ \
psubh($17, $21, $21); /* y6 a1-b1 */ \
psrah($3, SHIFT_INV_COL, $17); \
psrah($21, SHIFT_INV_COL, $21); \
\
paddh($18, $22, $2); /* y2 a2+b2 */ \
psubh($18, $22, $22); /* y5 a2-b2 */ \
psrah($2, SHIFT_INV_COL, $18); \
psrah($22, SHIFT_INV_COL, $22); \
\
paddh($19, $23, $3); /* y3 a3+b3 */ \
psubh($19, $23, $23); /* y4 a3-b3 */ \
psrah($3, SHIFT_INV_COL, $19); \
psrah($23, SHIFT_INV_COL, $23);
#define PUT(rs) \
pminh(rs, $11, $2); \
pmaxh($2, $0, $2); \
ppacb($0, $2, $2); \
sd3(2, 0, 4); \
__asm__ volatile ("add $4, $5, $4");
#define DCT_8_INV_COL8_PUT() \
PUT($16); \
PUT($17); \
PUT($18); \
PUT($19); \
PUT($23); \
PUT($22); \
PUT($21); \
PUT($20);
#define ADD(rs) \
ld3(4, 0, 2); \
pextlb($0, $2, $2); \
paddh($2, rs, $2); \
pminh($2, $11, $2); \
pmaxh($2, $0, $2); \
ppacb($0, $2, $2); \
sd3(2, 0, 4); \
__asm__ volatile ("add $4, $5, $4");
/*fixme: schedule*/
#define DCT_8_INV_COL8_ADD() \
ADD($16); \
ADD($17); \
ADD($18); \
ADD($19); \
ADD($23); \
ADD($22); \
ADD($21); \
ADD($20);
void ff_mmi_idct(int16_t * block)
{
/* $4 = block */
__asm__ volatile("la $24, %0"::"m"(consttable[0]));
lq($24, ROUNDER_0, $8);
lq($24, ROUNDER_1, $7);
DCT_8_INV_ROW1($4, 0, TAB_i_04, $8, $8);
DCT_8_INV_ROW1($4, 16, TAB_i_17, $7, $9);
DCT_8_INV_ROW1($4, 32, TAB_i_26, $7, $10);
DCT_8_INV_ROW1($4, 48, TAB_i_35, $7, $11);
DCT_8_INV_ROW1($4, 64, TAB_i_04, $7, $12);
DCT_8_INV_ROW1($4, 80, TAB_i_35, $7, $13);
DCT_8_INV_ROW1($4, 96, TAB_i_26, $7, $14);
DCT_8_INV_ROW1($4, 112, TAB_i_17, $7, $15);
DCT_8_INV_COL8();
DCT_8_INV_COL8_STORE($4);
//let savedtemp regs be saved
__asm__ volatile(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
}
void ff_mmi_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
/* $4 = dest, $5 = line_size, $6 = block */
__asm__ volatile("la $24, %0"::"m"(consttable[0]));
lq($24, ROUNDER_0, $8);
lq($24, ROUNDER_1, $7);
DCT_8_INV_ROW1($6, 0, TAB_i_04, $8, $8);
DCT_8_INV_ROW1($6, 16, TAB_i_17, $7, $9);
DCT_8_INV_ROW1($6, 32, TAB_i_26, $7, $10);
DCT_8_INV_ROW1($6, 48, TAB_i_35, $7, $11);
DCT_8_INV_ROW1($6, 64, TAB_i_04, $7, $12);
DCT_8_INV_ROW1($6, 80, TAB_i_35, $7, $13);
DCT_8_INV_ROW1($6, 96, TAB_i_26, $7, $14);
DCT_8_INV_ROW1($6, 112, TAB_i_17, $7, $15);
DCT_8_INV_COL8();
lq($24, CLIPMAX, $11);
DCT_8_INV_COL8_PMS();
DCT_8_INV_COL8_PUT();
//let savedtemp regs be saved
__asm__ volatile(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
}
void ff_mmi_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
/* $4 = dest, $5 = line_size, $6 = block */
__asm__ volatile("la $24, %0"::"m"(consttable[0]));
lq($24, ROUNDER_0, $8);
lq($24, ROUNDER_1, $7);
DCT_8_INV_ROW1($6, 0, TAB_i_04, $8, $8);
DCT_8_INV_ROW1($6, 16, TAB_i_17, $7, $9);
DCT_8_INV_ROW1($6, 32, TAB_i_26, $7, $10);
DCT_8_INV_ROW1($6, 48, TAB_i_35, $7, $11);
DCT_8_INV_ROW1($6, 64, TAB_i_04, $7, $12);
DCT_8_INV_ROW1($6, 80, TAB_i_35, $7, $13);
DCT_8_INV_ROW1($6, 96, TAB_i_26, $7, $14);
DCT_8_INV_ROW1($6, 112, TAB_i_17, $7, $15);
DCT_8_INV_COL8();
lq($24, CLIPMAX, $11);
DCT_8_INV_COL8_PMS();
DCT_8_INV_COL8_ADD();
//let savedtemp regs be saved
__asm__ volatile(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
}
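The column constants in the table above are Q15 fixed-point values of tan(pi/16), tan(2*pi/16), tan(3*pi/16) and cos(4*pi/16), as used by the AP-922 row/column IDCT. A small standalone sketch (standard math.h only) that regenerates them:

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

int main(void)
{
    /* Q15 fixed point: value * 2^15, rounded to nearest. */
    printf("TG1 = %ld\n", lrint(tan(1 * M_PI / 16) * 32768)); /* 6518  */
    printf("TG2 = %ld\n", lrint(tan(2 * M_PI / 16) * 32768)); /* 13573 */
    printf("TG3 = %ld\n", lrint(tan(3 * M_PI / 16) * 32768)); /* 21895 */
    printf("CS4 = %ld\n", lrint(cos(4 * M_PI / 16) * 32768)); /* 23170 */
    return 0;
}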


@@ -1,179 +0,0 @@
/*
* copyright (c) 2002 Leon van Stuivenberg
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_MIPS_MMI_H
#define AVCODEC_MIPS_MMI_H
#include <stdint.h>
#include "libavcodec/dsputil.h"
void ff_mmi_idct_put(uint8_t *dest, int line_size, DCTELEM *block);
void ff_mmi_idct_add(uint8_t *dest, int line_size, DCTELEM *block);
void ff_mmi_idct(DCTELEM *block);
#define align16 __attribute__ ((aligned (16)))
/*
#define r0 $zero
#define r1 $at //assembler!
#define r2 $v0 //return
#define r3 $v1 //return
#define r4 $a0 //arg
#define r5 $a1 //arg
#define r6 $a2 //arg
#define r7 $a3 //arg
#define r8 $t0 //temp
#define r9 $t1 //temp
#define r10 $t2 //temp
#define r11 $t3 //temp
#define r12 $t4 //temp
#define r13 $t5 //temp
#define r14 $t6 //temp
#define r15 $t7 //temp
#define r16 $s0 //saved temp
#define r17 $s1 //saved temp
#define r18 $s2 //saved temp
#define r19 $s3 //saved temp
#define r20 $s4 //saved temp
#define r21 $s5 //saved temp
#define r22 $s6 //saved temp
#define r23 $s7 //saved temp
#define r24 $t8 //temp
#define r25 $t9 //temp
#define r26 $k0 //kernel
#define r27 $k1 //kernel
#define r28 $gp //global ptr
#define r29 $sp //stack ptr
#define r30 $fp //frame ptr
#define r31 $ra //return addr
*/
#define lq(base, off, reg) \
__asm__ volatile ("lq " #reg ", %0("#base ")" : : "i" (off) )
#define lq2(mem, reg) \
__asm__ volatile ("lq " #reg ", %0" : : "r" (mem))
#define sq(reg, off, base) \
__asm__ volatile ("sq " #reg ", %0("#base ")" : : "i" (off) )
/*
#define ld(base, off, reg) \
__asm__ volatile ("ld " #reg ", " #off "("#base ")")
*/
#define ld3(base, off, reg) \
__asm__ volatile (".word %0" : : "i" ( 0xdc000000 | (base<<21) | (reg<<16) | (off)))
#define ldr3(base, off, reg) \
__asm__ volatile (".word %0" : : "i" ( 0x6c000000 | (base<<21) | (reg<<16) | (off)))
#define ldl3(base, off, reg) \
__asm__ volatile (".word %0" : : "i" ( 0x68000000 | (base<<21) | (reg<<16) | (off)))
/*
#define sd(reg, off, base) \
__asm__ volatile ("sd " #reg ", " #off "("#base ")")
*/
//seems assembler has bug encoding mnemonic 'sd', so DIY
#define sd3(reg, off, base) \
__asm__ volatile (".word %0" : : "i" ( 0xfc000000 | (base<<21) | (reg<<16) | (off)))
#define sw(reg, off, base) \
__asm__ volatile ("sw " #reg ", " #off "("#base ")")
#define sq2(reg, mem) \
__asm__ volatile ("sq " #reg ", %0" : : "m" (*(mem)))
#define pinth(rs, rt, rd) \
__asm__ volatile ("pinth " #rd ", " #rs ", " #rt )
#define phmadh(rs, rt, rd) \
__asm__ volatile ("phmadh " #rd ", " #rs ", " #rt )
#define pcpyud(rs, rt, rd) \
__asm__ volatile ("pcpyud " #rd ", " #rs ", " #rt )
#define pcpyld(rs, rt, rd) \
__asm__ volatile ("pcpyld " #rd ", " #rs ", " #rt )
#define pcpyh(rt, rd) \
__asm__ volatile ("pcpyh " #rd ", " #rt )
#define paddw(rs, rt, rd) \
__asm__ volatile ("paddw " #rd ", " #rs ", " #rt )
#define pextlw(rs, rt, rd) \
__asm__ volatile ("pextlw " #rd ", " #rs ", " #rt )
#define pextuw(rs, rt, rd) \
__asm__ volatile ("pextuw " #rd ", " #rs ", " #rt )
#define pextlh(rs, rt, rd) \
__asm__ volatile ("pextlh " #rd ", " #rs ", " #rt )
#define pextuh(rs, rt, rd) \
__asm__ volatile ("pextuh " #rd ", " #rs ", " #rt )
#define psubw(rs, rt, rd) \
__asm__ volatile ("psubw " #rd ", " #rs ", " #rt )
#define psraw(rt, sa, rd) \
__asm__ volatile ("psraw " #rd ", " #rt ", %0" : : "i"(sa) )
#define ppach(rs, rt, rd) \
__asm__ volatile ("ppach " #rd ", " #rs ", " #rt )
#define ppacb(rs, rt, rd) \
__asm__ volatile ("ppacb " #rd ", " #rs ", " #rt )
#define prevh(rt, rd) \
__asm__ volatile ("prevh " #rd ", " #rt )
#define pmulth(rs, rt, rd) \
__asm__ volatile ("pmulth " #rd ", " #rs ", " #rt )
#define pmaxh(rs, rt, rd) \
__asm__ volatile ("pmaxh " #rd ", " #rs ", " #rt )
#define pminh(rs, rt, rd) \
__asm__ volatile ("pminh " #rd ", " #rs ", " #rt )
#define pinteh(rs, rt, rd) \
__asm__ volatile ("pinteh " #rd ", " #rs ", " #rt )
#define paddh(rs, rt, rd) \
__asm__ volatile ("paddh " #rd ", " #rs ", " #rt )
#define psubh(rs, rt, rd) \
__asm__ volatile ("psubh " #rd ", " #rs ", " #rt )
#define psrah(rt, sa, rd) \
__asm__ volatile ("psrah " #rd ", " #rt ", %0" : : "i"(sa) )
#define pmfhl_uw(rd) \
__asm__ volatile ("pmfhl.uw " #rd)
#define pextlb(rs, rt, rd) \
__asm__ volatile ("pextlb " #rd ", " #rs ", " #rt )
#endif /* AVCODEC_MIPS_MMI_H */
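Each of these wrappers just pastes register tokens into a one-instruction inline-asm string. For example, paddh($8, $9, $10) expands to roughly the following (GNU C, R5900-only mnemonic, shown purely for illustration):

static inline void paddh_expansion_example(void)
{
    /* Result of stringizing rd, rs, rt in the paddh() macro above. */
    __asm__ volatile ("paddh $10, $8, $9");
    /* Note: no operand constraints or clobbers are declared, so the
     * compiler is never told which registers were read or written;
     * that kind of fragility is typical of this hand-rolled asm. */
}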


@@ -1,87 +0,0 @@
/*
* Copyright (c) 2000,2001 Fabrice Bellard
*
* MMI optimization by Leon van Stuivenberg
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/avcodec.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
static void dct_unquantize_h263_mmi(MpegEncContext *s,
DCTELEM *block, int n, int qscale)
{
int level=0, qmul, qadd;
int nCoeffs;
assert(s->block_last_index[n]>=0);
qadd = (qscale - 1) | 1;
qmul = qscale << 1;
if (s->mb_intra) {
if (!s->h263_aic) {
if (n < 4)
level = block[0] * s->y_dc_scale;
else
level = block[0] * s->c_dc_scale;
}else {
qadd = 0;
level = block[0];
}
nCoeffs= 63; //does not always use zigzag table
} else {
nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
}
__asm__ volatile(
"add $14, $0, %3 \n\t"
"pcpyld $8, %0, %0 \n\t"
"pcpyh $8, $8 \n\t" //r8 = qmul
"pcpyld $9, %1, %1 \n\t"
"pcpyh $9, $9 \n\t" //r9 = qadd
".p2align 2 \n\t"
"1: \n\t"
"lq $10, 0($14) \n\t" //r10 = level
"addi $14, $14, 16 \n\t" //block+=8
"addi %2, %2, -8 \n\t"
"pcgth $11, $0, $10 \n\t" //r11 = level < 0 ? -1 : 0
"pcgth $12, $10, $0 \n\t" //r12 = level > 0 ? -1 : 0
"por $12, $11, $12 \n\t"
"pmulth $10, $10, $8 \n\t"
"paddh $13, $9, $11 \n\t"
"pxor $13, $13, $11 \n\t" //r13 = level < 0 ? -qadd : qadd
"pmfhl.uw $11 \n\t"
"pinteh $10, $11, $10 \n\t" //r10 = level * qmul
"paddh $10, $10, $13 \n\t"
"pand $10, $10, $12 \n\t"
"sq $10, -16($14) \n\t"
"bgez %2, 1b \n\t"
:: "r"(qmul), "r" (qadd), "r" (nCoeffs), "r" (block) : "$8", "$9", "$10", "$11", "$12", "$13", "$14", "memory" );
if(s->mb_intra)
block[0]= level;
}
void ff_MPV_common_init_mmi(MpegEncContext *s)
{
s->dct_unquantize_h263_intra =
s->dct_unquantize_h263_inter = dct_unquantize_h263_mmi;
}
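The parallel arithmetic above is standard H.263 inverse quantization; per coefficient, the scalar logic looks roughly like the sketch below (the intra DC term is rescaled separately, as in the surrounding C code):

/* H.263 inverse quantization of a single AC (or inter) coefficient:
 * zero stays zero, otherwise scale by qmul and push the magnitude
 * away from zero by qadd, matching the pcgth/pmulth/paddh sequence. */
static inline int dequant_h263_coeff(int level, int qmul, int qadd)
{
    if (level == 0)
        return 0;
    return level < 0 ? level * qmul - qadd
                     : level * qmul + qadd;
}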


@@ -189,8 +189,6 @@ av_cold int ff_dct_common_init(MpegEncContext *s)
     ff_MPV_common_init_x86(s);
 #elif ARCH_ALPHA
     ff_MPV_common_init_axp(s);
-#elif HAVE_MMI
-    ff_MPV_common_init_mmi(s);
 #elif ARCH_ARM
     ff_MPV_common_init_arm(s);
 #elif HAVE_ALTIVEC


@@ -776,7 +776,6 @@ int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
 void ff_dct_encode_init_x86(MpegEncContext *s);
 void ff_MPV_common_init_x86(MpegEncContext *s);
 void ff_MPV_common_init_axp(MpegEncContext *s);
-void ff_MPV_common_init_mmi(MpegEncContext *s);
 void ff_MPV_common_init_arm(MpegEncContext *s);
 void ff_MPV_common_init_altivec(MpegEncContext *s);
 void ff_MPV_common_init_bfin(MpegEncContext *s);


@@ -27,6 +27,7 @@
 #include "libavutil/opt.h"
 #include "avcodec.h"
+#include "version.h"
 #include "config.h"
 #define OFFSET(x) offsetof(AVCodecContext,x)
@@ -203,7 +204,9 @@ static const AVOption options[]={
 {"simple", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLE }, INT_MIN, INT_MAX, V|E|D, "idct"},
 {"simplemmx", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEMMX }, INT_MIN, INT_MAX, V|E|D, "idct"},
 {"libmpeg2mmx", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_LIBMPEG2MMX }, INT_MIN, INT_MAX, V|E|D, "idct"},
+#if FF_API_MMI
 {"mmi", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_MMI }, INT_MIN, INT_MAX, V|E|D, "idct"},
+#endif
 {"arm", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_ARM }, INT_MIN, INT_MAX, V|E|D, "idct"},
 {"altivec", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_ALTIVEC }, INT_MIN, INT_MAX, V|E|D, "idct"},
 {"sh4", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SH4 }, INT_MIN, INT_MAX, V|E|D, "idct"},


@@ -100,5 +100,8 @@
 #ifndef FF_API_LIBMPEG2
 #define FF_API_LIBMPEG2 (LIBAVCODEC_VERSION_MAJOR < 55)
 #endif
+#ifndef FF_API_MMI
+#define FF_API_MMI (LIBAVCODEC_VERSION_MAJOR < 55)
+#endif
 #endif /* AVCODEC_VERSION_H */
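FF_API_MMI follows libavcodec's usual deprecation pattern: the macro is non-zero only below a given major version, so everything wrapped in #if FF_API_MMI disappears automatically at the next bump. A small standalone sketch (illustrative, not part of this commit) of how that gating is observed from the outside:

#include <stdio.h>
#include <libavcodec/avcodec.h>

int main(void)
{
#if FF_API_MMI
    puts("deprecated MMI identifiers are still exposed by this libavcodec");
#else
    puts("MMI identifiers have been dropped from the public headers");
#endif
    printf("libavcodec major version: %d\n", LIBAVCODEC_VERSION_MAJOR);
    return 0;
}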