diff --git a/libavcodec/mips/h264dsp_mmi.c b/libavcodec/mips/h264dsp_mmi.c index 14c4a4320e..a62bbabc67 100644 --- a/libavcodec/mips/h264dsp_mmi.c +++ b/libavcodec/mips/h264dsp_mmi.c @@ -25,38 +25,57 @@ #include "libavcodec/bit_depth_template.c" #include "h264dsp_mips.h" +#include "libavutil/mips/asmdefs.h" void ff_h264_add_pixels4_8_mmi(uint8_t *dst, int16_t *src, int stride) { + double ftmp[9]; + uint64_t low32; + __asm__ volatile ( - "xor $f0, $f0, $f0 \r\n" - "ldc1 $f2, 0(%[src]) \r\n" - "ldc1 $f4, 8(%[src]) \r\n" - "ldc1 $f6, 16(%[src]) \r\n" - "ldc1 $f8, 24(%[src]) \r\n" - "lwc1 $f10, 0(%[dst0]) \r\n" - "lwc1 $f12, 0(%[dst1]) \r\n" - "lwc1 $f14, 0(%[dst2]) \r\n" - "lwc1 $f16, 0(%[dst3]) \r\n" - "punpcklbh $f10, $f10, $f0 \r\n" - "punpcklbh $f12, $f12, $f0 \r\n" - "punpcklbh $f14, $f14, $f0 \r\n" - "punpcklbh $f16, $f16, $f0 \r\n" - "paddh $f2, $f2, $f10 \r\n" - "paddh $f4, $f4, $f12 \r\n" - "paddh $f6, $f6, $f14 \r\n" - "paddh $f8, $f8, $f16 \r\n" - "packushb $f2, $f2, $f0 \r\n" - "packushb $f4, $f4, $f0 \r\n" - "packushb $f6, $f6, $f0 \r\n" - "packushb $f8, $f8, $f0 \r\n" - "swc1 $f2, 0(%[dst0]) \r\n" - "swc1 $f4, 0(%[dst1]) \r\n" - "swc1 $f6, 0(%[dst2]) \r\n" - "swc1 $f8, 0(%[dst3]) \r\n" - ::[dst0]"r"(dst),[dst1]"r"(dst+stride),[dst2]"r"(dst+2*stride), - [dst3]"r"(dst+3*stride),[src]"r"(src) - : "$f0","$f2","$f4","$f6","$f8","$f10","$f12","$f14","$f16" + "xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" + "ldc1 %[ftmp1], 0x00(%[src]) \n\t" + "ldc1 %[ftmp2], 0x08(%[src]) \n\t" + "ldc1 %[ftmp3], 0x10(%[src]) \n\t" + "ldc1 %[ftmp4], 0x18(%[src]) \n\t" + "uld %[low32], 0x00(%[dst0]) \n\t" + "mtc1 %[low32], %[ftmp5] \n\t" + "uld %[low32], 0x00(%[dst1]) \n\t" + "mtc1 %[low32], %[ftmp6] \n\t" + "uld %[low32], 0x00(%[dst2]) \n\t" + "mtc1 %[low32], %[ftmp7] \n\t" + "uld %[low32], 0x00(%[dst3]) \n\t" + "mtc1 %[low32], %[ftmp8] \n\t" + "punpcklbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t" + "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" + "punpcklbh %[ftmp7], %[ftmp7], %[ftmp0] \n\t" + "punpcklbh %[ftmp8], %[ftmp8], %[ftmp0] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp5] \n\t" + "paddh %[ftmp2], %[ftmp2], %[ftmp6] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp7] \n\t" + "paddh %[ftmp4], %[ftmp4], %[ftmp8] \n\t" + "packushb %[ftmp1], %[ftmp1], %[ftmp0] \n\t" + "packushb %[ftmp2], %[ftmp2], %[ftmp0] \n\t" + "packushb %[ftmp3], %[ftmp3], %[ftmp0] \n\t" + "packushb %[ftmp4], %[ftmp4], %[ftmp0] \n\t" + "gsswlc1 %[ftmp1], 0x03(%[dst0]) \n\t" + "gsswrc1 %[ftmp1], 0x00(%[dst0]) \n\t" + "gsswlc1 %[ftmp2], 0x03(%[dst1]) \n\t" + "gsswrc1 %[ftmp2], 0x00(%[dst1]) \n\t" + "gsswlc1 %[ftmp3], 0x03(%[dst2]) \n\t" + "gsswrc1 %[ftmp3], 0x00(%[dst2]) \n\t" + "gsswlc1 %[ftmp4], 0x03(%[dst3]) \n\t" + "gsswrc1 %[ftmp4], 0x00(%[dst3]) \n\t" + : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), + [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), + [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), + [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), + [ftmp8]"=&f"(ftmp[8]), + [low32]"=&r"(low32) + : [dst0]"r"(dst), [dst1]"r"(dst+stride), + [dst2]"r"(dst+2*stride), [dst3]"r"(dst+3*stride), + [src]"r"(src) + : "memory" ); memset(src, 0, 32); @@ -64,79 +83,94 @@ void ff_h264_add_pixels4_8_mmi(uint8_t *dst, int16_t *src, int stride) void ff_h264_idct_add_8_mmi(uint8_t *dst, int16_t *block, int stride) { + double ftmp[12]; + uint64_t tmp[1]; + uint64_t low32; + __asm__ volatile ( - "dli $8, 1 \r\n" - "ldc1 $f0, 0(%[block]) \r\n" - "dmtc1 $8, $f16 \r\n" - "ldc1 $f2, 8(%[block]) \r\n" - "dli $8, 6 \r\n" - "ldc1 $f4, 16(%[block]) \r\n" - "dmtc1 $8, $f18 \r\n" - "psrah 
$f8, $f2, $f16 \r\n" - "ldc1 $f6, 24(%[block]) \r\n" - "psrah $f10, $f6, $f16 \r\n" - "psubh $f8, $f8, $f6 \r\n" - "paddh $f10, $f10, $f2 \r\n" - "paddh $f20, $f4, $f0 \r\n" - "psubh $f0, $f0, $f4 \r\n" - "paddh $f22, $f10, $f20 \r\n" - "psubh $f4, $f20, $f10 \r\n" - "paddh $f20, $f8, $f0 \r\n" - "psubh $f0, $f0, $f8 \r\n" - "punpckhhw $f2, $f22, $f20 \r\n" - "punpcklhw $f10, $f22, $f20 \r\n" - "punpckhhw $f8, $f0, $f4 \r\n" - "punpcklhw $f0, $f0, $f4 \r\n" - "punpckhwd $f4, $f10, $f0 \r\n" - "punpcklwd $f10, $f10, $f0 \r\n" - "punpcklwd $f20, $f2, $f8 \r\n" - "punpckhwd $f0, $f2, $f8 \r\n" - "paddh $f10, $f10, %[ff_pw_32] \r\n" - "psrah $f8, $f4, $f16 \r\n" - "psrah $f6, $f0, $f16 \r\n" - "psubh $f8, $f8, $f0 \r\n" - "paddh $f6, $f6, $f4 \r\n" - "paddh $f2, $f20, $f10 \r\n" - "psubh $f10, $f10, $f20 \r\n" - "paddh $f20, $f6, $f2 \r\n" - "psubh $f2, $f2, $f6 \r\n" - "paddh $f22, $f8, $f10 \r\n" - "xor $f14, $f14, $f14 \r\n" - "psubh $f10, $f10, $f8 \r\n" - "sdc1 $f14, 0(%[block]) \r\n" - "sdc1 $f14, 8(%[block]) \r\n" - "sdc1 $f14, 16(%[block]) \r\n" - "sdc1 $f14, 24(%[block]) \r\n" - "lwc1 $f4, 0(%[dst]) \r\n" - "psrah $f6, $f20, $f18 \r\n" - "gslwxc1 $f0, 0(%[dst], %[stride]) \r\n" - "psrah $f8, $f22, $f18 \r\n" - "punpcklbh $f4, $f4, $f14 \r\n" - "punpcklbh $f0, $f0, $f14 \r\n" - "paddh $f4, $f4, $f6 \r\n" - "paddh $f0, $f0, $f8 \r\n" - "packushb $f4, $f4, $f14 \r\n" - "packushb $f0, $f0, $f14 \r\n" - "swc1 $f4, 0(%[dst]) \r\n" - "gsswxc1 $f0, 0(%[dst], %[stride]) \r\n" - "daddu %[dst], %[dst], %[stride] \r\n" - "daddu %[dst], %[dst], %[stride] \r\n" - "lwc1 $f4, 0(%[dst]) \r\n" - "psrah $f10, $f10, $f18 \r\n" - "gslwxc1 $f0, 0(%[dst], %[stride]) \r\n" - "psrah $f2, $f2, $f18 \r\n" - "punpcklbh $f4, $f4, $f14 \r\n" - "punpcklbh $f0, $f0, $f14 \r\n" - "paddh $f4, $f4, $f10 \r\n" - "paddh $f0, $f0, $f2 \r\n" - "packushb $f4, $f4, $f14 \r\n" - "swc1 $f4, 0(%[dst]) \r\n" - "packushb $f0, $f0, $f14 \r\n" - "gsswxc1 $f0, 0(%[dst], %[stride]) \r\n" - ::[dst]"r"(dst),[block]"r"(block),[stride]"r"((uint64_t)stride), - [ff_pw_32]"f"(ff_pw_32) - : "$8","$f0","$f2","$f4","$f6","$f8","$f10","$f12","$f14","$f16", - "$f18","$f20","$f22" + "dli %[tmp0], 0x01 \n\t" + "ldc1 %[ftmp0], 0x00(%[block]) \n\t" + "mtc1 %[tmp0], %[ftmp8] \n\t" + "ldc1 %[ftmp1], 0x08(%[block]) \n\t" + "dli %[tmp0], 0x06 \n\t" + "ldc1 %[ftmp2], 0x10(%[block]) \n\t" + "mtc1 %[tmp0], %[ftmp9] \n\t" + "psrah %[ftmp4], %[ftmp1], %[ftmp8] \n\t" + "ldc1 %[ftmp3], 0x18(%[block]) \n\t" + "psrah %[ftmp5], %[ftmp3], %[ftmp8] \n\t" + "psubh %[ftmp4], %[ftmp4], %[ftmp3] \n\t" + "paddh %[ftmp5], %[ftmp5], %[ftmp1] \n\t" + "paddh %[ftmp10], %[ftmp2], %[ftmp0] \n\t" + "psubh %[ftmp0], %[ftmp0], %[ftmp2] \n\t" + "paddh %[ftmp11], %[ftmp5], %[ftmp10] \n\t" + "psubh %[ftmp2], %[ftmp10], %[ftmp5] \n\t" + "paddh %[ftmp10], %[ftmp4], %[ftmp0] \n\t" + "psubh %[ftmp0], %[ftmp0], %[ftmp4] \n\t" + "punpckhhw %[ftmp1], %[ftmp11], %[ftmp10] \n\t" + "punpcklhw %[ftmp5], %[ftmp11], %[ftmp10] \n\t" + "punpckhhw %[ftmp4], %[ftmp0], %[ftmp2] \n\t" + "punpcklhw %[ftmp0], %[ftmp0], %[ftmp2] \n\t" + "punpckhwd %[ftmp2], %[ftmp5], %[ftmp0] \n\t" + "punpcklwd %[ftmp5], %[ftmp5], %[ftmp0] \n\t" + "punpcklwd %[ftmp10], %[ftmp1], %[ftmp4] \n\t" + "punpckhwd %[ftmp0], %[ftmp1], %[ftmp4] \n\t" + "paddh %[ftmp5], %[ftmp5], %[ff_pw_32] \n\t" + "psrah %[ftmp4], %[ftmp2], %[ftmp8] \n\t" + "psrah %[ftmp3], %[ftmp0], %[ftmp8] \n\t" + "psubh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" + "paddh %[ftmp1], %[ftmp10], %[ftmp5] \n\t" + "psubh 
%[ftmp5], %[ftmp5], %[ftmp10] \n\t" + "paddh %[ftmp10], %[ftmp3], %[ftmp1] \n\t" + "psubh %[ftmp1], %[ftmp1], %[ftmp3] \n\t" + "paddh %[ftmp11], %[ftmp4], %[ftmp5] \n\t" + "xor %[ftmp7], %[ftmp7], %[ftmp7] \n\t" + "psubh %[ftmp5], %[ftmp5], %[ftmp4] \n\t" + "sdc1 %[ftmp7], 0x00(%[block]) \n\t" + "sdc1 %[ftmp7], 0x08(%[block]) \n\t" + "sdc1 %[ftmp7], 0x10(%[block]) \n\t" + "sdc1 %[ftmp7], 0x18(%[block]) \n\t" + "uld %[low32], 0x00(%[dst]) \n\t" + "mtc1 %[low32], %[ftmp2] \n\t" + "psrah %[ftmp3], %[ftmp10], %[ftmp9] \n\t" + "gslwxc1 %[ftmp0], 0x00(%[dst], %[stride]) \n\t" + "psrah %[ftmp4], %[ftmp11], %[ftmp9] \n\t" + "punpcklbh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" + "punpcklbh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" + "paddh %[ftmp2], %[ftmp2], %[ftmp3] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp4] \n\t" + "packushb %[ftmp2], %[ftmp2], %[ftmp7] \n\t" + "packushb %[ftmp0], %[ftmp0], %[ftmp7] \n\t" + "gsswlc1 %[ftmp2], 0x03(%[dst]) \n\t" + "gsswrc1 %[ftmp2], 0x00(%[dst]) \n\t" + "gsswxc1 %[ftmp0], 0x00(%[dst], %[stride]) \n\t" + PTR_ADDU "%[dst], %[dst], %[stride] \n\t" + PTR_ADDU "%[dst], %[dst], %[stride] \n\t" + "uld %[low32], 0x00(%[dst]) \n\t" + "mtc1 %[low32], %[ftmp2] \n\t" + "psrah %[ftmp5], %[ftmp5], %[ftmp9] \n\t" + "gslwxc1 %[ftmp0], 0x00(%[dst], %[stride]) \n\t" + "psrah %[ftmp1], %[ftmp1], %[ftmp9] \n\t" + "punpcklbh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" + "punpcklbh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" + "paddh %[ftmp2], %[ftmp2], %[ftmp5] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp1] \n\t" + "packushb %[ftmp2], %[ftmp2], %[ftmp7] \n\t" + "gsswlc1 %[ftmp2], 0x03(%[dst]) \n\t" + "gsswrc1 %[ftmp2], 0x00(%[dst]) \n\t" + "packushb %[ftmp0], %[ftmp0], %[ftmp7] \n\t" + "gsswxc1 %[ftmp0], 0x00(%[dst], %[stride]) \n\t" + : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), + [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), + [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), + [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), + [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), + [ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]), + [tmp0]"=&r"(tmp[0]), + [low32]"=&r"(low32) + : [dst]"r"(dst), [block]"r"(block), + [stride]"r"((mips_reg)stride), [ff_pw_32]"f"(ff_pw_32) + : "memory" ); memset(block, 0, 32); @@ -144,448 +178,482 @@ void ff_h264_idct_add_8_mmi(uint8_t *dst, int16_t *block, int stride) void ff_h264_idct8_add_8_mmi(uint8_t *dst, int16_t *block, int stride) { + double ftmp[16]; + uint64_t tmp[8]; + mips_reg addr[1]; + uint64_t low32; + __asm__ volatile ( - "lhu $10, 0x0(%[block]) \r\n" - "daddiu $29, $29, -0x20 \r\n" - "daddiu $10, $10, 0x20 \r\n" - "ldc1 $f2, 0x10(%[block]) \r\n" - "sh $10, 0x0(%[block]) \r\n" - "ldc1 $f4, 0x20(%[block]) \r\n" - "dli $10, 0x1 \r\n" - "ldc1 $f6, 0x30(%[block]) \r\n" - "dmtc1 $10, $f16 \r\n" - "ldc1 $f10, 0x50(%[block]) \r\n" - "ldc1 $f12, 0x60(%[block]) \r\n" - "ldc1 $f14, 0x70(%[block]) \r\n" - "mov.d $f0, $f2 \r\n" - "psrah $f2, $f2, $f16 \r\n" - "psrah $f8, $f10, $f16 \r\n" - "paddh $f2, $f2, $f0 \r\n" - "paddh $f8, $f8, $f10 \r\n" - "paddh $f2, $f2, $f10 \r\n" - "paddh $f8, $f8, $f14 \r\n" - "paddh $f2, $f2, $f6 \r\n" - "psubh $f8, $f8, $f0 \r\n" - "psubh $f0, $f0, $f6 \r\n" - "psubh $f10, $f10, $f6 \r\n" - "psrah $f6, $f6, $f16 \r\n" - "paddh $f0, $f0, $f14 \r\n" - "psubh $f10, $f10, $f14 \r\n" - "psrah $f14, $f14, $f16 \r\n" - "psubh $f0, $f0, $f6 \r\n" - "dli $10, 0x2 \r\n" - "psubh $f10, $f10, $f14 \r\n" - "dmtc1 $10, $f18 \r\n" - "mov.d $f14, $f2 \r\n" - "psrah $f2, $f2, $f18 \r\n" - "psrah $f6, $f8, $f18 \r\n" - "paddh $f6, $f6, $f0 \r\n" - "psrah $f0, $f0, $f18 \r\n" - 
"paddh $f2, $f2, $f10 \r\n" - "psrah $f10, $f10, $f18 \r\n" - "psubh $f0, $f0, $f8 \r\n" - "psubh $f14, $f14, $f10 \r\n" - "mov.d $f10, $f12 \r\n" - "psrah $f12, $f12, $f16 \r\n" - "psrah $f8, $f4, $f16 \r\n" - "paddh $f12, $f12, $f4 \r\n" - "psubh $f8, $f8, $f10 \r\n" - "ldc1 $f4, 0x0(%[block]) \r\n" - "ldc1 $f10, 0x40(%[block]) \r\n" - "paddh $f10, $f10, $f4 \r\n" - "paddh $f4, $f4, $f4 \r\n" - "paddh $f12, $f12, $f10 \r\n" - "psubh $f4, $f4, $f10 \r\n" - "paddh $f10, $f10, $f10 \r\n" - "paddh $f8, $f8, $f4 \r\n" - "psubh $f10, $f10, $f12 \r\n" - "paddh $f4, $f4, $f4 \r\n" - "paddh $f14, $f14, $f12 \r\n" - "psubh $f4, $f4, $f8 \r\n" - "paddh $f12, $f12, $f12 \r\n" - "paddh $f0, $f0, $f8 \r\n" - "psubh $f12, $f12, $f14 \r\n" - "paddh $f8, $f8, $f8 \r\n" - "paddh $f6, $f6, $f4 \r\n" - "psubh $f8, $f8, $f0 \r\n" - "paddh $f4, $f4, $f4 \r\n" - "paddh $f2, $f2, $f10 \r\n" - "psubh $f4, $f4, $f6 \r\n" - "paddh $f10, $f10, $f10 \r\n" - "sdc1 $f12, 0x0(%[block]) \r\n" - "psubh $f10, $f10, $f2 \r\n" - "punpckhhw $f12, $f14, $f0 \r\n" - "punpcklhw $f14, $f14, $f0 \r\n" - "punpckhhw $f0, $f6, $f2 \r\n" - "punpcklhw $f6, $f6, $f2 \r\n" - "punpckhwd $f2, $f14, $f6 \r\n" - "punpcklwd $f14, $f14, $f6 \r\n" - "punpckhwd $f6, $f12, $f0 \r\n" - "punpcklwd $f12, $f12, $f0 \r\n" - "ldc1 $f0, 0x0(%[block]) \r\n" - "sdc1 $f14, 0x0($29) \r\n" - "sdc1 $f2, 0x10($29) \r\n" - "dmfc1 $8, $f12 \r\n" - "dmfc1 $11, $f6 \r\n" - "punpckhhw $f6, $f10, $f4 \r\n" - "punpcklhw $f10, $f10, $f4 \r\n" - "punpckhhw $f4, $f8, $f0 \r\n" - "punpcklhw $f8, $f8, $f0 \r\n" - "punpckhwd $f0, $f10, $f8 \r\n" - "punpcklwd $f10, $f10, $f8 \r\n" - "punpckhwd $f8, $f6, $f4 \r\n" - "punpcklwd $f6, $f6, $f4 \r\n" - "sdc1 $f10, 0x8($29) \r\n" - "sdc1 $f0, 0x18($29) \r\n" - "dmfc1 $9, $f6 \r\n" - "dmfc1 $12, $f8 \r\n" - "ldc1 $f2, 0x18(%[block]) \r\n" - "ldc1 $f12, 0x28(%[block]) \r\n" - "ldc1 $f4, 0x38(%[block]) \r\n" - "ldc1 $f0, 0x58(%[block]) \r\n" - "ldc1 $f6, 0x68(%[block]) \r\n" - "ldc1 $f8, 0x78(%[block]) \r\n" - "mov.d $f14, $f2 \r\n" - "psrah $f10, $f0, $f16 \r\n" - "psrah $f2, $f2, $f16 \r\n" - "paddh $f10, $f10, $f0 \r\n" - "paddh $f2, $f2, $f14 \r\n" - "paddh $f10, $f10, $f8 \r\n" - "paddh $f2, $f2, $f0 \r\n" - "psubh $f10, $f10, $f14 \r\n" - "paddh $f2, $f2, $f4 \r\n" - "psubh $f14, $f14, $f4 \r\n" - "psubh $f0, $f0, $f4 \r\n" - "psrah $f4, $f4, $f16 \r\n" - "paddh $f14, $f14, $f8 \r\n" - "psubh $f0, $f0, $f8 \r\n" - "psrah $f8, $f8, $f16 \r\n" - "psubh $f14, $f14, $f4 \r\n" - "psubh $f0, $f0, $f8 \r\n" - "mov.d $f8, $f2 \r\n" - "psrah $f4, $f10, $f18 \r\n" - "psrah $f2, $f2, $f18 \r\n" - "paddh $f4, $f4, $f14 \r\n" - "psrah $f14, $f14, $f18 \r\n" - "paddh $f2, $f2, $f0 \r\n" - "psrah $f0, $f0, $f18 \r\n" - "psubh $f14, $f14, $f10 \r\n" - "psubh $f8, $f8, $f0 \r\n" - "mov.d $f0, $f6 \r\n" - "psrah $f6, $f6, $f16 \r\n" - "psrah $f10, $f12, $f16 \r\n" - "paddh $f6, $f6, $f12 \r\n" - "psubh $f10, $f10, $f0 \r\n" - "ldc1 $f12, 0x8(%[block]) \r\n" - "ldc1 $f0, 0x48(%[block]) \r\n" - "paddh $f0, $f0, $f12 \r\n" - "paddh $f12, $f12, $f12 \r\n" - "paddh $f6, $f6, $f0 \r\n" - "psubh $f12, $f12, $f0 \r\n" - "paddh $f0, $f0, $f0 \r\n" - "paddh $f10, $f10, $f12 \r\n" - "psubh $f0, $f0, $f6 \r\n" - "paddh $f12, $f12, $f12 \r\n" - "paddh $f8, $f8, $f6 \r\n" - "psubh $f12, $f12, $f10 \r\n" - "paddh $f6, $f6, $f6 \r\n" - "paddh $f14, $f14, $f10 \r\n" - "psubh $f6, $f6, $f8 \r\n" - "paddh $f10, $f10, $f10 \r\n" - "paddh $f4, $f4, $f12 \r\n" - "psubh $f10, $f10, $f14 \r\n" - "paddh $f12, $f12, $f12 \r\n" - "paddh $f2, $f2, $f0 \r\n" - "psubh 
$f12, $f12, $f4 \r\n" - "paddh $f0, $f0, $f0 \r\n" - "sdc1 $f6, 0x8(%[block]) \r\n" - "psubh $f0, $f0, $f2 \r\n" - "punpckhhw $f6, $f8, $f14 \r\n" - "punpcklhw $f8, $f8, $f14 \r\n" - "punpckhhw $f14, $f4, $f2 \r\n" - "punpcklhw $f4, $f4, $f2 \r\n" - "punpckhwd $f2, $f8, $f4 \r\n" - "punpcklwd $f8, $f8, $f4 \r\n" - "punpckhwd $f4, $f6, $f14 \r\n" - "punpcklwd $f6, $f6, $f14 \r\n" - "ldc1 $f14, 0x8(%[block]) \r\n" - "dmfc1 $13, $f8 \r\n" - "dmfc1 $15, $f2 \r\n" - "mov.d $f24, $f6 \r\n" - "mov.d $f28, $f4 \r\n" - "punpckhhw $f4, $f0, $f12 \r\n" - "punpcklhw $f0, $f0, $f12 \r\n" - "punpckhhw $f12, $f10, $f14 \r\n" - "punpcklhw $f10, $f10, $f14 \r\n" - "punpckhwd $f14, $f0, $f10 \r\n" - "punpcklwd $f0, $f0, $f10 \r\n" - "punpckhwd $f10, $f4, $f12 \r\n" - "punpcklwd $f4, $f4, $f12 \r\n" - "dmfc1 $14, $f0 \r\n" - "mov.d $f22, $f14 \r\n" - "mov.d $f26, $f4 \r\n" - "mov.d $f30, $f10 \r\n" - "daddiu $10, %[dst], 0x4 \r\n" - "dmtc1 $15, $f14 \r\n" - "dmtc1 $11, $f12 \r\n" - "ldc1 $f2, 0x10($29) \r\n" - "dmtc1 $8, $f6 \r\n" - "mov.d $f8, $f2 \r\n" - "psrah $f2, $f2, $f16 \r\n" - "psrah $f0, $f14, $f16 \r\n" - "paddh $f2, $f2, $f8 \r\n" - "paddh $f0, $f0, $f14 \r\n" - "paddh $f2, $f2, $f14 \r\n" - "paddh $f0, $f0, $f28 \r\n" - "paddh $f2, $f2, $f12 \r\n" - "psubh $f0, $f0, $f8 \r\n" - "psubh $f8, $f8, $f12 \r\n" - "psubh $f14, $f14, $f12 \r\n" - "psrah $f12, $f12, $f16 \r\n" - "paddh $f8, $f8, $f28 \r\n" - "psubh $f14, $f14, $f28 \r\n" - "psrah $f10, $f28, $f16 \r\n" - "psubh $f8, $f8, $f12 \r\n" - "psubh $f14, $f14, $f10 \r\n" - "mov.d $f10, $f2 \r\n" - "psrah $f2, $f2, $f18 \r\n" - "psrah $f12, $f0, $f18 \r\n" - "paddh $f2, $f2, $f14 \r\n" - "paddh $f12, $f12, $f8 \r\n" - "psrah $f8, $f8, $f18 \r\n" - "psrah $f14, $f14, $f18 \r\n" - "psubh $f8, $f8, $f0 \r\n" - "psubh $f10, $f10, $f14 \r\n" - "mov.d $f14, $f24 \r\n" - "psrah $f4, $f24, $f16 \r\n" - "psrah $f0, $f6, $f16 \r\n" - "paddh $f4, $f4, $f6 \r\n" - "psubh $f0, $f0, $f14 \r\n" - "ldc1 $f6, 0x0($29) \r\n" - "dmtc1 $13, $f14 \r\n" - "paddh $f14, $f14, $f6 \r\n" - "paddh $f6, $f6, $f6 \r\n" - "paddh $f4, $f4, $f14 \r\n" - "psubh $f6, $f6, $f14 \r\n" - "paddh $f14, $f14, $f14 \r\n" - "paddh $f0, $f0, $f6 \r\n" - "psubh $f14, $f14, $f4 \r\n" - "paddh $f6, $f6, $f6 \r\n" - "paddh $f10, $f10, $f4 \r\n" - "psubh $f6, $f6, $f0 \r\n" - "paddh $f4, $f4, $f4 \r\n" - "paddh $f8, $f8, $f0 \r\n" - "psubh $f4, $f4, $f10 \r\n" - "paddh $f0, $f0, $f0 \r\n" - "paddh $f12, $f12, $f6 \r\n" - "psubh $f0, $f0, $f8 \r\n" - "paddh $f6, $f6, $f6 \r\n" - "paddh $f2, $f2, $f14 \r\n" - "psubh $f6, $f6, $f12 \r\n" - "paddh $f14, $f14, $f14 \r\n" - "sdc1 $f6, 0x0($29) \r\n" - "psubh $f14, $f14, $f2 \r\n" - "sdc1 $f0, 0x10($29) \r\n" - "dmfc1 $8, $f4 \r\n" - "xor $f4, $f4, $f4 \r\n" - "sdc1 $f4, 0x0(%[block]) \r\n" - "sdc1 $f4, 0x8(%[block]) \r\n" - "sdc1 $f4, 0x10(%[block]) \r\n" - "sdc1 $f4, 0x18(%[block]) \r\n" - "sdc1 $f4, 0x20(%[block]) \r\n" - "sdc1 $f4, 0x28(%[block]) \r\n" - "sdc1 $f4, 0x30(%[block]) \r\n" - "sdc1 $f4, 0x38(%[block]) \r\n" - "sdc1 $f4, 0x40(%[block]) \r\n" - "sdc1 $f4, 0x48(%[block]) \r\n" - "sdc1 $f4, 0x50(%[block]) \r\n" - "sdc1 $f4, 0x58(%[block]) \r\n" - "sdc1 $f4, 0x60(%[block]) \r\n" - "sdc1 $f4, 0x68(%[block]) \r\n" - "sdc1 $f4, 0x70(%[block]) \r\n" - "sdc1 $f4, 0x78(%[block]) \r\n" - "dli $11, 0x6 \r\n" - "lwc1 $f6, 0x0(%[dst]) \r\n" - "dmtc1 $11, $f20 \r\n" - "gslwxc1 $f0, 0x0(%[dst], %[stride]) \r\n" - "psrah $f10, $f10, $f20 \r\n" - "psrah $f8, $f8, $f20 \r\n" - "punpcklbh $f6, $f6, $f4 \r\n" - "punpcklbh $f0, $f0, $f4 \r\n" - "paddh $f6, 
$f6, $f10 \r\n" - "paddh $f0, $f0, $f8 \r\n" - "packushb $f6, $f6, $f4 \r\n" - "packushb $f0, $f0, $f4 \r\n" - "swc1 $f6, 0x0(%[dst]) \r\n" - "gsswxc1 $f0, 0x0(%[dst], %[stride]) \r\n" - "daddu %[dst], %[dst], %[stride] \r\n" - "daddu %[dst], %[dst], %[stride] \r\n" - "lwc1 $f6, 0x0(%[dst]) \r\n" - "gslwxc1 $f0, 0x0(%[dst], %[stride]) \r\n" - "psrah $f12, $f12, $f20 \r\n" - "psrah $f2, $f2, $f20 \r\n" - "punpcklbh $f6, $f6, $f4 \r\n" - "punpcklbh $f0, $f0, $f4 \r\n" - "paddh $f6, $f6, $f12 \r\n" - "paddh $f0, $f0, $f2 \r\n" - "packushb $f6, $f6, $f4 \r\n" - "packushb $f0, $f0, $f4 \r\n" - "swc1 $f6, 0x0(%[dst]) \r\n" - "gsswxc1 $f0, 0x0(%[dst], %[stride]) \r\n" - "ldc1 $f10, 0x0($29) \r\n" - "ldc1 $f8, 0x10($29) \r\n" - "dmtc1 $8, $f12 \r\n" - "daddu %[dst], %[dst], %[stride] \r\n" - "daddu %[dst], %[dst], %[stride] \r\n" - "lwc1 $f6, 0x0(%[dst]) \r\n" - "gslwxc1 $f0, 0x0(%[dst], %[stride]) \r\n" - "psrah $f14, $f14, $f20 \r\n" - "psrah $f10, $f10, $f20 \r\n" - "punpcklbh $f6, $f6, $f4 \r\n" - "punpcklbh $f0, $f0, $f4 \r\n" - "paddh $f6, $f6, $f14 \r\n" - "paddh $f0, $f0, $f10 \r\n" - "packushb $f6, $f6, $f4 \r\n" - "packushb $f0, $f0, $f4 \r\n" - "swc1 $f6, 0x0(%[dst]) \r\n" - "gsswxc1 $f0, 0x0(%[dst], %[stride]) \r\n" - "daddu %[dst], %[dst], %[stride] \r\n" - "daddu %[dst], %[dst], %[stride] \r\n" - "lwc1 $f6, 0x0(%[dst]) \r\n" - "gslwxc1 $f0, 0x0(%[dst], %[stride]) \r\n" - "psrah $f8, $f8, $f20 \r\n" - "psrah $f12, $f12, $f20 \r\n" - "punpcklbh $f6, $f6, $f4 \r\n" - "punpcklbh $f0, $f0, $f4 \r\n" - "paddh $f6, $f6, $f8 \r\n" - "paddh $f0, $f0, $f12 \r\n" - "packushb $f6, $f6, $f4 \r\n" - "packushb $f0, $f0, $f4 \r\n" - "swc1 $f6, 0x0(%[dst]) \r\n" - "gsswxc1 $f0, 0x0(%[dst], %[stride]) \r\n" - "dmtc1 $12, $f2 \r\n" - "dmtc1 $9, $f12 \r\n" - "ldc1 $f8, 0x18($29) \r\n" - "mov.d $f10, $f8 \r\n" - "psrah $f8, $f8, $f16 \r\n" - "psrah $f14, $f22, $f16 \r\n" - "paddh $f14, $f14, $f22 \r\n" - "paddh $f8, $f8, $f10 \r\n" - "paddh $f14, $f14, $f30 \r\n" - "paddh $f8, $f8, $f22 \r\n" - "psubh $f14, $f14, $f10 \r\n" - "paddh $f8, $f8, $f2 \r\n" - "psubh $f10, $f10, $f2 \r\n" - "psubh $f6, $f22, $f2 \r\n" - "psrah $f2, $f2, $f16 \r\n" - "paddh $f10, $f10, $f30 \r\n" - "psubh $f6, $f6, $f30 \r\n" - "psrah $f4, $f30, $f16 \r\n" - "psubh $f10, $f10, $f2 \r\n" - "psubh $f6, $f6, $f4 \r\n" - "mov.d $f4, $f8 \r\n" - "psrah $f8, $f8, $f18 \r\n" - "psrah $f2, $f14, $f18 \r\n" - "paddh $f8, $f8, $f6 \r\n" - "paddh $f2, $f2, $f10 \r\n" - "psrah $f10, $f10, $f18 \r\n" - "psrah $f6, $f6, $f18 \r\n" - "psubh $f10, $f10, $f14 \r\n" - "psubh $f4, $f4, $f6 \r\n" - "mov.d $f6, $f26 \r\n" - "psrah $f0, $f26, $f16 \r\n" - "psrah $f14, $f12, $f16 \r\n" - "paddh $f0, $f0, $f12 \r\n" - "psubh $f14, $f14, $f6 \r\n" - "ldc1 $f12, 0x8($29) \r\n" - "dmtc1 $14, $f6 \r\n" - "paddh $f6, $f6, $f12 \r\n" - "paddh $f12, $f12, $f12 \r\n" - "paddh $f0, $f0, $f6 \r\n" - "psubh $f12, $f12, $f6 \r\n" - "paddh $f6, $f6, $f6 \r\n" - "paddh $f14, $f14, $f12 \r\n" - "psubh $f6, $f6, $f0 \r\n" - "paddh $f12, $f12, $f12 \r\n" - "paddh $f4, $f4, $f0 \r\n" - "psubh $f12, $f12, $f14 \r\n" - "paddh $f0, $f0, $f0 \r\n" - "paddh $f10, $f10, $f14 \r\n" - "psubh $f0, $f0, $f4 \r\n" - "paddh $f14, $f14, $f14 \r\n" - "paddh $f2, $f2, $f12 \r\n" - "psubh $f14, $f14, $f10 \r\n" - "paddh $f12, $f12, $f12 \r\n" - "paddh $f8, $f8, $f6 \r\n" - "psubh $f12, $f12, $f2 \r\n" - "paddh $f6, $f6, $f6 \r\n" - "sdc1 $f12, 0x8($29) \r\n" - "psubh $f6, $f6, $f8 \r\n" - "sdc1 $f14, 0x18($29) \r\n" - "dmfc1 $9, $f0 \r\n" - "xor $f0, $f0, $f0 \r\n" - "lwc1 $f12, 
0x0($10) \r\n" - "gslwxc1 $f14, 0x0($10, %[stride]) \r\n" - "psrah $f4, $f4, $f20 \r\n" - "psrah $f10, $f10, $f20 \r\n" - "punpcklbh $f12, $f12, $f0 \r\n" - "punpcklbh $f14, $f14, $f0 \r\n" - "paddh $f12, $f12, $f4 \r\n" - "paddh $f14, $f14, $f10 \r\n" - "packushb $f12, $f12, $f0 \r\n" - "packushb $f14, $f14, $f0 \r\n" - "swc1 $f12, 0x0($10) \r\n" - "gsswxc1 $f14, 0x0($10, %[stride]) \r\n" - "daddu $10, $10, %[stride] \r\n" - "daddu $10, $10, %[stride] \r\n" - "lwc1 $f12, 0x0($10) \r\n" - "gslwxc1 $f14, 0x0($10, %[stride]) \r\n" - "psrah $f2, $f2, $f20 \r\n" - "psrah $f8, $f8, $f20 \r\n" - "punpcklbh $f12, $f12, $f0 \r\n" - "punpcklbh $f14, $f14, $f0 \r\n" - "paddh $f12, $f12, $f2 \r\n" - "paddh $f14, $f14, $f8 \r\n" - "packushb $f12, $f12, $f0 \r\n" - "packushb $f14, $f14, $f0 \r\n" - "swc1 $f12, 0x0($10) \r\n" - "gsswxc1 $f14, 0x0($10, %[stride]) \r\n" - "ldc1 $f4, 0x8($29) \r\n" - "ldc1 $f10, 0x18($29) \r\n" - "daddu $10, $10, %[stride] \r\n" - "dmtc1 $9, $f2 \r\n" - "daddu $10, $10, %[stride] \r\n" - "lwc1 $f12, 0x0($10) \r\n" - "gslwxc1 $f14, 0x0($10, %[stride]) \r\n" - "psrah $f6, $f6, $f20 \r\n" - "psrah $f4, $f4, $f20 \r\n" - "punpcklbh $f12, $f12, $f0 \r\n" - "punpcklbh $f14, $f14, $f0 \r\n" - "paddh $f12, $f12, $f6 \r\n" - "paddh $f14, $f14, $f4 \r\n" - "packushb $f12, $f12, $f0 \r\n" - "packushb $f14, $f14, $f0 \r\n" - "swc1 $f12, 0x0($10) \r\n" - "gsswxc1 $f14, 0x0($10, %[stride]) \r\n" - "daddu $10, $10, %[stride] \r\n" - "daddu $10, $10, %[stride] \r\n" - "lwc1 $f12, 0x0($10) \r\n" - "gslwxc1 $f14, 0x0($10, %[stride]) \r\n" - "psrah $f10, $f10, $f20 \r\n" - "psrah $f2, $f2, $f20 \r\n" - "punpcklbh $f12, $f12, $f0 \r\n" - "punpcklbh $f14, $f14, $f0 \r\n" - "paddh $f12, $f12, $f10 \r\n" - "paddh $f14, $f14, $f2 \r\n" - "packushb $f12, $f12, $f0 \r\n" - "packushb $f14, $f14, $f0 \r\n" - "swc1 $f12, 0x0($10) \r\n" - "gsswxc1 $f14, 0x0($10, %[stride]) \r\n" - "daddiu $29, $29, 0x20 \r\n" - ::[dst]"r"(dst),[block]"r"(block),[stride]"r"((uint64_t)stride) - :"$8","$9","$10","$11","$12","$13","$14","$15","$29","$f0","$f2","$f4", - "$f8","$f10","$f12","$f14","$f16","$f18","$f20","$f22","$f24","$f26", - "$f28","$f30" + "lhu %[tmp0], 0x00(%[block]) \n\t" + PTR_ADDI "$29, $29, -0x20 \n\t" + PTR_ADDIU "%[tmp0], %[tmp0], 0x20 \n\t" + "ldc1 %[ftmp1], 0x10(%[block]) \n\t" + "sh %[tmp0], 0x00(%[block]) \n\t" + "ldc1 %[ftmp2], 0x20(%[block]) \n\t" + "dli %[tmp0], 0x01 \n\t" + "ldc1 %[ftmp3], 0x30(%[block]) \n\t" + "mtc1 %[tmp0], %[ftmp8] \n\t" + "ldc1 %[ftmp5], 0x50(%[block]) \n\t" + "ldc1 %[ftmp6], 0x60(%[block]) \n\t" + "ldc1 %[ftmp7], 0x70(%[block]) \n\t" + "mov.d %[ftmp0], %[ftmp1] \n\t" + "psrah %[ftmp1], %[ftmp1], %[ftmp8] \n\t" + "psrah %[ftmp4], %[ftmp5], %[ftmp8] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" + "paddh %[ftmp4], %[ftmp4], %[ftmp5] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp5] \n\t" + "paddh %[ftmp4], %[ftmp4], %[ftmp7] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp3] \n\t" + "psubh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" + "psubh %[ftmp0], %[ftmp0], %[ftmp3] \n\t" + "psubh %[ftmp5], %[ftmp5], %[ftmp3] \n\t" + "psrah %[ftmp3], %[ftmp3], %[ftmp8] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" + "psubh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" + "psrah %[ftmp7], %[ftmp7], %[ftmp8] \n\t" + "psubh %[ftmp0], %[ftmp0], %[ftmp3] \n\t" + "dli %[tmp0], 0x02 \n\t" + "psubh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" + "mtc1 %[tmp0], %[ftmp9] \n\t" + "mov.d %[ftmp7], %[ftmp1] \n\t" + "psrah %[ftmp1], %[ftmp1], %[ftmp9] \n\t" + "psrah %[ftmp3], %[ftmp4], %[ftmp9] \n\t" + "paddh %[ftmp3], %[ftmp3], 
%[ftmp0] \n\t" + "psrah %[ftmp0], %[ftmp0], %[ftmp9] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp5] \n\t" + "psrah %[ftmp5], %[ftmp5], %[ftmp9] \n\t" + "psubh %[ftmp0], %[ftmp0], %[ftmp4] \n\t" + "psubh %[ftmp7], %[ftmp7], %[ftmp5] \n\t" + "mov.d %[ftmp5], %[ftmp6] \n\t" + "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" + "psrah %[ftmp4], %[ftmp2], %[ftmp8] \n\t" + "paddh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" + "psubh %[ftmp4], %[ftmp4], %[ftmp5] \n\t" + "ldc1 %[ftmp2], 0x00(%[block]) \n\t" + "ldc1 %[ftmp5], 0x40(%[block]) \n\t" + "paddh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" + "paddh %[ftmp2], %[ftmp2], %[ftmp2] \n\t" + "paddh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" + "psubh %[ftmp2], %[ftmp2], %[ftmp5] \n\t" + "paddh %[ftmp5], %[ftmp5], %[ftmp5] \n\t" + "paddh %[ftmp4], %[ftmp4], %[ftmp2] \n\t" + "psubh %[ftmp5], %[ftmp5], %[ftmp6] \n\t" + "paddh %[ftmp2], %[ftmp2], %[ftmp2] \n\t" + "paddh %[ftmp7], %[ftmp7], %[ftmp6] \n\t" + "psubh %[ftmp2], %[ftmp2], %[ftmp4] \n\t" + "paddh %[ftmp6], %[ftmp6], %[ftmp6] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp4] \n\t" + "psubh %[ftmp6], %[ftmp6], %[ftmp7] \n\t" + "paddh %[ftmp4], %[ftmp4], %[ftmp4] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" + "psubh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" + "paddh %[ftmp2], %[ftmp2], %[ftmp2] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp5] \n\t" + "psubh %[ftmp2], %[ftmp2], %[ftmp3] \n\t" + "paddh %[ftmp5], %[ftmp5], %[ftmp5] \n\t" + "sdc1 %[ftmp6], 0x00(%[block]) \n\t" + "psubh %[ftmp5], %[ftmp5], %[ftmp1] \n\t" + "punpckhhw %[ftmp6], %[ftmp7], %[ftmp0] \n\t" + "punpcklhw %[ftmp7], %[ftmp7], %[ftmp0] \n\t" + "punpckhhw %[ftmp0], %[ftmp3], %[ftmp1] \n\t" + "punpcklhw %[ftmp3], %[ftmp3], %[ftmp1] \n\t" + "punpckhwd %[ftmp1], %[ftmp7], %[ftmp3] \n\t" + "punpcklwd %[ftmp7], %[ftmp7], %[ftmp3] \n\t" + "punpckhwd %[ftmp3], %[ftmp6], %[ftmp0] \n\t" + "punpcklwd %[ftmp6], %[ftmp6], %[ftmp0] \n\t" + "ldc1 %[ftmp0], 0x00(%[block]) \n\t" + "sdc1 %[ftmp7], 0x00($29) \n\t" + "sdc1 %[ftmp1], 0x10($29) \n\t" + "dmfc1 %[tmp1], %[ftmp6] \n\t" + "dmfc1 %[tmp3], %[ftmp3] \n\t" + "punpckhhw %[ftmp3], %[ftmp5], %[ftmp2] \n\t" + "punpcklhw %[ftmp5], %[ftmp5], %[ftmp2] \n\t" + "punpckhhw %[ftmp2], %[ftmp4], %[ftmp0] \n\t" + "punpcklhw %[ftmp4], %[ftmp4], %[ftmp0] \n\t" + "punpckhwd %[ftmp0], %[ftmp5], %[ftmp4] \n\t" + "punpcklwd %[ftmp5], %[ftmp5], %[ftmp4] \n\t" + "punpckhwd %[ftmp4], %[ftmp3], %[ftmp2] \n\t" + "punpcklwd %[ftmp3], %[ftmp3], %[ftmp2] \n\t" + "sdc1 %[ftmp5], 0x08($29) \n\t" + "sdc1 %[ftmp0], 0x18($29) \n\t" + "dmfc1 %[tmp2], %[ftmp3] \n\t" + "dmfc1 %[tmp4], %[ftmp4] \n\t" + "ldc1 %[ftmp1], 0x18(%[block]) \n\t" + "ldc1 %[ftmp6], 0x28(%[block]) \n\t" + "ldc1 %[ftmp2], 0x38(%[block]) \n\t" + "ldc1 %[ftmp0], 0x58(%[block]) \n\t" + "ldc1 %[ftmp3], 0x68(%[block]) \n\t" + "ldc1 %[ftmp4], 0x78(%[block]) \n\t" + "mov.d %[ftmp7], %[ftmp1] \n\t" + "psrah %[ftmp5], %[ftmp0], %[ftmp8] \n\t" + "psrah %[ftmp1], %[ftmp1], %[ftmp8] \n\t" + "paddh %[ftmp5], %[ftmp5], %[ftmp0] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" + "paddh %[ftmp5], %[ftmp5], %[ftmp4] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" + "psubh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp2] \n\t" + "psubh %[ftmp7], %[ftmp7], %[ftmp2] \n\t" + "psubh %[ftmp0], %[ftmp0], %[ftmp2] \n\t" + "psrah %[ftmp2], %[ftmp2], %[ftmp8] \n\t" + "paddh %[ftmp7], %[ftmp7], %[ftmp4] \n\t" + "psubh %[ftmp0], %[ftmp0], %[ftmp4] \n\t" + "psrah %[ftmp4], %[ftmp4], %[ftmp8] \n\t" + "psubh %[ftmp7], %[ftmp7], %[ftmp2] \n\t" + "psubh %[ftmp0], %[ftmp0], %[ftmp4] \n\t" + "mov.d %[ftmp4], 
%[ftmp1] \n\t" + "psrah %[ftmp2], %[ftmp5], %[ftmp9] \n\t" + "psrah %[ftmp1], %[ftmp1], %[ftmp9] \n\t" + "paddh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" + "psrah %[ftmp7], %[ftmp7], %[ftmp9] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" + "psrah %[ftmp0], %[ftmp0], %[ftmp9] \n\t" + "psubh %[ftmp7], %[ftmp7], %[ftmp5] \n\t" + "psubh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" + "mov.d %[ftmp0], %[ftmp3] \n\t" + "psrah %[ftmp3], %[ftmp3], %[ftmp8] \n\t" + "psrah %[ftmp5], %[ftmp6], %[ftmp8] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp6] \n\t" + "psubh %[ftmp5], %[ftmp5], %[ftmp0] \n\t" + "ldc1 %[ftmp6], 0x08(%[block]) \n\t" + "ldc1 %[ftmp0], 0x48(%[block]) \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp6] \n\t" + "paddh %[ftmp6], %[ftmp6], %[ftmp6] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" + "psubh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp0] \n\t" + "paddh %[ftmp5], %[ftmp5], %[ftmp6] \n\t" + "psubh %[ftmp0], %[ftmp0], %[ftmp3] \n\t" + "paddh %[ftmp6], %[ftmp6], %[ftmp6] \n\t" + "paddh %[ftmp4], %[ftmp4], %[ftmp3] \n\t" + "psubh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp3] \n\t" + "paddh %[ftmp7], %[ftmp7], %[ftmp5] \n\t" + "psubh %[ftmp3], %[ftmp3], %[ftmp4] \n\t" + "paddh %[ftmp5], %[ftmp5], %[ftmp5] \n\t" + "paddh %[ftmp2], %[ftmp2], %[ftmp6] \n\t" + "psubh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" + "paddh %[ftmp6], %[ftmp6], %[ftmp6] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" + "psubh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp0] \n\t" + "sdc1 %[ftmp3], 0x08(%[block]) \n\t" + "psubh %[ftmp0], %[ftmp0], %[ftmp1] \n\t" + "punpckhhw %[ftmp3], %[ftmp4], %[ftmp7] \n\t" + "punpcklhw %[ftmp4], %[ftmp4], %[ftmp7] \n\t" + "punpckhhw %[ftmp7], %[ftmp2], %[ftmp1] \n\t" + "punpcklhw %[ftmp2], %[ftmp2], %[ftmp1] \n\t" + "punpckhwd %[ftmp1], %[ftmp4], %[ftmp2] \n\t" + "punpcklwd %[ftmp4], %[ftmp4], %[ftmp2] \n\t" + "punpckhwd %[ftmp2], %[ftmp3], %[ftmp7] \n\t" + "punpcklwd %[ftmp3], %[ftmp3], %[ftmp7] \n\t" + "ldc1 %[ftmp7], 0x08(%[block]) \n\t" + "dmfc1 %[tmp5], %[ftmp4] \n\t" + "dmfc1 %[tmp7], %[ftmp1] \n\t" + "mov.d %[ftmp12], %[ftmp3] \n\t" + "mov.d %[ftmp14], %[ftmp2] \n\t" + "punpckhhw %[ftmp2], %[ftmp0], %[ftmp6] \n\t" + "punpcklhw %[ftmp0], %[ftmp0], %[ftmp6] \n\t" + "punpckhhw %[ftmp6], %[ftmp5], %[ftmp7] \n\t" + "punpcklhw %[ftmp5], %[ftmp5], %[ftmp7] \n\t" + "punpckhwd %[ftmp7], %[ftmp0], %[ftmp5] \n\t" + "punpcklwd %[ftmp0], %[ftmp0], %[ftmp5] \n\t" + "punpckhwd %[ftmp5], %[ftmp2], %[ftmp6] \n\t" + "punpcklwd %[ftmp2], %[ftmp2], %[ftmp6] \n\t" + "dmfc1 %[tmp6], %[ftmp0] \n\t" + "mov.d %[ftmp11], %[ftmp7] \n\t" + "mov.d %[ftmp13], %[ftmp2] \n\t" + "mov.d %[ftmp15], %[ftmp5] \n\t" + PTR_ADDIU "%[addr0], %[dst], 0x04 \n\t" + "dmtc1 %[tmp7], %[ftmp7] \n\t" + "dmtc1 %[tmp3], %[ftmp6] \n\t" + "ldc1 %[ftmp1], 0x10($29) \n\t" + "dmtc1 %[tmp1], %[ftmp3] \n\t" + "mov.d %[ftmp4], %[ftmp1] \n\t" + "psrah %[ftmp1], %[ftmp1], %[ftmp8] \n\t" + "psrah %[ftmp0], %[ftmp7], %[ftmp8] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp4] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp14] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp6] \n\t" + "psubh %[ftmp0], %[ftmp0], %[ftmp4] \n\t" + "psubh %[ftmp4], %[ftmp4], %[ftmp6] \n\t" + "psubh %[ftmp7], %[ftmp7], %[ftmp6] \n\t" + "psrah %[ftmp6], %[ftmp6], %[ftmp8] \n\t" + "paddh %[ftmp4], %[ftmp4], %[ftmp14] \n\t" + "psubh %[ftmp7], %[ftmp7], %[ftmp14] \n\t" + "psrah %[ftmp5], %[ftmp14], %[ftmp8] \n\t" + "psubh %[ftmp4], 
%[ftmp4], %[ftmp6] \n\t" + "psubh %[ftmp7], %[ftmp7], %[ftmp5] \n\t" + "mov.d %[ftmp5], %[ftmp1] \n\t" + "psrah %[ftmp1], %[ftmp1], %[ftmp9] \n\t" + "psrah %[ftmp6], %[ftmp0], %[ftmp9] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" + "paddh %[ftmp6], %[ftmp6], %[ftmp4] \n\t" + "psrah %[ftmp4], %[ftmp4], %[ftmp9] \n\t" + "psrah %[ftmp7], %[ftmp7], %[ftmp9] \n\t" + "psubh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" + "psubh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" + "mov.d %[ftmp7], %[ftmp12] \n\t" + "psrah %[ftmp2], %[ftmp12], %[ftmp8] \n\t" + "psrah %[ftmp0], %[ftmp3], %[ftmp8] \n\t" + "paddh %[ftmp2], %[ftmp2], %[ftmp3] \n\t" + "psubh %[ftmp0], %[ftmp0], %[ftmp7] \n\t" + "ldc1 %[ftmp3], 0x00($29) \n\t" + "dmtc1 %[tmp5], %[ftmp7] \n\t" + "paddh %[ftmp7], %[ftmp7], %[ftmp3] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp3] \n\t" + "paddh %[ftmp2], %[ftmp2], %[ftmp7] \n\t" + "psubh %[ftmp3], %[ftmp3], %[ftmp7] \n\t" + "paddh %[ftmp7], %[ftmp7], %[ftmp7] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp3] \n\t" + "psubh %[ftmp7], %[ftmp7], %[ftmp2] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp3] \n\t" + "paddh %[ftmp5], %[ftmp5], %[ftmp2] \n\t" + "psubh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" + "paddh %[ftmp2], %[ftmp2], %[ftmp2] \n\t" + "paddh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" + "psubh %[ftmp2], %[ftmp2], %[ftmp5] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp0] \n\t" + "paddh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" + "psubh %[ftmp0], %[ftmp0], %[ftmp4] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp3] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp7] \n\t" + "psubh %[ftmp3], %[ftmp3], %[ftmp6] \n\t" + "paddh %[ftmp7], %[ftmp7], %[ftmp7] \n\t" + "sdc1 %[ftmp3], 0x00($29) \n\t" + "psubh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" + "sdc1 %[ftmp0], 0x10($29) \n\t" + "dmfc1 %[tmp1], %[ftmp2] \n\t" + "xor %[ftmp2], %[ftmp2], %[ftmp2] \n\t" + "sdc1 %[ftmp2], 0x00(%[block]) \n\t" + "sdc1 %[ftmp2], 0x08(%[block]) \n\t" + "sdc1 %[ftmp2], 0x10(%[block]) \n\t" + "sdc1 %[ftmp2], 0x18(%[block]) \n\t" + "sdc1 %[ftmp2], 0x20(%[block]) \n\t" + "sdc1 %[ftmp2], 0x28(%[block]) \n\t" + "sdc1 %[ftmp2], 0x30(%[block]) \n\t" + "sdc1 %[ftmp2], 0x38(%[block]) \n\t" + "sdc1 %[ftmp2], 0x40(%[block]) \n\t" + "sdc1 %[ftmp2], 0x48(%[block]) \n\t" + "sdc1 %[ftmp2], 0x50(%[block]) \n\t" + "sdc1 %[ftmp2], 0x58(%[block]) \n\t" + "sdc1 %[ftmp2], 0x60(%[block]) \n\t" + "sdc1 %[ftmp2], 0x68(%[block]) \n\t" + "sdc1 %[ftmp2], 0x70(%[block]) \n\t" + "sdc1 %[ftmp2], 0x78(%[block]) \n\t" + "dli %[tmp3], 0x06 \n\t" + "uld %[low32], 0x00(%[dst]) \n\t" + "mtc1 %[low32], %[ftmp3] \n\t" + "mtc1 %[tmp3], %[ftmp10] \n\t" + "gslwxc1 %[ftmp0], 0x00(%[dst], %[stride]) \n\t" + "psrah %[ftmp5], %[ftmp5], %[ftmp10] \n\t" + "psrah %[ftmp4], %[ftmp4], %[ftmp10] \n\t" + "punpcklbh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" + "punpcklbh %[ftmp0], %[ftmp0], %[ftmp2] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp4] \n\t" + "packushb %[ftmp3], %[ftmp3], %[ftmp2] \n\t" + "packushb %[ftmp0], %[ftmp0], %[ftmp2] \n\t" + "gsswlc1 %[ftmp3], 0x03(%[dst]) \n\t" + "gsswrc1 %[ftmp3], 0x00(%[dst]) \n\t" + "gsswxc1 %[ftmp0], 0x00(%[dst], %[stride]) \n\t" + PTR_ADDU "%[dst], %[dst], %[stride] \n\t" + PTR_ADDU "%[dst], %[dst], %[stride] \n\t" + "uld %[low32], 0x00(%[dst]) \n\t" + "mtc1 %[low32], %[ftmp3] \n\t" + "gslwxc1 %[ftmp0], 0x00(%[dst], %[stride]) \n\t" + "psrah %[ftmp6], %[ftmp6], %[ftmp10] \n\t" + "psrah %[ftmp1], %[ftmp1], %[ftmp10] \n\t" + "punpcklbh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" + "punpcklbh %[ftmp0], %[ftmp0], %[ftmp2] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp6] \n\t" + "paddh 
%[ftmp0], %[ftmp0], %[ftmp1] \n\t" + "packushb %[ftmp3], %[ftmp3], %[ftmp2] \n\t" + "packushb %[ftmp0], %[ftmp0], %[ftmp2] \n\t" + "gsswlc1 %[ftmp3], 0x03(%[dst]) \n\t" + "gsswrc1 %[ftmp3], 0x00(%[dst]) \n\t" + "gsswxc1 %[ftmp0], 0x00(%[dst], %[stride]) \n\t" + "ldc1 %[ftmp5], 0x00($29) \n\t" + "ldc1 %[ftmp4], 0x10($29) \n\t" + "dmtc1 %[tmp1], %[ftmp6] \n\t" + PTR_ADDU "%[dst], %[dst], %[stride] \n\t" + PTR_ADDU "%[dst], %[dst], %[stride] \n\t" + "uld %[low32], 0x00(%[dst]) \n\t" + "mtc1 %[low32], %[ftmp3] \n\t" + "gslwxc1 %[ftmp0], 0x00(%[dst], %[stride]) \n\t" + "psrah %[ftmp7], %[ftmp7], %[ftmp10] \n\t" + "psrah %[ftmp5], %[ftmp5], %[ftmp10] \n\t" + "punpcklbh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" + "punpcklbh %[ftmp0], %[ftmp0], %[ftmp2] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp7] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp5] \n\t" + "packushb %[ftmp3], %[ftmp3], %[ftmp2] \n\t" + "packushb %[ftmp0], %[ftmp0], %[ftmp2] \n\t" + "gsswlc1 %[ftmp3], 0x03(%[dst]) \n\t" + "gsswrc1 %[ftmp3], 0x00(%[dst]) \n\t" + "gsswxc1 %[ftmp0], 0x00(%[dst], %[stride]) \n\t" + PTR_ADDU "%[dst], %[dst], %[stride] \n\t" + PTR_ADDU "%[dst], %[dst], %[stride] \n\t" + "uld %[low32], 0x00(%[dst]) \n\t" + "mtc1 %[low32], %[ftmp3] \n\t" + "gslwxc1 %[ftmp0], 0x00(%[dst], %[stride]) \n\t" + "psrah %[ftmp4], %[ftmp4], %[ftmp10] \n\t" + "psrah %[ftmp6], %[ftmp6], %[ftmp10] \n\t" + "punpcklbh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" + "punpcklbh %[ftmp0], %[ftmp0], %[ftmp2] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp4] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp6] \n\t" + "packushb %[ftmp3], %[ftmp3], %[ftmp2] \n\t" + "packushb %[ftmp0], %[ftmp0], %[ftmp2] \n\t" + "gsswlc1 %[ftmp3], 0x03(%[dst]) \n\t" + "gsswrc1 %[ftmp3], 0x00(%[dst]) \n\t" + "gsswxc1 %[ftmp0], 0x00(%[dst], %[stride]) \n\t" + "dmtc1 %[tmp4], %[ftmp1] \n\t" + "dmtc1 %[tmp2], %[ftmp6] \n\t" + "ldc1 %[ftmp4], 0x18($29) \n\t" + "mov.d %[ftmp5], %[ftmp4] \n\t" + "psrah %[ftmp4], %[ftmp4], %[ftmp8] \n\t" + "psrah %[ftmp7], %[ftmp11], %[ftmp8] \n\t" + "paddh %[ftmp7], %[ftmp7], %[ftmp11] \n\t" + "paddh %[ftmp4], %[ftmp4], %[ftmp5] \n\t" + "paddh %[ftmp7], %[ftmp7], %[ftmp15] \n\t" + "paddh %[ftmp4], %[ftmp4], %[ftmp11] \n\t" + "psubh %[ftmp7], %[ftmp7], %[ftmp5] \n\t" + "paddh %[ftmp4], %[ftmp4], %[ftmp1] \n\t" + "psubh %[ftmp5], %[ftmp5], %[ftmp1] \n\t" + "psubh %[ftmp3], %[ftmp11], %[ftmp1] \n\t" + "psrah %[ftmp1], %[ftmp1], %[ftmp8] \n\t" + "paddh %[ftmp5], %[ftmp5], %[ftmp15] \n\t" + "psubh %[ftmp3], %[ftmp3], %[ftmp15] \n\t" + "psrah %[ftmp2], %[ftmp15], %[ftmp8] \n\t" + "psubh %[ftmp5], %[ftmp5], %[ftmp1] \n\t" + "psubh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" + "mov.d %[ftmp2], %[ftmp4] \n\t" + "psrah %[ftmp4], %[ftmp4], %[ftmp9] \n\t" + "psrah %[ftmp1], %[ftmp7], %[ftmp9] \n\t" + "paddh %[ftmp4], %[ftmp4], %[ftmp3] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp5] \n\t" + "psrah %[ftmp5], %[ftmp5], %[ftmp9] \n\t" + "psrah %[ftmp3], %[ftmp3], %[ftmp9] \n\t" + "psubh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" + "psubh %[ftmp2], %[ftmp2], %[ftmp3] \n\t" + "mov.d %[ftmp3], %[ftmp13] \n\t" + "psrah %[ftmp0], %[ftmp13], %[ftmp8] \n\t" + "psrah %[ftmp7], %[ftmp6], %[ftmp8] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp6] \n\t" + "psubh %[ftmp7], %[ftmp7], %[ftmp3] \n\t" + "ldc1 %[ftmp6], 0x08($29) \n\t" + "dmtc1 %[tmp6], %[ftmp3] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp6] \n\t" + "paddh %[ftmp6], %[ftmp6], %[ftmp6] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp3] \n\t" + "psubh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp3] \n\t" + "paddh %[ftmp7], %[ftmp7], %[ftmp6] \n\t" + 
"psubh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" + "paddh %[ftmp6], %[ftmp6], %[ftmp6] \n\t" + "paddh %[ftmp2], %[ftmp2], %[ftmp0] \n\t" + "psubh %[ftmp6], %[ftmp6], %[ftmp7] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp0] \n\t" + "paddh %[ftmp5], %[ftmp5], %[ftmp7] \n\t" + "psubh %[ftmp0], %[ftmp0], %[ftmp2] \n\t" + "paddh %[ftmp7], %[ftmp7], %[ftmp7] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp6] \n\t" + "psubh %[ftmp7], %[ftmp7], %[ftmp5] \n\t" + "paddh %[ftmp6], %[ftmp6], %[ftmp6] \n\t" + "paddh %[ftmp4], %[ftmp4], %[ftmp3] \n\t" + "psubh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp3] \n\t" + "sdc1 %[ftmp6], 0x08($29) \n\t" + "psubh %[ftmp3], %[ftmp3], %[ftmp4] \n\t" + "sdc1 %[ftmp7], 0x18($29) \n\t" + "dmfc1 %[tmp2], %[ftmp0] \n\t" + "xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" + "uld %[low32], 0x00(%[addr0]) \n\t" + "mtc1 %[low32], %[ftmp6] \n\t" + "gslwxc1 %[ftmp7], 0x00(%[addr0], %[stride]) \n\t" + "psrah %[ftmp2], %[ftmp2], %[ftmp10] \n\t" + "psrah %[ftmp5], %[ftmp5], %[ftmp10] \n\t" + "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" + "punpcklbh %[ftmp7], %[ftmp7], %[ftmp0] \n\t" + "paddh %[ftmp6], %[ftmp6], %[ftmp2] \n\t" + "paddh %[ftmp7], %[ftmp7], %[ftmp5] \n\t" + "packushb %[ftmp6], %[ftmp6], %[ftmp0] \n\t" + "packushb %[ftmp7], %[ftmp7], %[ftmp0] \n\t" + "gsswlc1 %[ftmp6], 0x03(%[addr0]) \n\t" + "gsswrc1 %[ftmp6], 0x00(%[addr0]) \n\t" + "gsswxc1 %[ftmp7], 0x00(%[addr0], %[stride]) \n\t" + PTR_ADDU "%[addr0], %[addr0], %[stride] \n\t" + PTR_ADDU "%[addr0], %[addr0], %[stride] \n\t" + "uld %[low32], 0x00(%[addr0]) \n\t" + "mtc1 %[low32], %[ftmp6] \n\t" + "gslwxc1 %[ftmp7], 0x00(%[addr0], %[stride]) \n\t" + "psrah %[ftmp1], %[ftmp1], %[ftmp10] \n\t" + "psrah %[ftmp4], %[ftmp4], %[ftmp10] \n\t" + "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" + "punpcklbh %[ftmp7], %[ftmp7], %[ftmp0] \n\t" + "paddh %[ftmp6], %[ftmp6], %[ftmp1] \n\t" + "paddh %[ftmp7], %[ftmp7], %[ftmp4] \n\t" + "packushb %[ftmp6], %[ftmp6], %[ftmp0] \n\t" + "packushb %[ftmp7], %[ftmp7], %[ftmp0] \n\t" + "gsswlc1 %[ftmp6], 0x03(%[addr0]) \n\t" + "gsswrc1 %[ftmp6], 0x00(%[addr0]) \n\t" + "gsswxc1 %[ftmp7], 0x00(%[addr0], %[stride]) \n\t" + "ldc1 %[ftmp2], 0x08($29) \n\t" + "ldc1 %[ftmp5], 0x18($29) \n\t" + PTR_ADDU "%[addr0], %[addr0], %[stride] \n\t" + "dmtc1 %[tmp2], %[ftmp1] \n\t" + PTR_ADDU "%[addr0], %[addr0], %[stride] \n\t" + "uld %[low32], 0x00(%[addr0]) \n\t" + "mtc1 %[low32], %[ftmp6] \n\t" + "gslwxc1 %[ftmp7], 0x00(%[addr0], %[stride]) \n\t" + "psrah %[ftmp3], %[ftmp3], %[ftmp10] \n\t" + "psrah %[ftmp2], %[ftmp2], %[ftmp10] \n\t" + "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" + "punpcklbh %[ftmp7], %[ftmp7], %[ftmp0] \n\t" + "paddh %[ftmp6], %[ftmp6], %[ftmp3] \n\t" + "paddh %[ftmp7], %[ftmp7], %[ftmp2] \n\t" + "packushb %[ftmp6], %[ftmp6], %[ftmp0] \n\t" + "packushb %[ftmp7], %[ftmp7], %[ftmp0] \n\t" + "gsswlc1 %[ftmp6], 0x03(%[addr0]) \n\t" + "gsswrc1 %[ftmp6], 0x00(%[addr0]) \n\t" + "gsswxc1 %[ftmp7], 0x00(%[addr0], %[stride]) \n\t" + PTR_ADDU "%[addr0], %[addr0], %[stride] \n\t" + PTR_ADDU "%[addr0], %[addr0], %[stride] \n\t" + "uld %[low32], 0x00(%[addr0]) \n\t" + "mtc1 %[low32], %[ftmp6] \n\t" + "gslwxc1 %[ftmp7], 0x00(%[addr0], %[stride]) \n\t" + "psrah %[ftmp5], %[ftmp5], %[ftmp10] \n\t" + "psrah %[ftmp1], %[ftmp1], %[ftmp10] \n\t" + "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t" + "punpcklbh %[ftmp7], %[ftmp7], %[ftmp0] \n\t" + "paddh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" + "paddh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" + "packushb %[ftmp6], %[ftmp6], %[ftmp0] \n\t" + "packushb %[ftmp7], %[ftmp7], 
%[ftmp0] \n\t" + "gsswlc1 %[ftmp6], 0x03(%[addr0]) \n\t" + "gsswrc1 %[ftmp6], 0x00(%[addr0]) \n\t" + "gsswxc1 %[ftmp7], 0x00(%[addr0], %[stride]) \n\t" + PTR_ADDIU "$29, $29, 0x20 \n\t" + : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), + [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), + [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), + [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), + [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), + [ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]), + [ftmp12]"=&f"(ftmp[12]), [ftmp13]"=&f"(ftmp[13]), + [ftmp14]"=&f"(ftmp[14]), [ftmp15]"=&f"(ftmp[15]), + [tmp0]"=&r"(tmp[0]), [tmp1]"=&r"(tmp[1]), + [tmp2]"=&r"(tmp[2]), [tmp3]"=&r"(tmp[3]), + [tmp4]"=&r"(tmp[4]), [tmp5]"=&r"(tmp[5]), + [tmp6]"=&r"(tmp[6]), [tmp7]"=&r"(tmp[7]), + [addr0]"=&r"(addr[0]), + [low32]"=&r"(low32) + : [dst]"r"(dst), [block]"r"(block), + [stride]"r"((mips_reg)stride) + : "$29","memory" ); memset(block, 0, 128); @@ -593,91 +661,134 @@ void ff_h264_idct8_add_8_mmi(uint8_t *dst, int16_t *block, int stride) void ff_h264_idct_dc_add_8_mmi(uint8_t *dst, int16_t *block, int stride) { + int dc = (block[0] + 32) >> 6; + double ftmp[6]; + uint64_t low32; + + block[0] = 0; + __asm__ volatile ( - "lh $8, 0x0(%[block]) \r\n" - "sd $0, 0x0(%[block]) \r\n" - "daddiu $8, $8, 0x20 \r\n" - "daddu $10, %[stride], %[stride] \r\n" - "dsra $8, $8, 0x6 \r\n" - "xor $f2, $f2, $f2 \r\n" - "mtc1 $8, $f0 \r\n" - "pshufh $f0, $f0, $f2 \r\n" - "daddu $8, $10, %[stride] \r\n" - "psubh $f2, $f2, $f0 \r\n" - "packushb $f0, $f0, $f0 \r\n" - "packushb $f2, $f2, $f2 \r\n" - "lwc1 $f4, 0x0(%[dst]) \r\n" - "gslwxc1 $f6, 0x0(%[dst], %[stride]) \r\n" - "gslwxc1 $f8, 0x0(%[dst], $10) \r\n" - "gslwxc1 $f10, 0x0(%[dst], $8) \r\n" - "paddusb $f4, $f4, $f0 \r\n" - "paddusb $f6, $f6, $f0 \r\n" - "paddusb $f8, $f8, $f0 \r\n" - "paddusb $f10, $f10, $f0 \r\n" - "psubusb $f4, $f4, $f2 \r\n" - "psubusb $f6, $f6, $f2 \r\n" - "psubusb $f8, $f8, $f2 \r\n" - "psubusb $f10, $f10, $f2 \r\n" - "swc1 $f4, 0x0(%[dst]) \r\n" - "gsswxc1 $f6, 0x0(%[dst], %[stride]) \r\n" - "gsswxc1 $f8, 0x0(%[dst], $10) \r\n" - "gsswxc1 $f10, 0x0(%[dst], $8) \r\n" - ::[dst]"r"(dst),[block]"r"(block),[stride]"r"((uint64_t)stride) - : "$8","$10","$f0","$f2","$f4","$f6","$f8","$f10" + "mtc1 %[dc], %[ftmp5] \n\t" + "xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" + "pshufh %[ftmp5], %[ftmp5], %[ftmp0] \n\t" + "uld %[low32], 0x00(%[dst0]) \n\t" + "mtc1 %[low32], %[ftmp1] \n\t" + "uld %[low32], 0x00(%[dst1]) \n\t" + "mtc1 %[low32], %[ftmp2] \n\t" + "uld %[low32], 0x00(%[dst2]) \n\t" + "mtc1 %[low32], %[ftmp3] \n\t" + "uld %[low32], 0x00(%[dst3]) \n\t" + "mtc1 %[low32], %[ftmp4] \n\t" + "punpcklbh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" + "punpcklbh %[ftmp2], %[ftmp2], %[ftmp0] \n\t" + "punpcklbh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" + "punpcklbh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" + "paddsh %[ftmp1], %[ftmp1], %[ftmp5] \n\t" + "paddsh %[ftmp2], %[ftmp2], %[ftmp5] \n\t" + "paddsh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" + "paddsh %[ftmp4], %[ftmp4], %[ftmp5] \n\t" + "packushb %[ftmp1], %[ftmp1], %[ftmp0] \n\t" + "packushb %[ftmp2], %[ftmp2], %[ftmp0] \n\t" + "packushb %[ftmp3], %[ftmp3], %[ftmp0] \n\t" + "packushb %[ftmp4], %[ftmp4], %[ftmp0] \n\t" + "gsswlc1 %[ftmp1], 0x03(%[dst0]) \n\t" + "gsswrc1 %[ftmp1], 0x00(%[dst0]) \n\t" + "gsswlc1 %[ftmp2], 0x03(%[dst1]) \n\t" + "gsswrc1 %[ftmp2], 0x00(%[dst1]) \n\t" + "gsswlc1 %[ftmp3], 0x03(%[dst2]) \n\t" + "gsswrc1 %[ftmp3], 0x00(%[dst2]) \n\t" + "gsswlc1 %[ftmp4], 0x03(%[dst3]) \n\t" + "gsswrc1 %[ftmp4], 0x00(%[dst3]) \n\t" + : [ftmp0]"=&f"(ftmp[0]), 
[ftmp1]"=&f"(ftmp[1]), + [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), + [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), + [low32]"=&r"(low32) + : [dst0]"r"(dst), [dst1]"r"(dst+stride), + [dst2]"r"(dst+2*stride), [dst3]"r"(dst+3*stride), + [dc]"r"(dc) + : "memory" ); } void ff_h264_idct8_dc_add_8_mmi(uint8_t *dst, int16_t *block, int stride) { + int dc = (block[0] + 32) >> 6; + double ftmp[10]; + + block[0] = 0; + __asm__ volatile ( - "lh $8, 0x0(%[block]) \r\n" - "sd $0, 0x0(%[block]) \r\n" - "daddiu $8, $8, 0x20 \r\n" - "daddu $10, %[stride], %[stride] \r\n" - "dsra $8, $8, 0x6 \r\n" - "xor $f2, $f2, $f2 \r\n" - "mtc1 $8, $f0 \r\n" - "pshufh $f0, $f0, $f2 \r\n" - "daddu $8, $10, %[stride] \r\n" - "psubh $f2, $f2, $f0 \r\n" - "packushb $f0, $f0, $f0 \r\n" - "packushb $f2, $f2, $f2 \r\n" - "ldc1 $f4, 0x0(%[dst]) \r\n" - "gsldxc1 $f6, 0x0(%[dst], %[stride]) \r\n" - "gsldxc1 $f8, 0x0(%[dst], $10) \r\n" - "gsldxc1 $f10, 0x0(%[dst], $8) \r\n" - "paddusb $f4, $f4, $f0 \r\n" - "paddusb $f6, $f6, $f0 \r\n" - "paddusb $f8, $f8, $f0 \r\n" - "paddusb $f10, $f10, $f0 \r\n" - "psubusb $f4, $f4, $f2 \r\n" - "psubusb $f6, $f6, $f2 \r\n" - "psubusb $f8, $f8, $f2 \r\n" - "psubusb $f10, $f10, $f2 \r\n" - "sdc1 $f4, 0x0(%[dst]) \r\n" - "gssdxc1 $f6, 0x0(%[dst], %[stride]) \r\n" - "gssdxc1 $f8, 0x0(%[dst], $10) \r\n" - "daddu $9, $10, $10 \r\n" - "gssdxc1 $f10, 0x0(%[dst], $8) \r\n" - "daddu %[dst], %[dst], $9 \r\n" - "ldc1 $f4, 0x0(%[dst]) \r\n" - "gsldxc1 $f6, 0x0(%[dst], %[stride]) \r\n" - "gsldxc1 $f8, 0x0(%[dst], $10) \r\n" - "gsldxc1 $f10, 0x0(%[dst], $8) \r\n" - "paddusb $f4, $f4, $f0 \r\n" - "paddusb $f6, $f6, $f0 \r\n" - "paddusb $f8, $f8, $f0 \r\n" - "paddusb $f10, $f10, $f0 \r\n" - "psubusb $f4, $f4, $f2 \r\n" - "psubusb $f6, $f6, $f2 \r\n" - "psubusb $f8, $f8, $f2 \r\n" - "psubusb $f10, $f10, $f2 \r\n" - "sdc1 $f4, 0x0(%[dst]) \r\n" - "gssdxc1 $f6, 0x0(%[dst], %[stride]) \r\n" - "gssdxc1 $f8, 0x0(%[dst], $10) \r\n" - "gssdxc1 $f10, 0x0(%[dst], $8) \r\n" - ::[dst]"r"(dst),[block]"r"(block),[stride]"r"((uint64_t)stride) - : "$8","$9","$10","$f0","$f2","$f4","$f6","$f8","$f10" + "mtc1 %[dc], %[ftmp5] \n\t" + "xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" + "pshufh %[ftmp5], %[ftmp5], %[ftmp0] \n\t" + "ldc1 %[ftmp1], 0x00(%[dst0]) \n\t" + "ldc1 %[ftmp2], 0x00(%[dst1]) \n\t" + "ldc1 %[ftmp3], 0x00(%[dst2]) \n\t" + "ldc1 %[ftmp4], 0x00(%[dst3]) \n\t" + "punpckhbh %[ftmp6], %[ftmp1], %[ftmp0] \n\t" + "punpcklbh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" + "punpckhbh %[ftmp7], %[ftmp2], %[ftmp0] \n\t" + "punpcklbh %[ftmp2], %[ftmp2], %[ftmp0] \n\t" + "punpckhbh %[ftmp8], %[ftmp3], %[ftmp0] \n\t" + "punpcklbh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" + "punpckhbh %[ftmp9], %[ftmp4], %[ftmp0] \n\t" + "punpcklbh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" + "paddsh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" + "paddsh %[ftmp1], %[ftmp1], %[ftmp5] \n\t" + "paddsh %[ftmp7], %[ftmp7], %[ftmp5] \n\t" + "paddsh %[ftmp2], %[ftmp2], %[ftmp5] \n\t" + "paddsh %[ftmp8], %[ftmp8], %[ftmp5] \n\t" + "paddsh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" + "paddsh %[ftmp9], %[ftmp9], %[ftmp5] \n\t" + "paddsh %[ftmp4], %[ftmp4], %[ftmp5] \n\t" + "packushb %[ftmp1], %[ftmp1], %[ftmp6] \n\t" + "packushb %[ftmp2], %[ftmp2], %[ftmp7] \n\t" + "packushb %[ftmp3], %[ftmp3], %[ftmp8] \n\t" + "packushb %[ftmp4], %[ftmp4], %[ftmp9] \n\t" + "sdc1 %[ftmp1], 0x00(%[dst0]) \n\t" + "sdc1 %[ftmp2], 0x00(%[dst1]) \n\t" + "sdc1 %[ftmp3], 0x00(%[dst2]) \n\t" + "sdc1 %[ftmp4], 0x00(%[dst3]) \n\t" + + "ldc1 %[ftmp1], 0x00(%[dst4]) \n\t" + "ldc1 %[ftmp2], 0x00(%[dst5]) \n\t" + "ldc1 %[ftmp3], 
0x00(%[dst6]) \n\t" + "ldc1 %[ftmp4], 0x00(%[dst7]) \n\t" + "punpckhbh %[ftmp6], %[ftmp1], %[ftmp0] \n\t" + "punpcklbh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" + "punpckhbh %[ftmp7], %[ftmp2], %[ftmp0] \n\t" + "punpcklbh %[ftmp2], %[ftmp2], %[ftmp0] \n\t" + "punpckhbh %[ftmp8], %[ftmp3], %[ftmp0] \n\t" + "punpcklbh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" + "punpckhbh %[ftmp9], %[ftmp4], %[ftmp0] \n\t" + "punpcklbh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" + "paddsh %[ftmp6], %[ftmp6], %[ftmp5] \n\t" + "paddsh %[ftmp1], %[ftmp1], %[ftmp5] \n\t" + "paddsh %[ftmp7], %[ftmp7], %[ftmp5] \n\t" + "paddsh %[ftmp2], %[ftmp2], %[ftmp5] \n\t" + "paddsh %[ftmp8], %[ftmp8], %[ftmp5] \n\t" + "paddsh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" + "paddsh %[ftmp9], %[ftmp9], %[ftmp5] \n\t" + "paddsh %[ftmp4], %[ftmp4], %[ftmp5] \n\t" + "packushb %[ftmp1], %[ftmp1], %[ftmp6] \n\t" + "packushb %[ftmp2], %[ftmp2], %[ftmp7] \n\t" + "packushb %[ftmp3], %[ftmp3], %[ftmp8] \n\t" + "packushb %[ftmp4], %[ftmp4], %[ftmp9] \n\t" + "sdc1 %[ftmp1], 0x00(%[dst4]) \n\t" + "sdc1 %[ftmp2], 0x00(%[dst5]) \n\t" + "sdc1 %[ftmp3], 0x00(%[dst6]) \n\t" + "sdc1 %[ftmp4], 0x00(%[dst7]) \n\t" + : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), + [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), + [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), + [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), + [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]) + : [dst0]"r"(dst), [dst1]"r"(dst+stride), + [dst2]"r"(dst+2*stride), [dst3]"r"(dst+3*stride), + [dst4]"r"(dst+4*stride), [dst5]"r"(dst+5*stride), + [dst6]"r"(dst+6*stride), [dst7]"r"(dst+7*stride), + [dc]"r"(dc) + : "memory" ); } @@ -775,212 +886,222 @@ void ff_h264_idct_add8_422_8_mmi(uint8_t **dest, const int *block_offset, void ff_h264_luma_dc_dequant_idct_8_mmi(int16_t *output, int16_t *input, int qmul) { + double ftmp[10]; + uint64_t tmp[2]; + __asm__ volatile ( - ".set noreorder \r\n" - "dli $10, 0x8 \r\n" - "ldc1 $f6, 0x18(%[input]) \r\n" - "dmtc1 $10, $f16 \r\n" - "ldc1 $f4, 0x10(%[input]) \r\n" - "dli $10, 0x20 \r\n" - "ldc1 $f2, 0x8(%[input]) \r\n" - "dmtc1 $10, $f18 \r\n" - "ldc1 $f0, 0x0(%[input]) \r\n" - "mov.d $f8, $f6 \r\n" - "paddh $f6, $f6, $f4 \r\n" - "psubh $f4, $f4, $f8 \r\n" - "mov.d $f8, $f2 \r\n" - "paddh $f2, $f2, $f0 \r\n" - "psubh $f0, $f0, $f8 \r\n" - "mov.d $f8, $f6 \r\n" - "paddh $f6, $f6, $f2 \r\n" - "psubh $f2, $f2, $f8 \r\n" - "mov.d $f8, $f4 \r\n" - "paddh $f4, $f4, $f0 \r\n" - "psubh $f0, $f0, $f8 \r\n" - "mov.d $f8, $f6 \r\n" - "punpcklhw $f6, $f6, $f2 \r\n" - "punpckhhw $f8, $f8, $f2 \r\n" - "punpckhhw $f2, $f0, $f4 \r\n" - "punpcklhw $f0, $f0, $f4 \r\n" - "punpckhwd $f4, $f6, $f0 \r\n" - "punpcklwd $f6, $f6, $f0 \r\n" - "mov.d $f0, $f8 \r\n" - "punpcklwd $f8, $f8, $f2 \r\n" - "punpckhwd $f0, $f0, $f2 \r\n" - "mov.d $f2, $f0 \r\n" - "paddh $f0, $f0, $f8 \r\n" - "psubh $f8, $f8, $f2 \r\n" - "mov.d $f2, $f4 \r\n" - "paddh $f4, $f4, $f6 \r\n" - "psubh $f6, $f6, $f2 \r\n" - "mov.d $f2, $f0 \r\n" - "paddh $f0, $f0, $f4 \r\n" - "psubh $f4, $f4, $f2 \r\n" - "mov.d $f2, $f8 \r\n" - "daddiu $10, %[qmul], -0x7fff \r\n" - "paddh $f8, $f8, $f6 \r\n" - "bgtz $10, 1f \r\n" - "psubh $f6, $f6, $f2 \r\n" - "ori $10, $0, 0x80 \r\n" - "dsll $10, $10, 0x10 \r\n" - "punpckhhw $f2, $f0, %[ff_pw_1] \r\n" - "daddu %[qmul], %[qmul], $10 \r\n" - "punpcklhw $f0, $f0, %[ff_pw_1] \r\n" - "punpckhhw $f10, $f4, %[ff_pw_1] \r\n" - "punpcklhw $f4, $f4, %[ff_pw_1] \r\n" - "mtc1 %[qmul], $f14 \r\n" - "punpcklwd $f14, $f14, $f14 \r\n" - "pmaddhw $f0, $f0, $f14 \r\n" - "pmaddhw $f4, $f4, $f14 \r\n" - "pmaddhw $f2, $f2, $f14 \r\n" - 
"pmaddhw $f10, $f10, $f14 \r\n" - "psraw $f0, $f0, $f16 \r\n" - "psraw $f4, $f4, $f16 \r\n" - "psraw $f2, $f2, $f16 \r\n" - "psraw $f10, $f10, $f16 \r\n" - "packsswh $f0, $f0, $f2 \r\n" - "packsswh $f4, $f4, $f10 \r\n" - "mfc1 $9, $f0 \r\n" - "dsrl $f0, $f0, $f18 \r\n" - "mfc1 %[input], $f0 \r\n" - "sh $9, 0x0(%[output]) \r\n" - "sh %[input], 0x80(%[output]) \r\n" - "dsrl $9, $9, 0x10 \r\n" - "dsrl %[input], %[input], 0x10 \r\n" - "sh $9, 0x20(%[output]) \r\n" - "sh %[input], 0xa0(%[output]) \r\n" - "mfc1 $9, $f4 \r\n" - "dsrl $f4, $f4, $f18 \r\n" - "mfc1 %[input], $f4 \r\n" - "sh $9, 0x40(%[output]) \r\n" - "sh %[input], 0xc0(%[output]) \r\n" - "dsrl $9, $9, 0x10 \r\n" - "dsrl %[input], %[input], 0x10 \r\n" - "sh $9, 0x60(%[output]) \r\n" - "sh %[input], 0xe0(%[output]) \r\n" - "punpckhhw $f2, $f6, %[ff_pw_1] \r\n" - "punpcklhw $f6, $f6, %[ff_pw_1] \r\n" - "punpckhhw $f10, $f8, %[ff_pw_1] \r\n" - "punpcklhw $f8, $f8, %[ff_pw_1] \r\n" - "mtc1 %[qmul], $f14 \r\n" - "punpcklwd $f14, $f14, $f14 \r\n" - "pmaddhw $f6, $f6, $f14 \r\n" - "pmaddhw $f8, $f8, $f14 \r\n" - "pmaddhw $f2, $f2, $f14 \r\n" - "pmaddhw $f10, $f10, $f14 \r\n" - "psraw $f6, $f6, $f16 \r\n" - "psraw $f8, $f8, $f16 \r\n" - "psraw $f2, $f2, $f16 \r\n" - "psraw $f10, $f10, $f16 \r\n" - "packsswh $f6, $f6, $f2 \r\n" - "packsswh $f8, $f8, $f10 \r\n" - "mfc1 $9, $f6 \r\n" - "dsrl $f6, $f6, $f18 \r\n" - "mfc1 %[input], $f6 \r\n" - "sh $9, 0x100(%[output]) \r\n" - "sh %[input], 0x180(%[output]) \r\n" - "dsrl $9, $9, 0x10 \r\n" - "dsrl %[input], %[input], 0x10 \r\n" - "sh $9, 0x120(%[output]) \r\n" - "sh %[input], 0x1a0(%[output]) \r\n" - "mfc1 $9, $f8 \r\n" - "dsrl $f8, $f8, $f18 \r\n" - "mfc1 %[input], $f8 \r\n" - "sh $9, 0x140(%[output]) \r\n" - "sh %[input], 0x1c0(%[output]) \r\n" - "dsrl $9, $9, 0x10 \r\n" - "dsrl %[input], %[input], 0x10 \r\n" - "sh $9, 0x160(%[output]) \r\n" - "jr $31 \r\n" - "sh %[input], 0x1e0(%[output]) \r\n" - "1: \r\n" - "ori $10, $0, 0x1f \r\n" - "clz $9, %[qmul] \r\n" - "ori %[input], $0, 0x7 \r\n" - "dsubu $9, $10, $9 \r\n" - "ori $10, $0, 0x80 \r\n" - "dsll $10, $10, 0x10 \r\n" - "daddu %[qmul], %[qmul], $10 \r\n" - "dsubu $10, $9, %[input] \r\n" - "movn $9, %[input], $10 \r\n" - "daddiu %[input], %[input], 0x1 \r\n" - "andi $10, $9, 0xff \r\n" - "dsrlv %[qmul], %[qmul], $10 \r\n" - "dsubu %[input], %[input], $9 \r\n" - "mtc1 %[input], $f12 \r\n" - "punpckhhw $f2, $f0, %[ff_pw_1] \r\n" - "punpcklhw $f0, $f0, %[ff_pw_1] \r\n" - "punpckhhw $f10, $f4, %[ff_pw_1] \r\n" - "punpcklhw $f4, $f4, %[ff_pw_1] \r\n" - "mtc1 %[qmul], $f14 \r\n" - "punpcklwd $f14, $f14, $f14 \r\n" - "pmaddhw $f0, $f0, $f14 \r\n" - "pmaddhw $f4, $f4, $f14 \r\n" - "pmaddhw $f2, $f2, $f14 \r\n" - "pmaddhw $f10, $f10, $f14 \r\n" - "psraw $f0, $f0, $f12 \r\n" - "psraw $f4, $f4, $f12 \r\n" - "psraw $f2, $f2, $f12 \r\n" - "psraw $f10, $f10, $f12 \r\n" - "packsswh $f0, $f0, $f2 \r\n" - "packsswh $f4, $f4, $f10 \r\n" - "mfc1 $9, $f0 \r\n" - "dsrl $f0, $f0, $f18 \r\n" - "sh $9, 0x0(%[output]) \r\n" - "mfc1 %[input], $f0 \r\n" - "dsrl $9, $9, 0x10 \r\n" - "sh %[input], 0x80(%[output]) \r\n" - "sh $9, 0x20(%[output]) \r\n" - "dsrl %[input], %[input], 0x10 \r\n" - "mfc1 $9, $f4 \r\n" - "sh %[input], 0xa0(%[output]) \r\n" - "dsrl $f4, $f4, $f18 \r\n" - "sh $9, 0x40(%[output]) \r\n" - "mfc1 %[input], $f4 \r\n" - "dsrl $9, $9, 0x10 \r\n" - "sh %[input], 0xc0(%[output]) \r\n" - "sh $9, 0x60(%[output]) \r\n" - "dsrl %[input], %[input], 0x10 \r\n" - "sh %[input], 0xe0(%[output]) \r\n" - "punpckhhw $f2, $f6, %[ff_pw_1] \r\n" - "punpcklhw $f6, $f6, 
%[ff_pw_1] \r\n" - "punpckhhw $f10, $f8, %[ff_pw_1] \r\n" - "punpcklhw $f8, $f8, %[ff_pw_1] \r\n" - "mtc1 %[qmul], $f14 \r\n" - "punpcklwd $f14, $f14, $f14 \r\n" - "pmaddhw $f6, $f6, $f14 \r\n" - "pmaddhw $f8, $f8, $f14 \r\n" - "pmaddhw $f2, $f2, $f14 \r\n" - "pmaddhw $f10, $f10, $f14 \r\n" - "psraw $f6, $f6, $f12 \r\n" - "psraw $f8, $f8, $f12 \r\n" - "psraw $f2, $f2, $f12 \r\n" - "psraw $f10, $f10, $f12 \r\n" - "packsswh $f6, $f6, $f2 \r\n" - "packsswh $f8, $f8, $f10 \r\n" - "mfc1 $9, $f6 \r\n" - "dsrl $f6, $f6, $f18 \r\n" - "mfc1 %[input], $f6 \r\n" - "sh $9, 0x100(%[output]) \r\n" - "sh %[input], 0x180(%[output]) \r\n" - "dsrl $9, $9, 0x10 \r\n" - "dsrl %[input], %[input], 0x10 \r\n" - "sh $9, 0x120(%[output]) \r\n" - "sh %[input], 0x1a0(%[output]) \r\n" - "mfc1 $9, $f8 \r\n" - "dsrl $f8, $f8, $f18 \r\n" - "mfc1 %[input], $f8 \r\n" - "sh $9, 0x140(%[output]) \r\n" - "sh %[input], 0x1c0(%[output]) \r\n" - "dsrl $9, $9, 0x10 \r\n" - "dsrl %[input], %[input], 0x10 \r\n" - "sh $9, 0x160(%[output]) \r\n" - "sh %[input], 0x1e0(%[output]) \r\n" - ".set reorder \r\n" - ::[output]"r"(output),[input]"r"(input),[qmul]"r"((uint64_t)qmul), - [ff_pw_1]"f"(ff_pw_1) - : "$9","$10","$f0","$f2","$f4","$f6","$f8","$f10","$f12","$f14","$f16", - "$f18" + ".set noreorder \n\t" + "dli %[tmp0], 0x08 \n\t" + "ldc1 %[ftmp3], 0x18(%[input]) \n\t" + "mtc1 %[tmp0], %[ftmp8] \n\t" + "ldc1 %[ftmp2], 0x10(%[input]) \n\t" + "dli %[tmp0], 0x20 \n\t" + "ldc1 %[ftmp1], 0x08(%[input]) \n\t" + "mtc1 %[tmp0], %[ftmp9] \n\t" + "ldc1 %[ftmp0], 0x00(%[input]) \n\t" + "mov.d %[ftmp4], %[ftmp3] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp2] \n\t" + "psubh %[ftmp2], %[ftmp2], %[ftmp4] \n\t" + "mov.d %[ftmp4], %[ftmp1] \n\t" + "paddh %[ftmp1], %[ftmp1], %[ftmp0] \n\t" + "psubh %[ftmp0], %[ftmp0], %[ftmp4] \n\t" + "mov.d %[ftmp4], %[ftmp3] \n\t" + "paddh %[ftmp3], %[ftmp3], %[ftmp1] \n\t" + "psubh %[ftmp1], %[ftmp1], %[ftmp4] \n\t" + "mov.d %[ftmp4], %[ftmp2] \n\t" + "paddh %[ftmp2], %[ftmp2], %[ftmp0] \n\t" + "psubh %[ftmp0], %[ftmp0], %[ftmp4] \n\t" + "mov.d %[ftmp4], %[ftmp3] \n\t" + "punpcklhw %[ftmp3], %[ftmp3], %[ftmp1] \n\t" + "punpckhhw %[ftmp4], %[ftmp4], %[ftmp1] \n\t" + "punpckhhw %[ftmp1], %[ftmp0], %[ftmp2] \n\t" + "punpcklhw %[ftmp0], %[ftmp0], %[ftmp2] \n\t" + "punpckhwd %[ftmp2], %[ftmp3], %[ftmp0] \n\t" + "punpcklwd %[ftmp3], %[ftmp3], %[ftmp0] \n\t" + "mov.d %[ftmp0], %[ftmp4] \n\t" + "punpcklwd %[ftmp4], %[ftmp4], %[ftmp1] \n\t" + "punpckhwd %[ftmp0], %[ftmp0], %[ftmp1] \n\t" + "mov.d %[ftmp1], %[ftmp0] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp4] \n\t" + "psubh %[ftmp4], %[ftmp4], %[ftmp1] \n\t" + "mov.d %[ftmp1], %[ftmp2] \n\t" + "paddh %[ftmp2], %[ftmp2], %[ftmp3] \n\t" + "psubh %[ftmp3], %[ftmp3], %[ftmp1] \n\t" + "mov.d %[ftmp1], %[ftmp0] \n\t" + "paddh %[ftmp0], %[ftmp0], %[ftmp2] \n\t" + "psubh %[ftmp2], %[ftmp2], %[ftmp1] \n\t" + "mov.d %[ftmp1], %[ftmp4] \n\t" + "daddi %[tmp0], %[qmul], -0x7fff \n\t" + "paddh %[ftmp4], %[ftmp4], %[ftmp3] \n\t" + "bgtz %[tmp0], 1f \n\t" + "psubh %[ftmp3], %[ftmp3], %[ftmp1] \n\t" + "ori %[tmp0], $0, 0x80 \n\t" + "dsll %[tmp0], %[tmp0], 0x10 \n\t" + "punpckhhw %[ftmp1], %[ftmp0], %[ff_pw_1] \n\t" + "daddu %[qmul], %[qmul], %[tmp0] \n\t" + "punpcklhw %[ftmp0], %[ftmp0], %[ff_pw_1] \n\t" + "punpckhhw %[ftmp5], %[ftmp2], %[ff_pw_1] \n\t" + "punpcklhw %[ftmp2], %[ftmp2], %[ff_pw_1] \n\t" + "mtc1 %[qmul], %[ftmp7] \n\t" + "punpcklwd %[ftmp7], %[ftmp7], %[ftmp7] \n\t" + "pmaddhw %[ftmp0], %[ftmp0], %[ftmp7] \n\t" + "pmaddhw %[ftmp2], %[ftmp2], %[ftmp7] \n\t" + "pmaddhw %[ftmp1], 
%[ftmp1], %[ftmp7] \n\t" + "pmaddhw %[ftmp5], %[ftmp5], %[ftmp7] \n\t" + "psraw %[ftmp0], %[ftmp0], %[ftmp8] \n\t" + "psraw %[ftmp2], %[ftmp2], %[ftmp8] \n\t" + "psraw %[ftmp1], %[ftmp1], %[ftmp8] \n\t" + "psraw %[ftmp5], %[ftmp5], %[ftmp8] \n\t" + "packsswh %[ftmp0], %[ftmp0], %[ftmp1] \n\t" + "packsswh %[ftmp2], %[ftmp2], %[ftmp5] \n\t" + "dmfc1 %[tmp1], %[ftmp0] \n\t" + "dsrl %[ftmp0], %[ftmp0], %[ftmp9] \n\t" + "mfc1 %[input], %[ftmp0] \n\t" + "sh %[tmp1], 0x00(%[output]) \n\t" + "sh %[input], 0x80(%[output]) \n\t" + "dsrl %[tmp1], %[tmp1], 0x10 \n\t" + PTR_SRL "%[input], %[input], 0x10 \n\t" + "sh %[tmp1], 0x20(%[output]) \n\t" + "sh %[input], 0xa0(%[output]) \n\t" + "dmfc1 %[tmp1], %[ftmp2] \n\t" + "dsrl %[ftmp2], %[ftmp2], %[ftmp9] \n\t" + "mfc1 %[input], %[ftmp2] \n\t" + "sh %[tmp1], 0x40(%[output]) \n\t" + "sh %[input], 0xc0(%[output]) \n\t" + "dsrl %[tmp1], %[tmp1], 0x10 \n\t" + PTR_SRL "%[input], %[input], 0x10 \n\t" + "sh %[tmp1], 0x60(%[output]) \n\t" + "sh %[input], 0xe0(%[output]) \n\t" + "punpckhhw %[ftmp1], %[ftmp3], %[ff_pw_1] \n\t" + "punpcklhw %[ftmp3], %[ftmp3], %[ff_pw_1] \n\t" + "punpckhhw %[ftmp5], %[ftmp4], %[ff_pw_1] \n\t" + "punpcklhw %[ftmp4], %[ftmp4], %[ff_pw_1] \n\t" + "mtc1 %[qmul], %[ftmp7] \n\t" + "punpcklwd %[ftmp7], %[ftmp7], %[ftmp7] \n\t" + "pmaddhw %[ftmp3], %[ftmp3], %[ftmp7] \n\t" + "pmaddhw %[ftmp4], %[ftmp4], %[ftmp7] \n\t" + "pmaddhw %[ftmp1], %[ftmp1], %[ftmp7] \n\t" + "pmaddhw %[ftmp5], %[ftmp5], %[ftmp7] \n\t" + "psraw %[ftmp3], %[ftmp3], %[ftmp8] \n\t" + "psraw %[ftmp4], %[ftmp4], %[ftmp8] \n\t" + "psraw %[ftmp1], %[ftmp1], %[ftmp8] \n\t" + "psraw %[ftmp5], %[ftmp5], %[ftmp8] \n\t" + "packsswh %[ftmp3], %[ftmp3], %[ftmp1] \n\t" + "packsswh %[ftmp4], %[ftmp4], %[ftmp5] \n\t" + "dmfc1 %[tmp1], %[ftmp3] \n\t" + "dsrl %[ftmp3], %[ftmp3], %[ftmp9] \n\t" + "mfc1 %[input], %[ftmp3] \n\t" + "sh %[tmp1], 0x100(%[output]) \n\t" + "sh %[input], 0x180(%[output]) \n\t" + "dsrl %[tmp1], %[tmp1], 0x10 \n\t" + PTR_SRL "%[input], %[input], 0x10 \n\t" + "sh %[tmp1], 0x120(%[output]) \n\t" + "sh %[input], 0x1a0(%[output]) \n\t" + "dmfc1 %[tmp1], %[ftmp4] \n\t" + "dsrl %[ftmp4], %[ftmp4], %[ftmp9] \n\t" + "mfc1 %[input], %[ftmp4] \n\t" + "sh %[tmp1], 0x140(%[output]) \n\t" + "sh %[input], 0x1c0(%[output]) \n\t" + "dsrl %[tmp1], %[tmp1], 0x10 \n\t" + PTR_SRL "%[input], %[input], 0x10 \n\t" + "sh %[tmp1], 0x160(%[output]) \n\t" + "j 2f \n\t" + "sh %[input], 0x1e0(%[output]) \n\t" + "1: \n\t" + "ori %[tmp0], $0, 0x1f \n\t" + "clz %[tmp1], %[qmul] \n\t" + "ori %[input], $0, 0x07 \n\t" + "dsubu %[tmp1], %[tmp0], %[tmp1] \n\t" + "ori %[tmp0], $0, 0x80 \n\t" + "dsll %[tmp0], %[tmp0], 0x10 \n\t" + "daddu %[qmul], %[qmul], %[tmp0] \n\t" + "dsubu %[tmp0], %[tmp1], %[input] \n\t" + "movn %[tmp1], %[input], %[tmp0] \n\t" + PTR_ADDIU "%[input], %[input], 0x01 \n\t" + "andi %[tmp0], %[tmp1], 0xff \n\t" + "srlv %[qmul], %[qmul], %[tmp0] \n\t" + PTR_SUBU "%[input], %[input], %[tmp1] \n\t" + "mtc1 %[input], %[ftmp6] \n\t" + "punpckhhw %[ftmp1], %[ftmp0], %[ff_pw_1] \n\t" + "punpcklhw %[ftmp0], %[ftmp0], %[ff_pw_1] \n\t" + "punpckhhw %[ftmp5], %[ftmp2], %[ff_pw_1] \n\t" + "punpcklhw %[ftmp2], %[ftmp2], %[ff_pw_1] \n\t" + "mtc1 %[qmul], %[ftmp7] \n\t" + "punpcklwd %[ftmp7], %[ftmp7], %[ftmp7] \n\t" + "pmaddhw %[ftmp0], %[ftmp0], %[ftmp7] \n\t" + "pmaddhw %[ftmp2], %[ftmp2], %[ftmp7] \n\t" + "pmaddhw %[ftmp1], %[ftmp1], %[ftmp7] \n\t" + "pmaddhw %[ftmp5], %[ftmp5], %[ftmp7] \n\t" + "psraw %[ftmp0], %[ftmp0], %[ftmp6] \n\t" + "psraw %[ftmp2], %[ftmp2], %[ftmp6] \n\t" + "psraw 
%[ftmp1], %[ftmp1], %[ftmp6] \n\t" + "psraw %[ftmp5], %[ftmp5], %[ftmp6] \n\t" + "packsswh %[ftmp0], %[ftmp0], %[ftmp1] \n\t" + "packsswh %[ftmp2], %[ftmp2], %[ftmp5] \n\t" + "dmfc1 %[tmp1], %[ftmp0] \n\t" + "dsrl %[ftmp0], %[ftmp0], %[ftmp9] \n\t" + "sh %[tmp1], 0x00(%[output]) \n\t" + "mfc1 %[input], %[ftmp0] \n\t" + "dsrl %[tmp1], %[tmp1], 0x10 \n\t" + "sh %[input], 0x80(%[output]) \n\t" + "sh %[tmp1], 0x20(%[output]) \n\t" + PTR_SRL "%[input], %[input], 0x10 \n\t" + "dmfc1 %[tmp1], %[ftmp2] \n\t" + "sh %[input], 0xa0(%[output]) \n\t" + "dsrl %[ftmp2], %[ftmp2], %[ftmp9] \n\t" + "sh %[tmp1], 0x40(%[output]) \n\t" + "mfc1 %[input], %[ftmp2] \n\t" + "dsrl %[tmp1], %[tmp1], 0x10 \n\t" + "sh %[input], 0xc0(%[output]) \n\t" + "sh %[tmp1], 0x60(%[output]) \n\t" + PTR_SRL "%[input], %[input], 0x10 \n\t" + "sh %[input], 0xe0(%[output]) \n\t" + "punpckhhw %[ftmp1], %[ftmp3], %[ff_pw_1] \n\t" + "punpcklhw %[ftmp3], %[ftmp3], %[ff_pw_1] \n\t" + "punpckhhw %[ftmp5], %[ftmp4], %[ff_pw_1] \n\t" + "punpcklhw %[ftmp4], %[ftmp4], %[ff_pw_1] \n\t" + "mtc1 %[qmul], %[ftmp7] \n\t" + "punpcklwd %[ftmp7], %[ftmp7], %[ftmp7] \n\t" + "pmaddhw %[ftmp3], %[ftmp3], %[ftmp7] \n\t" + "pmaddhw %[ftmp4], %[ftmp4], %[ftmp7] \n\t" + "pmaddhw %[ftmp1], %[ftmp1], %[ftmp7] \n\t" + "pmaddhw %[ftmp5], %[ftmp5], %[ftmp7] \n\t" + "psraw %[ftmp3], %[ftmp3], %[ftmp6] \n\t" + "psraw %[ftmp4], %[ftmp4], %[ftmp6] \n\t" + "psraw %[ftmp1], %[ftmp1], %[ftmp6] \n\t" + "psraw %[ftmp5], %[ftmp5], %[ftmp6] \n\t" + "packsswh %[ftmp3], %[ftmp3], %[ftmp1] \n\t" + "packsswh %[ftmp4], %[ftmp4], %[ftmp5] \n\t" + "dmfc1 %[tmp1], %[ftmp3] \n\t" + "dsrl %[ftmp3], %[ftmp3], %[ftmp9] \n\t" + "mfc1 %[input], %[ftmp3] \n\t" + "sh %[tmp1], 0x100(%[output]) \n\t" + "sh %[input], 0x180(%[output]) \n\t" + "dsrl %[tmp1], %[tmp1], 0x10 \n\t" + PTR_SRL "%[input], %[input], 0x10 \n\t" + "sh %[tmp1], 0x120(%[output]) \n\t" + "sh %[input], 0x1a0(%[output]) \n\t" + "dmfc1 %[tmp1], %[ftmp4] \n\t" + "dsrl %[ftmp4], %[ftmp4], %[ftmp9] \n\t" + "mfc1 %[input], %[ftmp4] \n\t" + "sh %[tmp1], 0x140(%[output]) \n\t" + "sh %[input], 0x1c0(%[output]) \n\t" + "dsrl %[tmp1], %[tmp1], 0x10 \n\t" + PTR_SRL "%[input], %[input], 0x10 \n\t" + "sh %[tmp1], 0x160(%[output]) \n\t" + "sh %[input], 0x1e0(%[output]) \n\t" + "2: \n\t" + ".set reorder \n\t" + : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), + [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), + [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), + [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), + [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), + [tmp0]"=&r"(tmp[0]), [tmp1]"=&r"(tmp[1]), + [output]"+&r"(output), [input]"+&r"(input), + [qmul]"+&r"(qmul) + : [ff_pw_1]"f"(ff_pw_1) + : "memory" ); } @@ -1031,10 +1152,11 @@ void ff_h264_chroma_dc_dequant_idct_8_mmi(int16_t *block, int qmul) block[48]= ((d-b)*qmul) >> 7; } -void ff_h264_weight_pixels16_8_mmi(uint8_t *block, int stride, - int height, int log2_denom, int weight, int offset) +void ff_h264_weight_pixels16_8_mmi(uint8_t *block, int stride, int height, + int log2_denom, int weight, int offset) { int y; + double ftmp[8]; offset <<= log2_denom; @@ -1043,97 +1165,110 @@ void ff_h264_weight_pixels16_8_mmi(uint8_t *block, int stride, for (y=0; y