
avcodec/mips: Improve avc chroma vert mc msa functions

Replace the generic function with block-size-specific functions.
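
Below is a minimal scalar sketch of the dispatch shape this change moves to;
it is illustrative only, not the MSA implementation. The *_example names, the
plain-C filter loop and the main() driver are assumptions made for the sketch;
only the if/else-per-height wrapper mirrors the code in this patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Scalar stand-in for one block-size-specific vertical filter:
     * each output sample blends the sample above and the sample below,
     * out = (c_top * above + c_bot * below + 4) >> 3, for a w x h block
     * sharing one stride for src and dst. */
    static void chroma_vt_block_example(const uint8_t *src, uint8_t *dst,
                                        int32_t stride, uint32_t c_top,
                                        uint32_t c_bot, int w, int h)
    {
        for (int row = 0; row < h; row++) {
            for (int col = 0; col < w; col++)
                dst[col] = (uint8_t) ((c_top * src[col] +
                                       c_bot * src[col + stride] + 4) >> 3);
            src += stride;
            dst += stride;
        }
    }

    /* Dispatch shape used by the new avc_chroma_vt_{2,4,8}w_msa() wrappers:
     * one branch per supported height, each calling a fixed-size routine,
     * instead of one generic routine that loops on height internally. */
    static void chroma_vt_4w_example(const uint8_t *src, uint8_t *dst,
                                     int32_t stride, uint32_t c_top,
                                     uint32_t c_bot, int32_t height)
    {
        if (2 == height)
            chroma_vt_block_example(src, dst, stride, c_top, c_bot, 4, 2);
        else if (4 == height)
            chroma_vt_block_example(src, dst, stride, c_top, c_bot, 4, 4);
        else if (8 == height)
            chroma_vt_block_example(src, dst, stride, c_top, c_bot, 4, 8);
    }

    int main(void)
    {
        uint8_t src[16 * 9] = { 0 }, dst[16 * 8] = { 0 };
        chroma_vt_4w_example(src, dst, 16, 6, 2, 4); /* y = 2: weights (8 - y, y) */
        printf("dst[0] = %u\n", dst[0]);
        return 0;
    }
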

Signed-off-by: Kaustubh Raste <kaustubh.raste@imgtec.com>
Reviewed-by: Manojkumar Bhosale <Manojkumar.Bhosale@imgtec.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
Kaustubh Raste 2017-09-26 10:56:27 +05:30 committed by Michael Niedermayer
parent 10ab5534e0
commit b8854e2439


@@ -302,8 +302,7 @@ static void avc_chroma_hz_8w_msa(uint8_t *src, uint8_t *dst, int32_t stride,
}
}
static void avc_chroma_vt_2x2_msa(uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
static void avc_chroma_vt_2x2_msa(uint8_t *src, uint8_t *dst, int32_t stride,
uint32_t coeff0, uint32_t coeff1)
{
uint16_t out0, out1;
@@ -315,7 +314,7 @@ static void avc_chroma_vt_2x2_msa(uint8_t *src, int32_t src_stride,
v16i8 coeff_vec1 = __msa_fill_b(coeff1);
v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
LD_SB3(src, src_stride, src0, src1, src2);
LD_SB3(src, stride, src0, src1, src2);
ILVR_B2_UB(src1, src0, src2, src1, tmp0, tmp1);
@@ -331,12 +330,11 @@ static void avc_chroma_vt_2x2_msa(uint8_t *src, int32_t src_stride,
out1 = __msa_copy_u_h(res, 2);
SH(out0, dst);
dst += dst_stride;
dst += stride;
SH(out1, dst);
}
static void avc_chroma_vt_2x4_msa(uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
static void avc_chroma_vt_2x4_msa(uint8_t *src, uint8_t *dst, int32_t stride,
uint32_t coeff0, uint32_t coeff1)
{
v16u8 src0, src1, src2, src3, src4;
@@ -347,7 +345,7 @@ static void avc_chroma_vt_2x4_msa(uint8_t *src, int32_t src_stride,
v16i8 coeff_vec1 = __msa_fill_b(coeff1);
v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
LD_UB5(src, src_stride, src0, src1, src2, src3, src4);
LD_UB5(src, stride, src0, src1, src2, src3, src4);
ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
tmp0, tmp1, tmp2, tmp3);
ILVR_W2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
@@ -361,74 +359,21 @@ static void avc_chroma_vt_2x4_msa(uint8_t *src, int32_t src_stride,
res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
ST2x4_UB(res, 0, dst, dst_stride);
ST2x4_UB(res, 0, dst, stride);
}
static void avc_chroma_vt_2x8_msa(uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
uint32_t coeff0, uint32_t coeff1)
{
v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
v16u8 tmp0, tmp1, tmp2, tmp3;
v8i16 res;
v8u16 res_r;
v16i8 coeff_vec0 = __msa_fill_b(coeff0);
v16i8 coeff_vec1 = __msa_fill_b(coeff1);
v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
LD_UB5(src, src_stride, src0, src1, src2, src3, src4);
src += (5 * src_stride);
LD_UB4(src, src_stride, src5, src6, src7, src8);
ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
tmp0, tmp1, tmp2, tmp3);
ILVR_W2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
tmp0 = (v16u8) __msa_ilvr_d((v2i64) tmp2, (v2i64) tmp0);
res_r = __msa_dotp_u_h(tmp0, coeff_vec);
res_r <<= 3;
res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
res_r = __msa_sat_u_h(res_r, 7);
res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
ST2x4_UB(res, 0, dst, dst_stride);
dst += (4 * dst_stride);
ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7,
tmp0, tmp1, tmp2, tmp3);
ILVR_W2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
tmp0 = (v16u8) __msa_ilvr_d((v2i64) tmp2, (v2i64) tmp0);
res_r = __msa_dotp_u_h(tmp0, coeff_vec);
res_r <<= 3;
res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
res_r = __msa_sat_u_h(res_r, 7);
res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
ST2x4_UB(res, 0, dst, dst_stride);
dst += (4 * dst_stride);
}
static void avc_chroma_vt_2w_msa(uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
static void avc_chroma_vt_2w_msa(uint8_t *src, uint8_t *dst, int32_t stride,
uint32_t coeff0, uint32_t coeff1,
int32_t height)
{
if (2 == height) {
avc_chroma_vt_2x2_msa(src, src_stride, dst, dst_stride, coeff0, coeff1);
avc_chroma_vt_2x2_msa(src, dst, stride, coeff0, coeff1);
} else if (4 == height) {
avc_chroma_vt_2x4_msa(src, src_stride, dst, dst_stride, coeff0, coeff1);
} else if (8 == height) {
avc_chroma_vt_2x8_msa(src, src_stride, dst, dst_stride, coeff0, coeff1);
avc_chroma_vt_2x4_msa(src, dst, stride, coeff0, coeff1);
}
}
static void avc_chroma_vt_4x2_msa(uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
static void avc_chroma_vt_4x2_msa(uint8_t *src, uint8_t *dst, int32_t stride,
uint32_t coeff0, uint32_t coeff1)
{
v16u8 src0, src1, src2;
@@ -439,7 +384,7 @@ static void avc_chroma_vt_4x2_msa(uint8_t *src, int32_t src_stride,
v16i8 coeff_vec1 = __msa_fill_b(coeff1);
v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
LD_UB3(src, src_stride, src0, src1, src2);
LD_UB3(src, stride, src0, src1, src2);
ILVR_B2_UB(src1, src0, src2, src1, tmp0, tmp1);
tmp0 = (v16u8) __msa_ilvr_d((v2i64) tmp1, (v2i64) tmp0);
@@ -449,93 +394,135 @@ static void avc_chroma_vt_4x2_msa(uint8_t *src, int32_t src_stride,
res_r = __msa_sat_u_h(res_r, 7);
res = (v4i32) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
ST4x2_UB(res, dst, dst_stride);
ST4x2_UB(res, dst, stride);
}
static void avc_chroma_vt_4x4multiple_msa(uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
uint32_t coeff0, uint32_t coeff1,
int32_t height)
static void avc_chroma_vt_4x4_msa(uint8_t *src, uint8_t *dst, int32_t stride,
uint32_t coeff0, uint32_t coeff1)
{
uint32_t row;
v16u8 src0, src1, src2, src3, src4;
v16u8 tmp0, tmp1, tmp2, tmp3;
v16u8 out;
v8u16 res0_r, res1_r;
v4i32 res0, res1;
v16i8 coeff_vec0 = __msa_fill_b(coeff0);
v16i8 coeff_vec1 = __msa_fill_b(coeff1);
v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
src0 = LD_UB(src);
src += src_stride;
for (row = (height >> 2); row--;) {
LD_UB4(src, src_stride, src1, src2, src3, src4);
src += (4 * src_stride);
ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
tmp0, tmp1, tmp2, tmp3);
ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
DOTP_UB2_UH(tmp0, tmp2, coeff_vec, coeff_vec, res0_r, res1_r);
res0_r <<= 3;
res1_r <<= 3;
SRARI_H2_UH(res0_r, res1_r, 6);
SAT_UH2_UH(res0_r, res1_r, 7);
PCKEV_B2_SW(res0_r, res0_r, res1_r, res1_r, res0, res1);
ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
dst += (4 * dst_stride);
src0 = src4;
}
LD_UB5(src, stride, src0, src1, src2, src3, src4);
ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, tmp0, tmp1, tmp2,
tmp3);
ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
DOTP_UB2_UH(tmp0, tmp2, coeff_vec, coeff_vec, res0_r, res1_r);
res0_r <<= 3;
res1_r <<= 3;
SRARI_H2_UH(res0_r, res1_r, 6);
SAT_UH2_UH(res0_r, res1_r, 7);
out = (v16u8) __msa_pckev_b((v16i8) res1_r, (v16i8) res0_r);
ST4x4_UB(out, out, 0, 1, 2, 3, dst, stride);
}
static void avc_chroma_vt_4w_msa(uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
static void avc_chroma_vt_4x8_msa(uint8_t *src, uint8_t *dst, int32_t stride,
uint32_t coeff0, uint32_t coeff1)
{
v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
v16u8 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, out0, out1;
v8u16 res0, res1, res2, res3;
v16i8 coeff_vec0 = __msa_fill_b(coeff0);
v16i8 coeff_vec1 = __msa_fill_b(coeff1);
v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
LD_UB5(src, stride, src0, src1, src2, src3, src4);
src += (5 * stride);
LD_UB4(src, stride, src5, src6, src7, src8);
ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, tmp0, tmp1, tmp2,
tmp3);
ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, tmp4, tmp5, tmp6,
tmp7);
ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
ILVR_D2_UB(tmp5, tmp4, tmp7, tmp6, tmp4, tmp6);
DOTP_UB2_UH(tmp0, tmp2, coeff_vec, coeff_vec, res0, res1);
DOTP_UB2_UH(tmp4, tmp6, coeff_vec, coeff_vec, res2, res3);
SLLI_4V(res0, res1, res2, res3, 3);
SRARI_H4_UH(res0, res1, res2, res3, 6);
SAT_UH4_UH(res0, res1, res2, res3, 7);
PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
ST4x8_UB(out0, out1, dst, stride);
}
static void avc_chroma_vt_4w_msa(uint8_t *src, uint8_t *dst, int32_t stride,
uint32_t coeff0, uint32_t coeff1,
int32_t height)
{
if (2 == height) {
avc_chroma_vt_4x2_msa(src, src_stride, dst, dst_stride, coeff0, coeff1);
} else {
avc_chroma_vt_4x4multiple_msa(src, src_stride, dst, dst_stride, coeff0,
coeff1, height);
avc_chroma_vt_4x2_msa(src, dst, stride, coeff0, coeff1);
} else if (4 == height) {
avc_chroma_vt_4x4_msa(src, dst, stride, coeff0, coeff1);
} else if (8 == height) {
avc_chroma_vt_4x8_msa(src, dst, stride, coeff0, coeff1);
}
}
static void avc_chroma_vt_8w_msa(uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
uint32_t coeff0, uint32_t coeff1,
int32_t height)
static void avc_chroma_vt_8x4_msa(uint8_t *src, uint8_t *dst, int32_t stride,
uint32_t coeff0, uint32_t coeff1)
{
uint32_t row;
v16u8 src0, src1, src2, src3, src4, out0, out1;
v8u16 res0, res1, res2, res3;
v16i8 coeff_vec0 = __msa_fill_b(coeff0);
v16i8 coeff_vec1 = __msa_fill_b(coeff1);
v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
src0 = LD_UB(src);
src += src_stride;
LD_UB5(src, stride, src0, src1, src2, src3, src4);
ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, src0, src1, src2,
src3);
DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
coeff_vec, res0, res1, res2, res3);
SLLI_4V(res0, res1, res2, res3, 3);
SRARI_H4_UH(res0, res1, res2, res3, 6);
SAT_UH4_UH(res0, res1, res2, res3, 7);
PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
ST8x4_UB(out0, out1, dst, stride);
}
for (row = height >> 2; row--;) {
LD_UB4(src, src_stride, src1, src2, src3, src4);
src += (4 * src_stride);
static void avc_chroma_vt_8x8_msa(uint8_t *src, uint8_t *dst, int32_t stride,
uint32_t coeff0, uint32_t coeff1)
{
v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
v16u8 out0, out1, out2, out3;
v8u16 res0, res1, res2, res3, res4, res5, res6, res7;
v16i8 coeff_vec0 = __msa_fill_b(coeff0);
v16i8 coeff_vec1 = __msa_fill_b(coeff1);
v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
src0, src1, src2, src3);
DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
coeff_vec, res0, res1, res2, res3);
SLLI_4V(res0, res1, res2, res3, 3);
SRARI_H4_UH(res0, res1, res2, res3, 6);
SAT_UH4_UH(res0, res1, res2, res3, 7);
PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
LD_UB5(src, stride, src0, src1, src2, src3, src4);
src += (5 * stride);
LD_UB4(src, stride, src5, src6, src7, src8);
ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, src0, src1, src2,
src3);
ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, src4, src5, src6,
src7);
DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
coeff_vec, res0, res1, res2, res3);
DOTP_UB4_UH(src4, src5, src6, src7, coeff_vec, coeff_vec, coeff_vec,
coeff_vec, res4, res5, res6, res7);
SLLI_4V(res0, res1, res2, res3, 3);
SLLI_4V(res4, res5, res6, res7, 3);
SRARI_H4_UH(res0, res1, res2, res3, 6);
SRARI_H4_UH(res4, res5, res6, res7, 6);
SAT_UH4_UH(res0, res1, res2, res3, 7);
SAT_UH4_UH(res4, res5, res6, res7, 7);
PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
PCKEV_B2_UB(res5, res4, res7, res6, out2, out3);
ST8x8_UB(out0, out1, out2, out3, dst, stride);
}
ST8x4_UB(out0, out1, dst, dst_stride);
dst += (4 * dst_stride);
src0 = src4;
static void avc_chroma_vt_8w_msa(uint8_t *src, uint8_t *dst, int32_t stride,
uint32_t coeff0, uint32_t coeff1,
int32_t height)
{
if (4 == height) {
avc_chroma_vt_8x4_msa(src, dst, stride, coeff0, coeff1);
} else if (8 == height) {
avc_chroma_vt_8x8_msa(src, dst, stride, coeff0, coeff1);
}
}
@@ -1914,7 +1901,7 @@ void ff_put_h264_chroma_mc8_msa(uint8_t *dst, uint8_t *src,
} else if (x) {
avc_chroma_hz_8w_msa(src, dst, stride, x, (8 - x), height);
} else if (y) {
avc_chroma_vt_8w_msa(src, stride, dst, stride, y, (8 - y), height);
avc_chroma_vt_8w_msa(src, dst, stride, y, (8 - y), height);
} else {
copy_width8_msa(src, stride, dst, stride, height);
}
@@ -1933,7 +1920,7 @@ void ff_put_h264_chroma_mc4_msa(uint8_t *dst, uint8_t *src,
} else if (x) {
avc_chroma_hz_4w_msa(src, dst, stride, x, (8 - x), height);
} else if (y) {
avc_chroma_vt_4w_msa(src, stride, dst, stride, y, (8 - y), height);
avc_chroma_vt_4w_msa(src, dst, stride, y, (8 - y), height);
} else {
for (cnt = height; cnt--;) {
*((uint32_t *) dst) = *((uint32_t *) src);
@@ -1957,7 +1944,7 @@ void ff_put_h264_chroma_mc2_msa(uint8_t *dst, uint8_t *src,
} else if (x) {
avc_chroma_hz_2w_msa(src, dst, stride, x, (8 - x), height);
} else if (y) {
avc_chroma_vt_2w_msa(src, stride, dst, stride, y, (8 - y), height);
avc_chroma_vt_2w_msa(src, dst, stride, y, (8 - y), height);
} else {
for (cnt = height; cnt--;) {
*((uint16_t *) dst) = *((uint16_t *) src);