avcodec/mips: Improve avc chroma copy and avg vert mc msa functions

Replace the generic function with block-size-specific versions.
Load only the destination bytes that are needed instead of using an MSA load and pack.

Signed-off-by: Kaustubh Raste <kaustubh.raste@imgtec.com>
Reviewed-by: Manojkumar Bhosale <Manojkumar.Bhosale@imgtec.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
Kaustubh Raste, 2017-10-24 12:40:42 +05:30 (committed by Michael Niedermayer)
parent 2aab7c2dfa
commit ce0a52e9e9
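
The patch applies two ideas, sketched below in plain C (an illustrative sketch only, not part of the commit; avg_2x2_c and avg_2w_c are hypothetical names): branch once on the block height into a fixed-size routine instead of looping a generic one, and read/write only the destination bytes a block actually covers instead of doing a full MSA vector load followed by a pack.

    #include <stdint.h>

    /* Hypothetical fixed-size helper: average a filtered 2x2 block into dst,
     * touching only the four destination bytes involved. The rounded average
     * (a + b + 1) >> 1 matches the scalar fallback kept in
     * ff_avg_h264_chroma_mc2_msa at the end of the diff. */
    static void avg_2x2_c(const uint8_t *res, uint8_t *dst, int32_t stride)
    {
        for (int r = 0; r < 2; r++)
            for (int c = 0; c < 2; c++)
                dst[r * stride + c] =
                    (uint8_t) ((dst[r * stride + c] + res[r * 2 + c] + 1) >> 1);
    }

    /* Hypothetical dispatcher in the style of the _2w/_4w/_8w wrappers in the
     * diff: one branch on height selects straight-line code for that block
     * size, so no generic row loop remains in the hot path. */
    static void avg_2w_c(const uint8_t *res, uint8_t *dst, int32_t stride,
                         int32_t height)
    {
        if (2 == height)
            avg_2x2_c(res, dst, stride);
        /* heights 4 and 8 would call fixed 2x4 / 2x8 helpers here */
    }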

libavcodec/mips/h264chroma_msa.c

@@ -1124,24 +1124,25 @@ static void avc_chroma_hz_and_aver_dst_8w_msa(uint8_t *src, uint8_t *dst,
     }
 }
 
-static void avc_chroma_vt_and_aver_dst_2x2_msa(uint8_t *src, int32_t src_stride,
-                                               uint8_t *dst, int32_t dst_stride,
-                                               uint32_t coeff0, uint32_t coeff1)
+static void avc_chroma_vt_and_aver_dst_2x2_msa(uint8_t *src, uint8_t *dst,
+                                               int32_t stride, uint32_t coeff0,
+                                               uint32_t coeff1)
 {
     uint16_t out0, out1;
-    uint32_t load0, load1;
     v16i8 src0, src1, src2, tmp0, tmp1, res;
     v16u8 dst_data = { 0 };
+    v8i16 out;
     v8u16 res_r;
     v16i8 coeff_vec0 = __msa_fill_b(coeff0);
     v16i8 coeff_vec1 = __msa_fill_b(coeff1);
     v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
 
-    LD_SB3(src, src_stride, src0, src1, src2);
-    load0 = LW(dst);
-    load1 = LW(dst + dst_stride);
+    LD_SB3(src, stride, src0, src1, src2);
+    out0 = LH(dst);
+    out1 = LH(dst + stride);
 
-    INSERT_W2_UB(load0, load1, dst_data);
+    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 0, out0);
+    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 2, out1);
 
     ILVR_B2_SB(src1, src0, src2, src1, tmp0, tmp1);
@@ -1151,20 +1152,20 @@ static void avc_chroma_vt_and_aver_dst_2x2_msa(uint8_t *src, int32_t src_stride,
     res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
     res_r = __msa_sat_u_h(res_r, 7);
     res = __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
-    dst_data = __msa_aver_u_b((v16u8) res, dst_data);
+    out = (v8i16) __msa_aver_u_b((v16u8) res, dst_data);
 
-    out0 = __msa_copy_u_h((v8i16) dst_data, 0);
-    out1 = __msa_copy_u_h((v8i16) dst_data, 2);
+    out0 = __msa_copy_u_h(out, 0);
+    out1 = __msa_copy_u_h(out, 2);
 
     SH(out0, dst);
-    dst += dst_stride;
+    dst += stride;
     SH(out1, dst);
 }
 
-static void avc_chroma_vt_and_aver_dst_2x4_msa(uint8_t *src, int32_t src_stride,
-                                               uint8_t *dst, int32_t dst_stride,
-                                               uint32_t coeff0, uint32_t coeff1)
+static void avc_chroma_vt_and_aver_dst_2x4_msa(uint8_t *src, uint8_t *dst,
+                                               int32_t stride, uint32_t coeff0,
+                                               uint32_t coeff1)
 {
-    uint32_t load0, load1;
+    uint16_t tp0, tp1, tp2, tp3;
     v16i8 src0, src1, src2, src3, src4;
     v16u8 tmp0, tmp1, tmp2, tmp3;
     v8u16 res_r;
@@ -1174,19 +1175,16 @@ static void avc_chroma_vt_and_aver_dst_2x4_msa(uint8_t *src, int32_t src_stride,
     v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
     v16u8 dst_data = { 0 };
 
-    LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+    LD_SB5(src, stride, src0, src1, src2, src3, src4);
 
-    load0 = LW(dst);
-    load1 = LW(dst + dst_stride);
-    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 0, load0);
-    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 1, load1);
-
-    load0 = LW(dst + 2 * dst_stride);
-    load1 = LW(dst + 3 * dst_stride);
-    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 2, load0);
-    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 3, load1);
+    tp0 = LH(dst);
+    tp1 = LH(dst + stride);
+    tp2 = LH(dst + 2 * stride);
+    tp3 = LH(dst + 3 * stride);
+    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 0, tp0);
+    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 1, tp1);
+    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 2, tp2);
+    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 3, tp3);
 
     ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
                tmp0, tmp1, tmp2, tmp3);
@@ -1202,102 +1200,26 @@ static void avc_chroma_vt_and_aver_dst_2x4_msa(uint8_t *src, int32_t src_stride,
     res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
     res = (v8i16) __msa_aver_u_b((v16u8) res, dst_data);
 
-    ST2x4_UB(res, 0, dst, dst_stride);
-    dst += (4 * dst_stride);
+    ST2x4_UB(res, 0, dst, stride);
 }
 
-static void avc_chroma_vt_and_aver_dst_2x8_msa(uint8_t *src, int32_t src_stride,
-                                               uint8_t *dst, int32_t dst_stride,
-                                               uint32_t coeff0, uint32_t coeff1)
-{
-    uint32_t load0, load1, load2, load3;
-    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
-    v16u8 tmp0, tmp1, tmp2, tmp3;
-    v8i16 res;
-    v8u16 res_r;
-    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
-    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
-    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
-    v16u8 dst_data0 = { 0 };
-    v16u8 dst_data1 = { 0 };
-
-    LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
-    src += (5 * src_stride);
-    LD_SB4(src, src_stride, src5, src6, src7, src8);
-
-    LW4(dst, dst_stride, load0, load1, load2, load3);
-    dst_data0 = (v16u8) __msa_insert_h((v8i16) dst_data0, 0, load0);
-    dst_data0 = (v16u8) __msa_insert_h((v8i16) dst_data0, 1, load1);
-    dst_data0 = (v16u8) __msa_insert_h((v8i16) dst_data0, 2, load2);
-    dst_data0 = (v16u8) __msa_insert_h((v8i16) dst_data0, 3, load3);
-
-    LW4(dst + 4 * dst_stride, dst_stride, load0, load1, load2, load3);
-    dst_data1 = (v16u8) __msa_insert_h((v8i16) dst_data1, 0, load0);
-    dst_data1 = (v16u8) __msa_insert_h((v8i16) dst_data1, 1, load1);
-    dst_data1 = (v16u8) __msa_insert_h((v8i16) dst_data1, 2, load2);
-    dst_data1 = (v16u8) __msa_insert_h((v8i16) dst_data1, 3, load3);
-
-    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
-               tmp0, tmp1, tmp2, tmp3);
-    ILVR_W2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
-    tmp0 = (v16u8) __msa_ilvr_d((v2i64) tmp2, (v2i64) tmp0);
-
-    res_r = __msa_dotp_u_h(tmp0, coeff_vec);
-    res_r <<= 3;
-    res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
-    res_r = __msa_sat_u_h(res_r, 7);
-
-    res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
-    res = (v8i16) __msa_aver_u_b((v16u8) res, dst_data0);
-
-    ST2x4_UB(res, 0, dst, dst_stride);
-    dst += (4 * dst_stride);
-
-    ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7,
-               tmp0, tmp1, tmp2, tmp3);
-    ILVR_W2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
-    tmp0 = (v16u8) __msa_ilvr_d((v2i64) tmp2, (v2i64) tmp0);
-
-    res_r = __msa_dotp_u_h(tmp0, coeff_vec);
-    res_r <<= 3;
-    res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
-    res_r = __msa_sat_u_h(res_r, 7);
-
-    res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
-    res = (v8i16) __msa_aver_u_b((v16u8) res, dst_data1);
-
-    ST2x4_UB(res, 0, dst, dst_stride);
-}
-
-static void avc_chroma_vt_and_aver_dst_2w_msa(uint8_t *src, int32_t src_stride,
-                                              uint8_t *dst, int32_t dst_stride,
-                                              uint32_t coeff0, uint32_t coeff1,
-                                              int32_t height)
+static void avc_chroma_vt_and_aver_dst_2w_msa(uint8_t *src, uint8_t *dst,
+                                              int32_t stride, uint32_t coeff0,
+                                              uint32_t coeff1, int32_t height)
 {
     if (2 == height) {
-        avc_chroma_vt_and_aver_dst_2x2_msa(src, src_stride, dst, dst_stride,
-                                           coeff0, coeff1);
+        avc_chroma_vt_and_aver_dst_2x2_msa(src, dst, stride, coeff0, coeff1);
     } else if (4 == height) {
-        avc_chroma_vt_and_aver_dst_2x4_msa(src, src_stride, dst, dst_stride,
-                                           coeff0, coeff1);
-    } else if (8 == height) {
-        avc_chroma_vt_and_aver_dst_2x8_msa(src, src_stride, dst, dst_stride,
-                                           coeff0, coeff1);
+        avc_chroma_vt_and_aver_dst_2x4_msa(src, dst, stride, coeff0, coeff1);
     }
 }
 
-static void avc_chroma_vt_and_aver_dst_4x2_msa(uint8_t *src, int32_t src_stride,
-                                               uint8_t *dst, int32_t dst_stride,
-                                               uint32_t coeff0, uint32_t coeff1)
+static void avc_chroma_vt_and_aver_dst_4x2_msa(uint8_t *src, uint8_t *dst,
+                                               int32_t stride, uint32_t coeff0,
+                                               uint32_t coeff1)
 {
     uint32_t load0, load1;
-    v16i8 src0, src1, src2, tmp0, tmp1;
+    v16u8 src0, src1, src2, tmp0, tmp1;
     v16u8 dst_data = { 0 };
     v8u16 res_r;
     v16u8 res;
@@ -1305,128 +1227,184 @@ static void avc_chroma_vt_and_aver_dst_4x2_msa(uint8_t *src, int32_t src_stride,
     v16i8 coeff_vec1 = __msa_fill_b(coeff1);
     v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
 
-    LD_SB3(src, src_stride, src0, src1, src2);
-    load0 = LW(dst);
-    load1 = LW(dst + dst_stride);
+    LD_UB3(src, stride, src0, src1, src2);
+    LW2(dst, stride, load0, load1);
 
     INSERT_W2_UB(load0, load1, dst_data);
-    ILVR_B2_SB(src1, src0, src2, src1, tmp0, tmp1);
+    ILVR_B2_UB(src1, src0, src2, src1, tmp0, tmp1);
 
-    tmp0 = (v16i8) __msa_ilvr_d((v2i64) tmp1, (v2i64) tmp0);
-    res_r = __msa_dotp_u_h((v16u8) tmp0, coeff_vec);
+    tmp0 = (v16u8) __msa_ilvr_d((v2i64) tmp1, (v2i64) tmp0);
+    res_r = __msa_dotp_u_h(tmp0, coeff_vec);
     res_r <<= 3;
     res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
     res_r = __msa_sat_u_h(res_r, 7);
     res = (v16u8) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
     res = __msa_aver_u_b(res, dst_data);
 
-    ST4x2_UB(res, dst, dst_stride);
+    ST4x2_UB(res, dst, stride);
 }
 
-static void avc_chroma_vt_and_aver_dst_4x4mul_msa(uint8_t *src,
-                                                  int32_t src_stride,
-                                                  uint8_t *dst,
-                                                  int32_t dst_stride,
-                                                  uint32_t coeff0,
-                                                  uint32_t coeff1,
-                                                  int32_t height)
+static void avc_chroma_vt_and_aver_dst_4x4_msa(uint8_t *src, uint8_t *dst,
+                                               int32_t stride, uint32_t coeff0,
+                                               uint32_t coeff1)
 {
-    uint32_t load0, load1, row;
-    v16i8 src0, src1, src2, src3, src4;
+    uint32_t tp0, tp1, tp2, tp3;
+    v16u8 src0, src1, src2, src3, src4;
     v16u8 tmp0, tmp1, tmp2, tmp3;
     v16u8 dst0 = { 0 };
-    v16u8 dst1 = { 0 };
     v8u16 res0_r, res1_r;
-    v16u8 res0, res1;
+    v16u8 out;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
     v16i8 coeff_vec1 = __msa_fill_b(coeff1);
     v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
 
-    src0 = LD_SB(src);
-    src += src_stride;
-
-    for (row = (height >> 2); row--;) {
-        LD_SB4(src, src_stride, src1, src2, src3, src4);
-        src += (4 * src_stride);
-
-        load0 = LW(dst);
-        load1 = LW(dst + dst_stride);
-        INSERT_W2_UB(load0, load1, dst0);
-        load0 = LW(dst + 2 * dst_stride);
-        load1 = LW(dst + 3 * dst_stride);
-        INSERT_W2_UB(load0, load1, dst1);
-
-        ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
-                   tmp0, tmp1, tmp2, tmp3);
-        ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
-        DOTP_UB2_UH(tmp0, tmp2, coeff_vec, coeff_vec, res0_r, res1_r);
-
-        res0_r <<= 3;
-        res1_r <<= 3;
-
-        SRARI_H2_UH(res0_r, res1_r, 6);
-        SAT_UH2_UH(res0_r, res1_r, 7);
-        PCKEV_B2_UB(res0_r, res0_r, res1_r, res1_r, res0, res1);
-        AVER_UB2_UB(res0, dst0, res1, dst1, res0, res1);
-
-        ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
-        dst += (4 * dst_stride);
-        src0 = src4;
-    }
+    LD_UB5(src, stride, src0, src1, src2, src3, src4);
+    LW4(dst, stride, tp0, tp1, tp2, tp3);
+    INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0);
+    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, tmp0, tmp1, tmp2,
+               tmp3);
+    ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
+    DOTP_UB2_UH(tmp0, tmp2, coeff_vec, coeff_vec, res0_r, res1_r);
+    res0_r <<= 3;
+    res1_r <<= 3;
+    SRARI_H2_UH(res0_r, res1_r, 6);
+    SAT_UH2_UH(res0_r, res1_r, 7);
+    out = (v16u8) __msa_pckev_b((v16i8) res1_r, (v16i8) res0_r);
+    out = __msa_aver_u_b(out, dst0);
+    ST4x4_UB(out, out, 0, 1, 2, 3, dst, stride);
 }
 
-static void avc_chroma_vt_and_aver_dst_4w_msa(uint8_t *src, int32_t src_stride,
-                                              uint8_t *dst, int32_t dst_stride,
-                                              uint32_t coeff0, uint32_t coeff1,
-                                              int32_t height)
+static void avc_chroma_vt_and_aver_dst_4x8_msa(uint8_t *src, uint8_t *dst,
+                                               int32_t stride, uint32_t coeff0,
+                                               uint32_t coeff1)
+{
+    uint32_t tp0, tp1, tp2, tp3;
+    v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+    v16u8 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, out0, out1;
+    v16u8 dst0 = { 0 }, dst1 = { 0 };
+    v8u16 res0, res1, res2, res3;
+    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
+    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
+    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
+
+    LD_UB5(src, stride, src0, src1, src2, src3, src4);
+    src += (5 * stride);
+    LD_UB4(src, stride, src5, src6, src7, src8);
+    LW4(dst, stride, tp0, tp1, tp2, tp3);
+    INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0);
+    LW4(dst + 4 * stride, stride, tp0, tp1, tp2, tp3);
+    INSERT_W4_UB(tp0, tp1, tp2, tp3, dst1);
+    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, tmp0, tmp1, tmp2,
+               tmp3);
+    ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, tmp4, tmp5, tmp6,
+               tmp7);
+    ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
+    ILVR_D2_UB(tmp5, tmp4, tmp7, tmp6, tmp4, tmp6);
+    DOTP_UB2_UH(tmp0, tmp2, coeff_vec, coeff_vec, res0, res1);
+    DOTP_UB2_UH(tmp4, tmp6, coeff_vec, coeff_vec, res2, res3);
+    SLLI_4V(res0, res1, res2, res3, 3);
+    SRARI_H4_UH(res0, res1, res2, res3, 6);
+    SAT_UH4_UH(res0, res1, res2, res3, 7);
+    PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
+    AVER_UB2_UB(out0, dst0, out1, dst1, out0, out1);
+    ST4x8_UB(out0, out1, dst, stride);
+}
+
+static void avc_chroma_vt_and_aver_dst_4w_msa(uint8_t *src, uint8_t *dst,
+                                              int32_t stride, uint32_t coeff0,
+                                              uint32_t coeff1, int32_t height)
 {
     if (2 == height) {
-        avc_chroma_vt_and_aver_dst_4x2_msa(src, src_stride, dst, dst_stride,
-                                           coeff0, coeff1);
-    } else {
-        avc_chroma_vt_and_aver_dst_4x4mul_msa(src, src_stride, dst, dst_stride,
-                                              coeff0, coeff1, height);
+        avc_chroma_vt_and_aver_dst_4x2_msa(src, dst, stride, coeff0, coeff1);
+    } else if (4 == height) {
+        avc_chroma_vt_and_aver_dst_4x4_msa(src, dst, stride, coeff0, coeff1);
+    } else if (8 == height) {
+        avc_chroma_vt_and_aver_dst_4x8_msa(src, dst, stride, coeff0, coeff1);
     }
 }
 
-static void avc_chroma_vt_and_aver_dst_8w_msa(uint8_t *src, int32_t src_stride,
-                                              uint8_t *dst, int32_t dst_stride,
-                                              uint32_t coeff0, uint32_t coeff1,
-                                              int32_t height)
+static void avc_chroma_vt_and_aver_dst_8x4_msa(uint8_t *src, uint8_t *dst,
+                                               int32_t stride, uint32_t coeff0,
+                                               uint32_t coeff1)
 {
-    uint32_t row;
+    uint64_t tp0, tp1, tp2, tp3;
     v16u8 src0, src1, src2, src3, src4;
     v16u8 out0, out1;
     v8u16 res0, res1, res2, res3;
-    v16u8 dst0, dst1, dst2, dst3;
+    v16u8 dst0 = { 0 }, dst1 = { 0 };
     v16i8 coeff_vec0 = __msa_fill_b(coeff0);
     v16i8 coeff_vec1 = __msa_fill_b(coeff1);
     v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
 
-    src0 = LD_UB(src);
-    src += src_stride;
+    LD_UB5(src, stride, src0, src1, src2, src3, src4);
+    LD4(dst, stride, tp0, tp1, tp2, tp3);
+    INSERT_D2_UB(tp0, tp1, dst0);
+    INSERT_D2_UB(tp2, tp3, dst1);
+    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
+               src0, src1, src2, src3);
+    DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
+                coeff_vec, res0, res1, res2, res3);
+    SLLI_4V(res0, res1, res2, res3, 3);
+    SRARI_H4_UH(res0, res1, res2, res3, 6);
+    SAT_UH4_UH(res0, res1, res2, res3, 7);
+    PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
+    AVER_UB2_UB(out0, dst0, out1, dst1, out0, out1);
+    ST8x4_UB(out0, out1, dst, stride);
+}
 
-    for (row = height >> 2; row--;) {
-        LD_UB4(src, src_stride, src1, src2, src3, src4);
-        src += (4 * src_stride);
-
-        LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-        ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
-                   src0, src1, src2, src3);
-        DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
-                    coeff_vec, res0, res1, res2, res3);
-        SLLI_4V(res0, res1, res2, res3, 3);
-        SRARI_H4_UH(res0, res1, res2, res3, 6);
-        SAT_UH4_UH(res0, res1, res2, res3, 7);
-        PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
-        PCKEV_D2_UB(dst1, dst0, dst3, dst2, dst0, dst1);
-        AVER_UB2_UB(out0, dst0, out1, dst1, out0, out1);
-        ST8x4_UB(out0, out1, dst, dst_stride);
-
-        dst += (4 * dst_stride);
-        src0 = src4;
-    }
-}
+static void avc_chroma_vt_and_aver_dst_8x8_msa(uint8_t *src, uint8_t *dst,
+                                               int32_t stride, uint32_t coeff0,
+                                               uint32_t coeff1)
+{
+    uint64_t tp0, tp1, tp2, tp3;
+    v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+    v16u8 out0, out1, out2, out3;
+    v16u8 dst0 = { 0 }, dst1 = { 0 }, dst2 = { 0 }, dst3 = { 0 };
+    v8u16 res0, res1, res2, res3, res4, res5, res6, res7;
+    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
+    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
+    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
+
+    LD_UB5(src, stride, src0, src1, src2, src3, src4);
+    src += (5 * stride);
+    LD_UB4(src, stride, src5, src6, src7, src8);
+    LD4(dst, stride, tp0, tp1, tp2, tp3);
+    INSERT_D2_UB(tp0, tp1, dst0);
+    INSERT_D2_UB(tp2, tp3, dst1);
+    LD4(dst + 4 * stride, stride, tp0, tp1, tp2, tp3);
+    INSERT_D2_UB(tp0, tp1, dst2);
+    INSERT_D2_UB(tp2, tp3, dst3);
+    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
+               src0, src1, src2, src3);
+    ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7,
+               src4, src5, src6, src7);
+    DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
+                coeff_vec, res0, res1, res2, res3);
+    DOTP_UB4_UH(src4, src5, src6, src7, coeff_vec, coeff_vec, coeff_vec,
+                coeff_vec, res4, res5, res6, res7);
+    SLLI_4V(res0, res1, res2, res3, 3);
+    SLLI_4V(res4, res5, res6, res7, 3);
+    SRARI_H4_UH(res0, res1, res2, res3, 6);
+    SRARI_H4_UH(res4, res5, res6, res7, 6);
+    SAT_UH4_UH(res0, res1, res2, res3, 7);
+    SAT_UH4_UH(res4, res5, res6, res7, 7);
+    PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
+    PCKEV_B2_UB(res5, res4, res7, res6, out2, out3);
+    AVER_UB2_UB(out0, dst0, out1, dst1, out0, out1);
+    AVER_UB2_UB(out2, dst2, out3, dst3, out2, out3);
+    ST8x8_UB(out0, out1, out2, out3, dst, stride);
+}
+
+static void avc_chroma_vt_and_aver_dst_8w_msa(uint8_t *src, uint8_t *dst,
+                                              int32_t stride, uint32_t coeff0,
+                                              uint32_t coeff1, int32_t height)
+{
+    if (4 == height) {
+        avc_chroma_vt_and_aver_dst_8x4_msa(src, dst, stride, coeff0, coeff1);
+    } else if (8 == height) {
+        avc_chroma_vt_and_aver_dst_8x8_msa(src, dst, stride, coeff0, coeff1);
     }
 }
@@ -1771,160 +1749,115 @@ static void avc_chroma_hv_and_aver_dst_8w_msa(uint8_t *src, int32_t src_stride,
     }
 }
 
-static void copy_width8_msa(uint8_t *src, int32_t src_stride,
-                            uint8_t *dst, int32_t dst_stride,
+static void copy_width4_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                             int32_t height)
 {
-    int32_t cnt;
-    uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
-    v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+    uint32_t tp0, tp1, tp2, tp3, tp4, tp5, tp6, tp7;
 
-    if (0 == height % 12) {
-        for (cnt = (height / 12); cnt--;) {
-            LD_UB8(src, src_stride,
-                   src0, src1, src2, src3, src4, src5, src6, src7);
-            src += (8 * src_stride);
-
-            out0 = __msa_copy_u_d((v2i64) src0, 0);
-            out1 = __msa_copy_u_d((v2i64) src1, 0);
-            out2 = __msa_copy_u_d((v2i64) src2, 0);
-            out3 = __msa_copy_u_d((v2i64) src3, 0);
-            out4 = __msa_copy_u_d((v2i64) src4, 0);
-            out5 = __msa_copy_u_d((v2i64) src5, 0);
-            out6 = __msa_copy_u_d((v2i64) src6, 0);
-            out7 = __msa_copy_u_d((v2i64) src7, 0);
-
-            SD4(out0, out1, out2, out3, dst, dst_stride);
-            dst += (4 * dst_stride);
-            SD4(out4, out5, out6, out7, dst, dst_stride);
-            dst += (4 * dst_stride);
-
-            LD_UB4(src, src_stride, src0, src1, src2, src3);
-            src += (4 * src_stride);
-
-            out0 = __msa_copy_u_d((v2i64) src0, 0);
-            out1 = __msa_copy_u_d((v2i64) src1, 0);
-            out2 = __msa_copy_u_d((v2i64) src2, 0);
-            out3 = __msa_copy_u_d((v2i64) src3, 0);
-
-            SD4(out0, out1, out2, out3, dst, dst_stride);
-            dst += (4 * dst_stride);
-        }
-    } else if (0 == height % 8) {
-        for (cnt = height >> 3; cnt--;) {
-            LD_UB8(src, src_stride,
-                   src0, src1, src2, src3, src4, src5, src6, src7);
-            src += (8 * src_stride);
-
-            out0 = __msa_copy_u_d((v2i64) src0, 0);
-            out1 = __msa_copy_u_d((v2i64) src1, 0);
-            out2 = __msa_copy_u_d((v2i64) src2, 0);
-            out3 = __msa_copy_u_d((v2i64) src3, 0);
-            out4 = __msa_copy_u_d((v2i64) src4, 0);
-            out5 = __msa_copy_u_d((v2i64) src5, 0);
-            out6 = __msa_copy_u_d((v2i64) src6, 0);
-            out7 = __msa_copy_u_d((v2i64) src7, 0);
-
-            SD4(out0, out1, out2, out3, dst, dst_stride);
-            dst += (4 * dst_stride);
-            SD4(out4, out5, out6, out7, dst, dst_stride);
-            dst += (4 * dst_stride);
-        }
-    } else if (0 == height % 4) {
-        for (cnt = (height / 4); cnt--;) {
-            LD_UB4(src, src_stride, src0, src1, src2, src3);
-            src += (4 * src_stride);
-
-            out0 = __msa_copy_u_d((v2i64) src0, 0);
-            out1 = __msa_copy_u_d((v2i64) src1, 0);
-            out2 = __msa_copy_u_d((v2i64) src2, 0);
-            out3 = __msa_copy_u_d((v2i64) src3, 0);
-
-            SD4(out0, out1, out2, out3, dst, dst_stride);
-            dst += (4 * dst_stride);
-        }
-    } else if (0 == height % 2) {
-        for (cnt = (height / 2); cnt--;) {
-            LD_UB2(src, src_stride, src0, src1);
-            src += (2 * src_stride);
-
-            out0 = __msa_copy_u_d((v2i64) src0, 0);
-            out1 = __msa_copy_u_d((v2i64) src1, 0);
-
-            SD(out0, dst);
-            dst += dst_stride;
-            SD(out1, dst);
-            dst += dst_stride;
-        }
+    if (8 == height) {
+        LW4(src, stride, tp0, tp1, tp2, tp3);
+        src += 4 * stride;
+        LW4(src, stride, tp4, tp5, tp6, tp7);
+        SW4(tp0, tp1, tp2, tp3, dst, stride);
+        dst += 4 * stride;
+        SW4(tp4, tp5, tp6, tp7, dst, stride);
+    } else if (4 == height) {
+        LW4(src, stride, tp0, tp1, tp2, tp3);
+        SW4(tp0, tp1, tp2, tp3, dst, stride);
+    } else if (2 == height) {
+        LW2(src, stride, tp0, tp1);
+        SW(tp0, dst);
+        dst += stride;
+        SW(tp1, dst);
     }
 }
 
-static void avg_width4_msa(uint8_t *src, int32_t src_stride,
-                           uint8_t *dst, int32_t dst_stride,
-                           int32_t height)
+static void copy_width8_msa(uint8_t *src, uint8_t *dst, int32_t stride,
+                            int32_t height)
 {
-    int32_t cnt;
-    uint32_t out0, out1, out2, out3;
-    v16u8 src0, src1, src2, src3;
-    v16u8 dst0, dst1, dst2, dst3;
+    uint64_t src0, src1, src2, src3, src4, src5, src6, src7;
 
-    if (0 == (height % 4)) {
-        for (cnt = (height / 4); cnt--;) {
-            LD_UB4(src, src_stride, src0, src1, src2, src3);
-            src += (4 * src_stride);
-
-            LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
-
-            AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3,
-                        dst0, dst1, dst2, dst3);
-
-            out0 = __msa_copy_u_w((v4i32) dst0, 0);
-            out1 = __msa_copy_u_w((v4i32) dst1, 0);
-            out2 = __msa_copy_u_w((v4i32) dst2, 0);
-            out3 = __msa_copy_u_w((v4i32) dst3, 0);
-            SW4(out0, out1, out2, out3, dst, dst_stride);
-            dst += (4 * dst_stride);
-        }
-    } else if (0 == (height % 2)) {
-        for (cnt = (height / 2); cnt--;) {
-            LD_UB2(src, src_stride, src0, src1);
-            src += (2 * src_stride);
-
-            LD_UB2(dst, dst_stride, dst0, dst1);
-
-            AVER_UB2_UB(src0, dst0, src1, dst1, dst0, dst1);
-
-            out0 = __msa_copy_u_w((v4i32) dst0, 0);
-            out1 = __msa_copy_u_w((v4i32) dst1, 0);
-            SW(out0, dst);
-            dst += dst_stride;
-            SW(out1, dst);
-            dst += dst_stride;
-        }
+    if (8 == height) {
+        LD4(src, stride, src0, src1, src2, src3);
+        src += 4 * stride;
+        LD4(src, stride, src4, src5, src6, src7);
+        SD4(src0, src1, src2, src3, dst, stride);
+        dst += 4 * stride;
+        SD4(src4, src5, src6, src7, dst, stride);
+    } else if (4 == height) {
+        LD4(src, stride, src0, src1, src2, src3);
+        SD4(src0, src1, src2, src3, dst, stride);
     }
 }
 
-static void avg_width8_msa(uint8_t *src, int32_t src_stride,
-                           uint8_t *dst, int32_t dst_stride,
+static void avg_width4_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                            int32_t height)
 {
-    int32_t cnt;
-    uint64_t out0, out1, out2, out3;
-    v16u8 src0, src1, src2, src3;
-    v16u8 dst0, dst1, dst2, dst3;
+    uint32_t tp0, tp1, tp2, tp3;
+    v16u8 src0 = { 0 }, src1 = { 0 }, dst0 = { 0 }, dst1 = { 0 };
 
-    for (cnt = (height / 4); cnt--;) {
-        LD_UB4(src, src_stride, src0, src1, src2, src3);
-        src += (4 * src_stride);
-
-        LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
+    if (8 == height) {
+        LW4(src, stride, tp0, tp1, tp2, tp3);
+        src += 4 * stride;
+        INSERT_W4_UB(tp0, tp1, tp2, tp3, src0);
+        LW4(src, stride, tp0, tp1, tp2, tp3);
+        INSERT_W4_UB(tp0, tp1, tp2, tp3, src1);
+        LW4(dst, stride, tp0, tp1, tp2, tp3);
+        INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0);
+        LW4(dst + 4 * stride, stride, tp0, tp1, tp2, tp3);
+        INSERT_W4_UB(tp0, tp1, tp2, tp3, dst1);
+        AVER_UB2_UB(src0, dst0, src1, dst1, dst0, dst1);
+        ST4x8_UB(dst0, dst1, dst, stride);
+    } else if (4 == height) {
+        LW4(src, stride, tp0, tp1, tp2, tp3);
+        INSERT_W4_UB(tp0, tp1, tp2, tp3, src0);
+        LW4(dst, stride, tp0, tp1, tp2, tp3);
+        INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0);
+        dst0 = __msa_aver_u_b(src0, dst0);
+        ST4x4_UB(dst0, dst0, 0, 1, 2, 3, dst, stride);
+    } else if (2 == height) {
+        LW2(src, stride, tp0, tp1);
+        INSERT_W2_UB(tp0, tp1, src0);
+        LW2(dst, stride, tp0, tp1);
+        INSERT_W2_UB(tp0, tp1, dst0);
+        dst0 = __msa_aver_u_b(src0, dst0);
+        ST4x2_UB(dst0, dst, stride);
+    }
+}
 
-        AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3,
-                    dst0, dst1, dst2, dst3);
+static void avg_width8_msa(uint8_t *src, uint8_t *dst, int32_t stride,
+                           int32_t height)
+{
+    uint64_t tp0, tp1, tp2, tp3, tp4, tp5, tp6, tp7;
+    v16u8 src0 = { 0 }, src1 = { 0 }, src2 = { 0 }, src3 = { 0 };
+    v16u8 dst0 = { 0 }, dst1 = { 0 }, dst2 = { 0 }, dst3 = { 0 };
 
-        out0 = __msa_copy_u_d((v2i64) dst0, 0);
-        out1 = __msa_copy_u_d((v2i64) dst1, 0);
-        out2 = __msa_copy_u_d((v2i64) dst2, 0);
-        out3 = __msa_copy_u_d((v2i64) dst3, 0);
-        SD4(out0, out1, out2, out3, dst, dst_stride);
-        dst += (4 * dst_stride);
+    if (8 == height) {
+        LD4(src, stride, tp0, tp1, tp2, tp3);
+        src += 4 * stride;
+        LD4(src, stride, tp4, tp5, tp6, tp7);
+        INSERT_D2_UB(tp0, tp1, src0);
+        INSERT_D2_UB(tp2, tp3, src1);
+        INSERT_D2_UB(tp4, tp5, src2);
+        INSERT_D2_UB(tp6, tp7, src3);
+        LD4(dst, stride, tp0, tp1, tp2, tp3);
+        LD4(dst + 4 * stride, stride, tp4, tp5, tp6, tp7);
+        INSERT_D2_UB(tp0, tp1, dst0);
+        INSERT_D2_UB(tp2, tp3, dst1);
+        INSERT_D2_UB(tp4, tp5, dst2);
+        INSERT_D2_UB(tp6, tp7, dst3);
+        AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1,
+                    dst2, dst3);
+        ST8x8_UB(dst0, dst1, dst2, dst3, dst, stride);
+    } else if (4 == height) {
+        LD4(src, stride, tp0, tp1, tp2, tp3);
+        INSERT_D2_UB(tp0, tp1, src0);
+        INSERT_D2_UB(tp2, tp3, src1);
+        LD4(dst, stride, tp0, tp1, tp2, tp3);
+        INSERT_D2_UB(tp0, tp1, dst0);
+        INSERT_D2_UB(tp2, tp3, dst1);
+        AVER_UB2_UB(src0, dst0, src1, dst1, dst0, dst1);
+        ST8x4_UB(dst0, dst1, dst, stride);
     }
 }
@@ -1940,15 +1873,13 @@ void ff_put_h264_chroma_mc8_msa(uint8_t *dst, uint8_t *src,
     } else if (y) {
         avc_chroma_vt_8w_msa(src, dst, stride, y, (8 - y), height);
     } else {
-        copy_width8_msa(src, stride, dst, stride, height);
+        copy_width8_msa(src, dst, stride, height);
     }
 }
 
 void ff_put_h264_chroma_mc4_msa(uint8_t *dst, uint8_t *src,
                                 ptrdiff_t stride, int height, int x, int y)
 {
-    int32_t cnt;
-
     av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);
 
     if (x && y) {
@@ -1958,12 +1889,7 @@ void ff_put_h264_chroma_mc4_msa(uint8_t *dst, uint8_t *src,
     } else if (y) {
         avc_chroma_vt_4w_msa(src, dst, stride, y, (8 - y), height);
     } else {
-        for (cnt = height; cnt--;) {
-            *((uint32_t *) dst) = *((uint32_t *) src);
-
-            src += stride;
-            dst += stride;
-        }
+        copy_width4_msa(src, dst, stride, height);
     }
 }
@@ -2003,10 +1929,9 @@ void ff_avg_h264_chroma_mc8_msa(uint8_t *dst, uint8_t *src,
     } else if (x) {
         avc_chroma_hz_and_aver_dst_8w_msa(src, dst, stride, x, (8 - x), height);
     } else if (y) {
-        avc_chroma_vt_and_aver_dst_8w_msa(src, stride, dst,
-                                          stride, y, (8 - y), height);
+        avc_chroma_vt_and_aver_dst_8w_msa(src, dst, stride, y, (8 - y), height);
     } else {
-        avg_width8_msa(src, stride, dst, stride, height);
+        avg_width8_msa(src, dst, stride, height);
     }
 }
@@ -2022,10 +1947,9 @@ void ff_avg_h264_chroma_mc4_msa(uint8_t *dst, uint8_t *src,
     } else if (x) {
         avc_chroma_hz_and_aver_dst_4w_msa(src, dst, stride, x, (8 - x), height);
     } else if (y) {
-        avc_chroma_vt_and_aver_dst_4w_msa(src, stride, dst,
-                                          stride, y, (8 - y), height);
+        avc_chroma_vt_and_aver_dst_4w_msa(src, dst, stride, y, (8 - y), height);
     } else {
-        avg_width4_msa(src, stride, dst, stride, height);
+        avg_width4_msa(src, dst, stride, height);
     }
 }
@@ -2043,8 +1967,7 @@ void ff_avg_h264_chroma_mc2_msa(uint8_t *dst, uint8_t *src,
     } else if (x) {
         avc_chroma_hz_and_aver_dst_2w_msa(src, dst, stride, x, (8 - x), height);
     } else if (y) {
-        avc_chroma_vt_and_aver_dst_2w_msa(src, stride, dst,
-                                          stride, y, (8 - y), height);
+        avc_chroma_vt_and_aver_dst_2w_msa(src, dst, stride, y, (8 - y), height);
     } else {
         for (cnt = height; cnt--;) {
             dst[0] = (dst[0] + src[0] + 1) >> 1;