diff --git a/postproc/swscale.c b/postproc/swscale.c
index 38284a19f5..dd10521582 100644
--- a/postproc/swscale.c
+++ b/postproc/swscale.c
@@ -1041,6 +1041,21 @@ static inline void initFilter(int16_t **outFilter, int16_t **filterPos, int *out
 	if(min>minFilterSize) minFilterSize= min;
     }
 
+    if (flags & SWS_CPU_CAPS_ALTIVEC) {
+      // we can handle the special case 4,
+      // so we don't want to go to the full 8
+      if (minFilterSize < 5)
+        filterAlign = 4;
+
+      // we really don't want to waste our time
+      // doing useless computation, so fall-back on
+      // the scalar C code for very small filter.
+      // vectorizing is worth it only if you have
+      // decent-sized vector.
+      if (minFilterSize < 3)
+        filterAlign = 1;
+    }
+
     ASSERT(minFilterSize > 0)
     filterSize= (minFilterSize +(filterAlign-1)) & (~(filterAlign-1));
     ASSERT(filterSize > 0)
@@ -1947,7 +1962,10 @@ SwsContext *sws_getContext(int srcW, int srcH, int origSrcFormat, int dstW, int
 
     /* precalculate horizontal scaler filter coefficients */
     {
-	const int filterAlign= (flags & SWS_CPU_CAPS_MMX) ? 4 : 1;
+	const int filterAlign=
+	  (flags & SWS_CPU_CAPS_MMX) ? 4 :
+	  (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 :
+	  1;
 
 	initFilter(&c->hLumFilter, &c->hLumFilterPos, &c->hLumFilterSize, c->lumXInc,
 		   srcW      ,       dstW, filterAlign, 1<<14,
@@ -1976,14 +1994,20 @@ SwsContext *sws_getContext(int srcW, int srcH, int origSrcFormat, int dstW, int
 
 
     /* precalculate vertical scaler filter coefficients */
-    initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc,
-	       srcH      ,        dstH, 1, (1<<12)-4,
-	       (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC)  : flags,
-	       srcFilter->lumV, dstFilter->lumV);
-    initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc,
-	       c->chrSrcH, c->chrDstH, 1, (1<<12)-4,
-	       (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
-	       srcFilter->chrV, dstFilter->chrV);
+    {
+      const int filterAlign=
+        (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 :
+        1;
+
+      initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc,
+		 srcH      ,        dstH, filterAlign, (1<<12)-4,
+		 (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC)  : flags,
+		 srcFilter->lumV, dstFilter->lumV);
+      initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc,
+		 c->chrSrcH, c->chrDstH, filterAlign, (1<<12)-4,
+		 (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
+		 srcFilter->chrV, dstFilter->chrV);
+    }
 
     // Calculate Buffer Sizes so that they won't run out while handling these damn slices
     c->vLumBufSize= c->vLumFilterSize;
diff --git a/postproc/swscale_altivec_template.c b/postproc/swscale_altivec_template.c
index 0f6a3cda34..5cd70683a5 100644
--- a/postproc/swscale_altivec_template.c
+++ b/postproc/swscale_altivec_template.c
@@ -20,7 +20,19 @@
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
 
-static const vector unsigned int altivec_vectorShiftInt19 = {19, 19, 19, 19};
+#ifdef CONFIG_DARWIN
+static const vector signed int vzero =
+  (vector signed int)(0, 0, 0, 0);
+static const vector unsigned int altivec_vectorShiftInt19 =
+  (vector unsigned int)(19, 19, 19, 19);
+#else
+static const vector signed int vzero =
+  (vector signed int){0,0,0,0};
+static const vector unsigned int altivec_vectorShiftInt19 =
+  (vector unsigned int){19, 19, 19, 19};
+
+#endif
+
 static inline void altivec_packIntArrayToCharArray(int *val, uint8_t* dest, int dstW) {
   register int i;
 
@@ -201,3 +213,175 @@ yuv2yuvX_altivec_real(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
     altivec_packIntArrayToCharArray(v,vDest,chrDstW);
   }
 }
+
+static inline void hScale_altivec_real(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc, int16_t *filter, int16_t *filterPos, int filterSize) {
+  register int i;
+  int __attribute__ ((aligned (16))) tempo[4];
+
+  if (filterSize % 4) {
+    for(i=0; i<dstW; i++) {
+      register int j;
+      register int srcPos = filterPos[i];
+      register int val = 0;
+      for(j=0; j<filterSize; j++) {
+        val += ((int)src[srcPos + j])*filter[filterSize*i + j];
+      }
+      dst[i] = MIN(MAX(0, val>>7), (1<<15)-1);
+    }
+  }
+  else
+  switch (filterSize) {
+  case 4:
+    {
+      for(i=0; i<dstW; i++) {
+        register int srcPos = filterPos[i];
+
+        vector unsigned char src_v0 = vec_ld(srcPos, src);
+        vector unsigned char src_v1;
+        if ((((int)src + srcPos)% 16) > 12) {
+          src_v1 = vec_ld(srcPos + 16, src);
+        }
+        vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+        vector signed short src_v = // vec_unpackh sign-extends...
+          (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+        // now put our elements in the even slots
+        src_v = vec_mergeh(src_v, (vector signed short)vzero);
+
+        vector signed short filter_v = vec_ld(i << 3, filter);
+        // the 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2)
+
+        // the neat trick : we only care for half the elements,
+        // high or low depending on (i<<3)%16 (it's 0 or 8 here),
+        // and we're going to use vec_mule, so we chose
+        // carefully how to "unpack" the elements into the even slots
+        if ((i << 3) % 16)
+          filter_v = vec_mergel(filter_v,(vector signed short)vzero);
+        else
+          filter_v = vec_mergeh(filter_v,(vector signed short)vzero);
+
+        vector signed int val_vEven = vec_mule(src_v, filter_v);
+        vector signed int val_s = vec_sums(val_vEven, vzero);
+        vec_st(val_s, 0, tempo);
+        dst[i] = MIN(MAX(0, tempo[3]>>7), (1<<15)-1);
+      }
+    }
+    break;
+
+  case 8:
+    {
+      for(i=0; i<dstW; i++) {
+        register int srcPos = filterPos[i];
+
+        vector unsigned char src_v0 = vec_ld(srcPos, src);
+        vector unsigned char src_v1;
+        if ((((int)src + srcPos)% 16) > 8) {
+          src_v1 = vec_ld(srcPos + 16, src);
+        }
+        vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+        vector signed short src_v = // vec_unpackh sign-extends...
+          (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+        vector signed short filter_v = vec_ld(i << 4, filter);
+        // the 4 above is 3 (filterSize == 8) + 1 (sizeof(short) == 2)
+
+        vector signed int val_v = vec_msums(src_v, filter_v, (vector signed int)vzero);
+        vector signed int val_s = vec_sums(val_v, vzero);
+        vec_st(val_s, 0, tempo);
+        dst[i] = MIN(MAX(0, tempo[3]>>7), (1<<15)-1);
+      }
+    }
+    break;
+
+  case 16:
+    {
+      for(i=0; i<dstW; i++) {
+        register int srcPos = filterPos[i];
+
+        vector unsigned char src_v0 = vec_ld(srcPos, src);
+        vector unsigned char src_v1 = vec_ld(srcPos + 16, src);
+        vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+        vector signed short src_vA = // vec_unpackh sign-extends...
+          (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+        vector signed short src_vB = // vec_unpackl sign-extends...
+          (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
+
+        vector signed short filter_v0 = vec_ld(i << 5, filter);
+        vector signed short filter_v1 = vec_ld((i << 5) + 16, filter);
+        // the 5 above is 4 (filterSize == 16) + 1 (sizeof(short) == 2)
+
+        vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero);
+        vector signed int val_v = vec_msums(src_vB, filter_v1, val_acc);
+
+        vector signed int val_s = vec_sums(val_v, vzero);
+
+        vec_st(val_s, 0, tempo);
+        dst[i] = MIN(MAX(0, tempo[3]>>7), (1<<15)-1);
+      }
+    }
+    break;
+
+  default:
+    {
+      for(i=0; i<dstW; i++) {
+        register int j;
+        register int srcPos = filterPos[i];
+
+        vector signed int val_v = (vector signed int)vzero;
+        vector signed short filter_v0R = vec_ld(i * 2 * filterSize, filter);
+        vector unsigned char permF = vec_lvsl((i * 2 * filterSize), filter);
+
+        vector unsigned char src_v0 = vec_ld(srcPos, src);
+        vector unsigned char permS = vec_lvsl(srcPos, src);
+
+        for (j = 0 ; j < filterSize - 15; j += 16) {
+          vector unsigned char src_v1 = vec_ld(srcPos + j + 16, src);
+          vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS);
+
+          vector signed short src_vA = // vec_unpackh sign-extends...
+            (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+          vector signed short src_vB = // vec_unpackl sign-extends...
+            (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
+
+          vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
+          vector signed short filter_v2R = vec_ld((i * 2 * filterSize) + (j * 2) + 32, filter);
+          vector signed short filter_v0 = vec_perm(filter_v0R, filter_v1R, permF);
+          vector signed short filter_v1 = vec_perm(filter_v1R, filter_v2R, permF);
+
+          val_v = vec_msums(src_vA, filter_v0, val_v);
+          val_v = vec_msums(src_vB, filter_v1, val_v);
+
+          filter_v0R = filter_v2R;
+          src_v0 = src_v1;
+        }
+
+        if (j < (filterSize-7)) {
+          // loading src_v0 is useless, it's already done above
+          //vector unsigned char src_v0 = vec_ld(srcPos + j, src);
+          vector unsigned char src_v1;
+          if ((((int)src + srcPos)% 16) > 8) {
+            src_v1 = vec_ld(srcPos + j + 16, src);
+          }
+          vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS);
+
+          vector signed short src_v = // vec_unpackh sign-extends...
+            (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+          // loading filter_v0R is useless, it's already done above
+          //vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter);
+          vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
+          vector signed short filter_v = vec_perm(filter_v0R, filter_v1R, permF);
+
+          val_v = vec_msums(src_v, filter_v, val_v);
+        }
+
+        vector signed int val_s = vec_sums(val_v, vzero);
+
+        vec_st(val_s, 0, tempo);
+        dst[i] = MIN(MAX(0, tempo[3]>>7), (1<<15)-1);
+      }
+
+    }
+  }
+}
diff --git a/postproc/swscale_template.c b/postproc/swscale_template.c
index 6a8117f2fb..2d266f5655 100644
--- a/postproc/swscale_template.c
+++ b/postproc/swscale_template.c
@@ -2153,6 +2153,9 @@ static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW
 	: "%ebx", "%eax", "%ecx"
 	);
 }
+#else
+#ifdef HAVE_ALTIVEC
+	hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
 #else
 	int i;
 	for(i=0; i<dstW; i++)
@@ -2171,6 +2174,7 @@ static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW
 //		dst[i] = val>>7;
 	}
 #endif
+#endif
 }
 
 // *** horizontal scale Y line to temp buffer
 static inline void RENAME(hyscale)(uint16_t *dst, int dstWidth, uint8_t *src, int srcW, int xInc,
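
Note on the initFilter() change above: filterSize is rounded up to a multiple of
filterAlign, and the AltiVec path picks 8 by default, drops to 4 when the dedicated
4-tap case is enough, and drops to 1 (scalar) when vectorizing would not pay off.
A minimal standalone sketch of that rounding follows; the helper name and the
printout are illustrative only, not part of the patch:

#include <stdio.h>

/* same expression as initFilter(): pad up to the next multiple of filterAlign */
static int round_filter_size(int minFilterSize, int filterAlign)
{
    return (minFilterSize + (filterAlign - 1)) & (~(filterAlign - 1));
}

int main(void)
{
    /* mirrors the AltiVec tuning above: <3 taps stay scalar, <5 taps use the
       4-tap case, everything else is padded to a multiple of 8 for vec_msums */
    int sizes[] = { 2, 3, 4, 5, 7, 9, 13 };
    for (int i = 0; i < (int)(sizeof(sizes)/sizeof(sizes[0])); i++) {
        int filterAlign = 8;
        if (sizes[i] < 5) filterAlign = 4;
        if (sizes[i] < 3) filterAlign = 1;
        printf("minFilterSize %2d -> filterAlign %d -> filterSize %2d\n",
               sizes[i], filterAlign, round_filter_size(sizes[i], filterAlign));
    }
    return 0;
}

The padded coefficient slots are zero-filled by initFilter(), so the extra taps
cost only vector arithmetic, not correctness.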
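
Note on hScale_altivec_real(): every branch (the 4-, 8- and 16-tap cases and the
generic one) computes the same per-output-pixel dot product as the scalar fallback
in swscale_template.c; only the data movement differs. A scalar reference sketch,
with an illustrative function name and the usual MIN/MAX macros assumed:

#include <stdint.h>

#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))

/* reference for what each vectorized case must produce: a filterSize-tap
   dot product per output pixel, shifted down by 7 and clamped to 15 bits */
static void hScale_ref(int16_t *dst, int dstW, const uint8_t *src,
                       const int16_t *filter, const int16_t *filterPos,
                       int filterSize)
{
    for (int i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];   /* first source sample for output pixel i */
        int val = 0;
        for (int j = 0; j < filterSize; j++)
            val += (int)src[srcPos + j] * filter[filterSize * i + j];
        dst[i] = MIN(MAX(0, val >> 7), (1 << 15) - 1);
    }
}

This is also why the vector code can fall back to the plain loop whenever
filterSize is not a multiple of 4: the result is identical either way.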