Merge commit '30f3f959879eee7890973e8cc9ce076450ced111'
* commit '30f3f959879eee7890973e8cc9ce076450ced111':
  ppc: dsputil: K&R formatting cosmetics

Merged-by: Michael Niedermayer <michaelni@gmx.at>
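The change is purely cosmetic: over-long prototypes and initializers are wrapped so continuation lines sit under the opening parenthesis, opening braces of function bodies move onto their own line, braces around single-statement loop bodies are dropped, and stray blank lines are removed. A minimal before/after sketch of the wrapping rule (the function names here are illustrative, not taken from the patch):

    #include <stdint.h>

    /* Before: one long line, brace cuddled onto the prototype. */
    static int example_sad_old(void *ctx, uint8_t *a, uint8_t *b, int stride, int h) { return 0; }

    /* After: parameters wrapped under the opening parenthesis, brace on its own line. */
    static int example_sad_new(void *ctx, uint8_t *a, uint8_t *b,
                               int stride, int h)
    {
        return 0;
    }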
@@ -24,6 +24,7 @@
 #if HAVE_ALTIVEC_H
 #include <altivec.h>
 #endif
 
+#include "libavutil/attributes.h"
 #include "libavutil/ppc/types_altivec.h"
 #include "libavutil/ppc/util_altivec.h"
@@ -31,11 +32,13 @@
 #include "libavcodec/dsputil.h"
 #include "dsputil_altivec.h"
 
-static int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+static int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2,
+                            int line_size, int h)
 {
     int i;
     int s;
-    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
+    const vector unsigned char zero =
+        (const vector unsigned char) vec_splat_u8(0);
     vector unsigned char perm1 = vec_lvsl(0, pix2);
     vector unsigned char perm2 = vec_add(perm1, vec_splat_u8(1));
     vector unsigned char pix2l, pix2r;
@@ -75,11 +78,13 @@ static int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size
     return s;
 }
 
-static int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+static int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2,
+                            int line_size, int h)
 {
     int i;
     int s;
-    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
+    const vector unsigned char zero =
+        (const vector unsigned char) vec_splat_u8(0);
     vector unsigned char perm = vec_lvsl(0, pix2);
     vector unsigned char pix2l, pix2r;
     vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
@@ -123,7 +128,6 @@ static int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size
         pix1 += line_size;
         pix2v = pix3v;
         pix3 += line_size;
-
     }
 
     /* Sum up the four partial sums, and put the result into s. */
@@ -133,13 +137,16 @@ static int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size
     return s;
 }
 
-static int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+static int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2,
+                             int line_size, int h)
 {
     int i;
     int s;
     uint8_t *pix3 = pix2 + line_size;
-    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
-    const vector unsigned short two = (const vector unsigned short)vec_splat_u16(2);
+    const vector unsigned char zero =
+        (const vector unsigned char) vec_splat_u8(0);
+    const vector unsigned short two =
+        (const vector unsigned short) vec_splat_u16(2);
     vector unsigned char avgv, t5;
     vector unsigned char perm1 = vec_lvsl(0, pix2);
     vector unsigned char perm2 = vec_add(perm1, vec_splat_u8(1));
@@ -228,11 +235,13 @@ static int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_siz
     return s;
 }
 
-static int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+static int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2,
+                         int line_size, int h)
 {
     int i;
     int s;
-    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
+    const vector unsigned int zero =
+        (const vector unsigned int) vec_splat_u32(0);
     vector unsigned char perm = vec_lvsl(0, pix2);
     vector unsigned char t1, t2, t3, t4, t5;
     vector unsigned int sad;
@@ -240,7 +249,6 @@ static int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, i
 
     sad = (vector unsigned int) vec_splat_u32(0);
 
-
     for (i = 0; i < h; i++) {
         /* Read potentially unaligned pixels into t1 and t2. */
         vector unsigned char pix2l = vec_ld(0, pix2);
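The loop kept by this hunk relies on the classic pre-VSX AltiVec idiom for reading 16 bytes from a possibly unaligned address: two aligned vec_ld loads that bracket the data plus a vec_lvsl-generated permute vector. A self-contained sketch of that idiom (the helper name is illustrative, not from the patch):

    #include <stdint.h>
    #include <altivec.h>

    /* Load 16 bytes starting at an arbitrarily aligned pointer p:
     * vec_ld only ever reads 16-byte-aligned blocks, so fetch the two
     * blocks that straddle p and let vec_perm extract the wanted span
     * using the shift pattern vec_lvsl derives from p's low 4 bits. */
    static vector unsigned char load_unaligned16(const uint8_t *p)
    {
        vector unsigned char left  = vec_ld(0,  p); /* aligned block containing p */
        vector unsigned char right = vec_ld(15, p); /* following aligned block    */
        vector unsigned char align = vec_lvsl(0, p);
        return vec_perm(left, right, align);
    }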
@@ -268,12 +276,16 @@ static int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, i
     return s;
 }
 
-static int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+static int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2,
+                        int line_size, int h)
 {
     int i;
     int s;
-    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
-    const vector unsigned char permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};
+    const vector unsigned int zero =
+        (const vector unsigned int) vec_splat_u32(0);
+    const vector unsigned char permclear =
+        (vector unsigned char)
+        { 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0 };
     vector unsigned char perm1 = vec_lvsl(0, pix1);
     vector unsigned char perm2 = vec_lvsl(0, pix2);
     vector unsigned char t1, t2, t3, t4, t5;
@@ -317,7 +329,8 @@ static int pix_norm1_altivec(uint8_t *pix, int line_size)
 {
     int i;
     int s;
-    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
+    const vector unsigned int zero =
+        (const vector unsigned int) vec_splat_u32(0);
     vector unsigned char perm = vec_lvsl(0, pix);
     vector unsigned char pixv;
     vector unsigned int sv;
@@ -347,12 +360,16 @@ static int pix_norm1_altivec(uint8_t *pix, int line_size)
 
 /* Sum of Squared Errors for an 8x8 block, AltiVec-enhanced.
  * It's the sad8_altivec code above w/ squaring added. */
-static int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+static int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2,
+                        int line_size, int h)
 {
     int i;
     int s;
-    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
-    const vector unsigned char permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};
+    const vector unsigned int zero =
+        (const vector unsigned int) vec_splat_u32(0);
+    const vector unsigned char permclear =
+        (vector unsigned char)
+        { 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0 };
     vector unsigned char perm1 = vec_lvsl(0, pix1);
     vector unsigned char perm2 = vec_lvsl(0, pix2);
     vector unsigned char t1, t2, t3, t4, t5;
@@ -397,11 +414,13 @@ static int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, in
 
 /* Sum of Squared Errors for a 16x16 block, AltiVec-enhanced.
  * It's the sad16_altivec code above w/ squaring added. */
-static int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+static int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2,
+                         int line_size, int h)
 {
     int i;
     int s;
-    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
+    const vector unsigned int zero =
+        (const vector unsigned int) vec_splat_u32(0);
     vector unsigned char perm = vec_lvsl(0, pix2);
     vector unsigned char t1, t2, t3, t4, t5;
     vector unsigned int sum;
@@ -441,7 +460,8 @@ static int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, i
 
 static int pix_sum_altivec(uint8_t *pix, int line_size)
 {
-    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
+    const vector unsigned int zero =
+        (const vector unsigned int) vec_splat_u32(0);
     vector unsigned char perm = vec_lvsl(0, pix);
     vector unsigned char t1;
     vector unsigned int sad;
@@ -472,21 +492,20 @@ static int pix_sum_altivec(uint8_t * pix, int line_size)
     return s;
 }
 
-static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels, int line_size)
+static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels,
+                               int line_size)
 {
     int i;
     vector unsigned char perm = vec_lvsl(0, pixels);
     vector unsigned char bytes;
-    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
+    const vector unsigned char zero =
+        (const vector unsigned char) vec_splat_u8(0);
     vector signed short shorts;
 
     for (i = 0; i < 8; i++) {
-        /* Read potentially unaligned pixels.
-         * We're reading 16 pixels, and actually only want 8,
-         * but we simply ignore the extras. */
+        // Read potentially unaligned pixels.
+        // We're reading 16 pixels, and actually only want 8,
+        // but we simply ignore the extras.
         vector unsigned char pixl = vec_ld(0, pixels);
         vector unsigned char pixr = vec_ld(7, pixels);
         bytes = vec_perm(pixl, pixr, perm);
@@ -508,7 +527,8 @@ static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
     vector unsigned char perm1 = vec_lvsl(0, s1);
     vector unsigned char perm2 = vec_lvsl(0, s2);
     vector unsigned char bytes, pixl, pixr;
-    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
+    const vector unsigned char zero =
+        (const vector unsigned char) vec_splat_u8(0);
     vector signed short shorts1, shorts2;
 
     for (i = 0; i < 4; i++) {
@@ -573,8 +593,8 @@ static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
     }
 }
 
-
-static void clear_block_altivec(int16_t *block) {
+static void clear_block_altivec(int16_t *block)
+{
     LOAD_ZERO;
     vec_st(zero_s16v, 0, block);
     vec_st(zero_s16v, 16, block);
@@ -586,8 +606,8 @@ static void clear_block_altivec(int16_t *block) {
     vec_st(zero_s16v, 112, block);
 }
 
-
-static void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
+static void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w)
+{
     register int i;
     register vector unsigned char vdst, vsrc;
 
@@ -599,31 +619,35 @@ static void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
         vec_st(vdst, i, (unsigned char *) dst);
     }
     /* If w is not a multiple of 16. */
-    for (; (i < w) ; i++) {
+    for (; (i < w); i++)
         dst[i] = src[i];
-    }
 }
 
-static int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
+static int hadamard8_diff8x8_altivec(/* MpegEncContext */ void *s, uint8_t *dst,
+                                     uint8_t *src, int stride, int h)
+{
     int sum;
     register const vector unsigned char vzero =
         (const vector unsigned char) vec_splat_u8(0);
     register vector signed short temp0, temp1, temp2, temp3, temp4,
                                  temp5, temp6, temp7;
     {
-        register const vector signed short vprod1 =(const vector signed short)
-            { 1,-1, 1,-1, 1,-1, 1,-1 };
-        register const vector signed short vprod2 =(const vector signed short)
-            { 1, 1,-1,-1, 1, 1,-1,-1 };
-        register const vector signed short vprod3 =(const vector signed short)
-            { 1, 1, 1, 1,-1,-1,-1,-1 };
-        register const vector unsigned char perm1 = (const vector unsigned char)
+        register const vector signed short vprod1 =
+            (const vector signed short) { 1, -1, 1, -1, 1, -1, 1, -1 };
+        register const vector signed short vprod2 =
+            (const vector signed short) { 1, 1, -1, -1, 1, 1, -1, -1 };
+        register const vector signed short vprod3 =
+            (const vector signed short) { 1, 1, 1, 1, -1, -1, -1, -1 };
+        register const vector unsigned char perm1 =
+            (const vector unsigned char)
             { 0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
               0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D };
-        register const vector unsigned char perm2 = (const vector unsigned char)
+        register const vector unsigned char perm2 =
+            (const vector unsigned char)
             { 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
               0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B };
-        register const vector unsigned char perm3 = (const vector unsigned char)
+        register const vector unsigned char perm3 =
+            (const vector unsigned char)
             { 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
               0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
 
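For orientation: the vprod1/vprod2/vprod3 and perm1/perm2/perm3 constants being reformatted here feed the butterfly stages of an 8-point Walsh-Hadamard transform on each row of the difference block (each ±1 vector selects one stage's sums and differences, the perm vectors pair up the lanes). A plain scalar sketch of that transform, illustrative only and not part of the patch:

    /* One 8-point Walsh-Hadamard transform, done as three butterfly
     * passes (stride 1, 2, 4) of paired sums and differences. */
    static void wht8(int x[8])
    {
        for (int stride = 1; stride < 8; stride <<= 1)
            for (int i = 0; i < 8; i += 2 * stride)
                for (int j = i; j < i + stride; j++) {
                    int a = x[j], b = x[j + stride];
                    x[j]          = a + b;
                    x[j + stride] = a - b;
                }
    }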
@@ -726,7 +750,9 @@ static int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, u
  * On the 970, the hand-made RA is still a win (around 690 vs. around 780),
  * but xlc goes to around 660 on the regular C code...
  */
-static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h) {
+static int hadamard8_diff16x8_altivec(/* MpegEncContext */ void *s, uint8_t *dst,
+                                      uint8_t *src, int stride, int h)
+{
     int sum;
     register vector signed short
         temp0 __asm__ ("v0"),
@@ -751,18 +777,23 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst,
     {
         register const vector signed short vprod1 __asm__ ("v16") =
             (const vector signed short) { 1, -1, 1, -1, 1, -1, 1, -1 };
+
         register const vector signed short vprod2 __asm__ ("v17") =
             (const vector signed short) { 1, 1, -1, -1, 1, 1, -1, -1 };
+
        register const vector signed short vprod3 __asm__ ("v18") =
             (const vector signed short) { 1, 1, 1, 1, -1, -1, -1, -1 };
+
         register const vector unsigned char perm1 __asm__ ("v19") =
             (const vector unsigned char)
             { 0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
               0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D };
+
         register const vector unsigned char perm2 __asm__ ("v20") =
             (const vector unsigned char)
             { 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
               0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B };
+
         register const vector unsigned char perm3 __asm__ ("v21") =
             (const vector unsigned char)
             { 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
@@ -770,14 +801,16 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst,
 
 #define ONEITERBUTTERFLY(i, res1, res2) \
     { \
-        register vector unsigned char src1 __asm__ ("v22"), \
+        register vector unsigned char \
+            src1 __asm__ ("v22"), \
             src2 __asm__ ("v23"), \
             dst1 __asm__ ("v24"), \
             dst2 __asm__ ("v25"), \
             srcO __asm__ ("v22"), \
             dstO __asm__ ("v23"); \
        \
-        register vector signed short srcV __asm__ ("v24"), \
+        register vector signed short \
+            srcV __asm__ ("v24"), \
             dstV __asm__ ("v25"), \
             srcW __asm__ ("v26"), \
             dstW __asm__ ("v27"), \
@@ -921,7 +954,9 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst,
     return sum;
 }
 
-static int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
+static int hadamard8_diff16_altivec(/* MpegEncContext */ void *s, uint8_t *dst,
+                                    uint8_t *src, int stride, int h)
+{
     int score;
     score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
     if (h == 16) {
@@ -941,14 +976,18 @@ av_cold void ff_dsputil_init_altivec(DSPContext *c, AVCodecContext *avctx)
     c->pix_abs[0][3] = sad16_xy2_altivec;
     c->pix_abs[0][0] = sad16_altivec;
     c->pix_abs[1][0] = sad8_altivec;
+
     c->sad[0] = sad16_altivec;
     c->sad[1] = sad8_altivec;
-    c->pix_norm1 = pix_norm1_altivec;
-    c->sse[1]= sse8_altivec;
     c->sse[0] = sse16_altivec;
+    c->sse[1] = sse8_altivec;
+
+    c->pix_norm1 = pix_norm1_altivec;
     c->pix_sum = pix_sum_altivec;
+
     c->diff_pixels = diff_pixels_altivec;
     c->add_bytes = add_bytes_altivec;
+
     if (!high_bit_depth) {
         c->get_pixels = get_pixels_altivec;
         c->clear_block = clear_block_altivec;
@@ -24,11 +24,13 @@
 #define AVCODEC_PPC_DSPUTIL_ALTIVEC_H
 
 #include <stdint.h>
 
 #include "libavcodec/dsputil.h"
 
-void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h);
-void ff_avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h);
+void ff_avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels,
+                             ptrdiff_t line_size, int h);
+void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels,
+                             ptrdiff_t line_size, int h);
 
 void ff_fdct_altivec(int16_t *block);
 void ff_gmc1_altivec(uint8_t *dst, uint8_t *src, int stride, int h,
@@ -53,6 +53,7 @@ static void clear_blocks_dcbz32_ppc(int16_t *blocks)
 {
     register int misal = ((unsigned long) blocks & 0x00000010);
     register int i = 0;
+
     if (misal) {
         ((unsigned long *) blocks)[0] = 0L;
         ((unsigned long *) blocks)[1] = 0L;
@@ -60,9 +61,8 @@ static void clear_blocks_dcbz32_ppc(int16_t *blocks)
         ((unsigned long *) blocks)[3] = 0L;
         i += 16;
     }
-    for ( ; i < sizeof(int16_t)*6*64-31 ; i += 32) {
+    for (; i < sizeof(int16_t) * 6 * 64 - 31; i += 32)
         __asm__ volatile ("dcbz %0,%1" :: "b" (blocks), "r" (i) : "memory");
-    }
     if (misal) {
         ((unsigned long *) blocks)[188] = 0L;
         ((unsigned long *) blocks)[189] = 0L;
@@ -79,14 +79,14 @@ static void clear_blocks_dcbz128_ppc(int16_t *blocks)
 {
     register int misal = ((unsigned long) blocks & 0x0000007f);
     register int i = 0;
+
     if (misal) {
         /* We could probably also optimize this case,
          * but there's not much point as the machines
          * aren't available yet (2003-06-26). */
         memset(blocks, 0, sizeof(int16_t) * 6 * 64);
-    }
-    else
-        for ( ; i < sizeof(int16_t)*6*64 ; i += 128) {
+    } else {
+        for (; i < sizeof(int16_t) * 6 * 64; i += 128)
             __asm__ volatile ("dcbzl %0,%1" :: "b" (blocks), "r" (i) : "memory");
     }
 }
@@ -110,9 +110,8 @@ static long check_dcbzl_effect(void)
     register long i = 0;
     long count = 0;
 
-    if (!fakedata) {
+    if (!fakedata)
         return 0L;
-    }
 
     fakedata_middle = (fakedata + 512);
 
@@ -122,10 +121,9 @@ static long check_dcbzl_effect(void)
      * in gcc-3.3 / RS/6000 speaks. Seems to avoid using r0, so.... */
     __asm__ volatile ("dcbzl %0, %1" :: "b" (fakedata_middle), "r" (zero));
 
-    for (i = 0; i < 1024 ; i ++) {
+    for (i = 0; i < 1024; i++)
         if (fakedata[i] == (char) 0)
             count++;
-    }
 
     av_free(fakedata);
 
@@ -178,6 +176,5 @@ av_cold void ff_dsputil_init_ppc(DSPContext *c, AVCodecContext *avctx)
                 c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
             }
         }
-
     }
 }