Merge commit '9c12c6ff9539e926df0b2a2299e915ae71872600'

* commit '9c12c6ff9539e926df0b2a2299e915ae71872600':
  motion_est: convert stride to ptrdiff_t

Conflicts:
    libavcodec/me_cmp.c
    libavcodec/ppc/me_cmp.c
    libavcodec/x86/me_cmp_init.c

See: 9c669672c7
Merged-by: Michael Niedermayer <michaelni@gmx.at>
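
Editor's note: the change being merged replaces the `int line_size` / `int stride` parameters of the motion-estimation compare functions with `ptrdiff_t`. A stride is added to pixel pointers once per row and may legitimately be negative (e.g. bottom-up layouts), so `ptrdiff_t` is the natural type and avoids implicit narrowing in pointer arithmetic on 64-bit targets. The sketch below only illustrates that idea; `sad8x8`, `main` and the buffers are made-up names, not code from this commit.

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical SAD helper in the style of the me_cmp functions touched by
 * this commit: the stride is a ptrdiff_t so pointer arithmetic with a
 * negative stride (flipped image) stays well defined. */
static int sad8x8(const uint8_t *a, const uint8_t *b, ptrdiff_t stride, int h)
{
    int sum = 0;
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 8; x++)
            sum += abs(a[x] - b[x]);
        a += stride;   /* stride may be negative */
        b += stride;
    }
    return sum;
}

int main(void)
{
    uint8_t buf1[8 * 8] = { 0 }, buf2[8 * 8] = { 0 };
    buf2[3] = 10;
    /* Walk both blocks bottom-up by starting at the last row and using a
     * negative stride; the result is the same as a top-down pass. */
    return sad8x8(buf1 + 7 * 8, buf2 + 7 * 8, -8, 8) == 10 ? 0 : 1;
}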
@@ -26,17 +26,17 @@
 #include "libavcodec/mpegvideo.h"
 
 int ff_pix_abs16_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
-                       int line_size, int h);
+                       ptrdiff_t stride, int h);
 int ff_pix_abs16_x2_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
-                          int line_size, int h);
+                          ptrdiff_t stride, int h);
 int ff_pix_abs16_y2_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
-                          int line_size, int h);
+                          ptrdiff_t stride, int h);
 
 int ff_pix_abs8_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
-                      int line_size, int h);
+                      ptrdiff_t stride, int h);
 
 int ff_sse16_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
-                   int line_size, int h);
+                   ptrdiff_t stride, int h);
 
 av_cold void ff_me_cmp_init_arm(MECmpContext *c, AVCodecContext *avctx)
 {
@@ -32,7 +32,7 @@
 uint32_t ff_square_tab[512] = { 0, };
 
 static int sse4_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                  int line_size, int h)
+                  ptrdiff_t stride, int h)
 {
     int s = 0, i;
     uint32_t *sq = ff_square_tab + 256;
@@ -42,14 +42,14 @@ static int sse4_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         s += sq[pix1[1] - pix2[1]];
         s += sq[pix1[2] - pix2[2]];
         s += sq[pix1[3] - pix2[3]];
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     return s;
 }
 
 static int sse8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                  int line_size, int h)
+                  ptrdiff_t stride, int h)
 {
     int s = 0, i;
     uint32_t *sq = ff_square_tab + 256;
@@ -63,14 +63,14 @@ static int sse8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         s += sq[pix1[5] - pix2[5]];
         s += sq[pix1[6] - pix2[6]];
         s += sq[pix1[7] - pix2[7]];
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     return s;
 }
 
 static int sse16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                   int line_size, int h)
+                   ptrdiff_t stride, int h)
 {
     int s = 0, i;
     uint32_t *sq = ff_square_tab + 256;
@@ -93,8 +93,8 @@ static int sse16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         s += sq[pix1[14] - pix2[14]];
         s += sq[pix1[15] - pix2[15]];
 
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     return s;
 }
@@ -112,7 +112,7 @@ static int sum_abs_dctelem_c(int16_t *block)
 #define avg4(a, b, c, d) ((a + b + c + d + 2) >> 2)
 
 static inline int pix_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                              int line_size, int h)
+                              ptrdiff_t stride, int h)
 {
     int s = 0, i;
 
@@ -133,14 +133,14 @@ static inline int pix_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         s += abs(pix1[13] - pix2[13]);
         s += abs(pix1[14] - pix2[14]);
         s += abs(pix1[15] - pix2[15]);
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     return s;
 }
 
 static int pix_abs16_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                          int line_size, int h)
+                          ptrdiff_t stride, int h)
 {
     int s = 0, i;
 
@@ -161,17 +161,17 @@ static int pix_abs16_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
         s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
         s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     return s;
 }
 
 static int pix_abs16_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                          int line_size, int h)
+                          ptrdiff_t stride, int h)
 {
     int s = 0, i;
-    uint8_t *pix3 = pix2 + line_size;
+    uint8_t *pix3 = pix2 + stride;
 
     for (i = 0; i < h; i++) {
         s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
@@ -190,18 +190,18 @@ static int pix_abs16_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
         s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
         s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
-        pix1 += line_size;
-        pix2 += line_size;
-        pix3 += line_size;
+        pix1 += stride;
+        pix2 += stride;
+        pix3 += stride;
     }
     return s;
 }
 
 static int pix_abs16_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                           int line_size, int h)
+                           ptrdiff_t stride, int h)
 {
     int s = 0, i;
-    uint8_t *pix3 = pix2 + line_size;
+    uint8_t *pix3 = pix2 + stride;
 
     for (i = 0; i < h; i++) {
         s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
@@ -220,15 +220,15 @@ static int pix_abs16_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
         s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
         s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
-        pix1 += line_size;
-        pix2 += line_size;
-        pix3 += line_size;
+        pix1 += stride;
+        pix2 += stride;
+        pix3 += stride;
     }
     return s;
 }
 
 static inline int pix_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                             int line_size, int h)
+                             ptrdiff_t stride, int h)
 {
     int s = 0, i;
 
@@ -241,14 +241,14 @@ static inline int pix_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         s += abs(pix1[5] - pix2[5]);
         s += abs(pix1[6] - pix2[6]);
         s += abs(pix1[7] - pix2[7]);
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     return s;
 }
 
 static int pix_abs8_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                         int line_size, int h)
+                         ptrdiff_t stride, int h)
 {
     int s = 0, i;
 
@@ -261,17 +261,17 @@ static int pix_abs8_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
         s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
         s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     return s;
 }
 
 static int pix_abs8_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                         int line_size, int h)
+                         ptrdiff_t stride, int h)
 {
     int s = 0, i;
-    uint8_t *pix3 = pix2 + line_size;
+    uint8_t *pix3 = pix2 + stride;
 
     for (i = 0; i < h; i++) {
         s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
@@ -282,18 +282,18 @@ static int pix_abs8_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
         s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
         s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
-        pix1 += line_size;
-        pix2 += line_size;
-        pix3 += line_size;
+        pix1 += stride;
+        pix2 += stride;
+        pix3 += stride;
     }
     return s;
 }
 
 static int pix_abs8_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                          int line_size, int h)
+                          ptrdiff_t stride, int h)
 {
     int s = 0, i;
-    uint8_t *pix3 = pix2 + line_size;
+    uint8_t *pix3 = pix2 + stride;
 
     for (i = 0; i < h; i++) {
         s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
@@ -304,14 +304,15 @@ static int pix_abs8_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
         s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
         s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
-        pix1 += line_size;
-        pix2 += line_size;
-        pix3 += line_size;
+        pix1 += stride;
+        pix2 += stride;
+        pix3 += stride;
     }
     return s;
 }
 
-static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h)
+static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
+                    ptrdiff_t stride, int h)
 {
     int score1 = 0, score2 = 0, x, y;
 
@@ -335,7 +336,8 @@ static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int
     return score1 + FFABS(score2) * 8;
 }
 
-static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h)
+static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
+                   ptrdiff_t stride, int h)
 {
     int score1 = 0, score2 = 0, x, y;
 
@@ -360,7 +362,7 @@ static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int
 }
 
 static int zero_cmp(MpegEncContext *s, uint8_t *a, uint8_t *b,
-                    int stride, int h)
+                    ptrdiff_t stride, int h)
 {
     return 0;
 }
@@ -443,7 +445,7 @@ void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
 #define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))
 
 static int hadamard8_diff8x8_c(MpegEncContext *s, uint8_t *dst,
-                               uint8_t *src, int stride, int h)
+                               uint8_t *src, ptrdiff_t stride, int h)
 {
     int i, temp[64], sum = 0;
 
@@ -495,7 +497,7 @@ static int hadamard8_diff8x8_c(MpegEncContext *s, uint8_t *dst,
 }
 
 static int hadamard8_intra8x8_c(MpegEncContext *s, uint8_t *src,
-                                uint8_t *dummy, int stride, int h)
+                                uint8_t *dummy, ptrdiff_t stride, int h)
 {
     int i, temp[64], sum = 0;
 
@@ -547,7 +549,7 @@ static int hadamard8_intra8x8_c(MpegEncContext *s, uint8_t *src,
 }
 
 static int dct_sad8x8_c(MpegEncContext *s, uint8_t *src1,
-                        uint8_t *src2, int stride, int h)
+                        uint8_t *src2, ptrdiff_t stride, int h)
 {
     LOCAL_ALIGNED_16(int16_t, temp, [64]);
 
@@ -588,7 +590,7 @@ static int dct_sad8x8_c(MpegEncContext *s, uint8_t *src1,
 }
 
 static int dct264_sad8x8_c(MpegEncContext *s, uint8_t *src1,
-                           uint8_t *src2, int stride, int h)
+                           uint8_t *src2, ptrdiff_t stride, int h)
 {
     int16_t dct[8][8];
     int i, sum = 0;
@@ -613,7 +615,7 @@ static int dct264_sad8x8_c(MpegEncContext *s, uint8_t *src1,
 #endif
 
 static int dct_max8x8_c(MpegEncContext *s, uint8_t *src1,
-                        uint8_t *src2, int stride, int h)
+                        uint8_t *src2, ptrdiff_t stride, int h)
 {
     LOCAL_ALIGNED_16(int16_t, temp, [64]);
     int sum = 0, i;
@@ -630,7 +632,7 @@ static int dct_max8x8_c(MpegEncContext *s, uint8_t *src1,
 }
 
 static int quant_psnr8x8_c(MpegEncContext *s, uint8_t *src1,
-                           uint8_t *src2, int stride, int h)
+                           uint8_t *src2, ptrdiff_t stride, int h)
 {
     LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
     int16_t *const bak = temp + 64;
@@ -655,7 +657,7 @@ static int quant_psnr8x8_c(MpegEncContext *s, uint8_t *src1,
 }
 
 static int rd8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
-                   int stride, int h)
+                   ptrdiff_t stride, int h)
 {
     const uint8_t *scantable = s->intra_scantable.permutated;
     LOCAL_ALIGNED_16(int16_t, temp, [64]);
@@ -732,7 +734,7 @@ static int rd8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
 }
 
 static int bit8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
-                    int stride, int h)
+                    ptrdiff_t stride, int h)
 {
     const uint8_t *scantable = s->intra_scantable.permutated;
     LOCAL_ALIGNED_16(int16_t, temp, [64]);
@@ -795,7 +797,7 @@ static int bit8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
 #define VSAD_INTRA(size) \
 static int vsad_intra ## size ## _c(MpegEncContext *c, \
                                     uint8_t *s, uint8_t *dummy, \
-                                    int stride, int h) \
+                                    ptrdiff_t stride, int h) \
 { \
     int score = 0, x, y; \
 \
@@ -817,7 +819,7 @@ VSAD_INTRA(16)
 #define VSAD(size) \
 static int vsad ## size ## _c(MpegEncContext *c, \
                               uint8_t *s1, uint8_t *s2, \
-                              int stride, int h) \
+                              ptrdiff_t stride, int h) \
 { \
     int score = 0, x, y; \
 \
@@ -837,7 +839,7 @@ VSAD(16)
 #define VSSE_INTRA(size) \
 static int vsse_intra ## size ## _c(MpegEncContext *c, \
                                     uint8_t *s, uint8_t *dummy, \
-                                    int stride, int h) \
+                                    ptrdiff_t stride, int h) \
 { \
     int score = 0, x, y; \
 \
@@ -858,7 +860,7 @@ VSSE_INTRA(16)
 
 #define VSSE(size) \
 static int vsse ## size ## _c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, \
-                              int stride, int h) \
+                              ptrdiff_t stride, int h) \
 { \
     int score = 0, x, y; \
 \
@@ -876,7 +878,7 @@ VSSE(16)
 
 #define WRAPPER8_16_SQ(name8, name16) \
 static int name16(MpegEncContext *s, uint8_t *dst, uint8_t *src, \
-                  int stride, int h) \
+                  ptrdiff_t stride, int h) \
 { \
     int score = 0; \
 \
@@ -47,7 +47,8 @@ struct MpegEncContext;
  * width < 8 are neither used nor implemented. */
 typedef int (*me_cmp_func)(struct MpegEncContext *c,
                            uint8_t *blk1 /* align width (8 or 16) */,
-                           uint8_t *blk2 /* align 1 */, int line_size, int h);
+                           uint8_t *blk2 /* align 1 */, ptrdiff_t stride,
+                           int h);
 
 typedef struct MECmpContext {
     int (*sum_abs_dctelem)(int16_t *block /* align 16 */);
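
Editor's note: the `me_cmp_func` typedef in the hunk above is the central type touched by this change; every SAD/SSE/Hadamard comparator in the diff has to switch to the `ptrdiff_t` stride at the same time because callers only see the pointer type. A minimal standalone sketch of that contract is below; `compare()`, `main()` and the opaque `MpegEncContext` stand-in are illustrative, not FFmpeg code, while `zero_cmp()` mirrors the trivial comparator in the diff.

#include <stddef.h>
#include <stdint.h>

struct MpegEncContext;                       /* opaque here, as in me_cmp.h */

/* Same shape as the updated me_cmp_func typedef. */
typedef int (*me_cmp_func)(struct MpegEncContext *c,
                           uint8_t *blk1, uint8_t *blk2,
                           ptrdiff_t stride, int h);

/* Trivial comparator in the spirit of zero_cmp() from the diff. */
static int zero_cmp(struct MpegEncContext *c, uint8_t *a, uint8_t *b,
                    ptrdiff_t stride, int h)
{
    return 0;
}

/* A caller goes through the function pointer, so the C, ARM, AltiVec and
 * x86 implementations must all agree on the ptrdiff_t signature. */
static int compare(me_cmp_func cmp, struct MpegEncContext *c,
                   uint8_t *cur, uint8_t *ref, ptrdiff_t stride, int h)
{
    return cmp(c, cur, ref, stride, h);
}

int main(void)
{
    uint8_t buf[64] = { 0 };
    return compare(zero_cmp, (struct MpegEncContext *)0, buf, buf, 8, 8);
}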
@@ -290,7 +290,7 @@ static int cmp_qpel(MpegEncContext *s, const int x, const int y, const int subx,
 #include "motion_est_template.c"
 
 static int zero_cmp(MpegEncContext *s, uint8_t *a, uint8_t *b,
-                    int stride, int h)
+                    ptrdiff_t stride, int h)
 {
     return 0;
 }
@@ -55,7 +55,7 @@
 }
 #endif
 static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                            int line_size, int h)
+                            ptrdiff_t stride, int h)
 {
     int i;
     int __attribute__((aligned(16))) s = 0;
@@ -83,8 +83,8 @@ static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Add each 4 pixel group together and put 4 results into sad. */
         sad = vec_sum4s(t5, sad);
 
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
     /* Sum up the four partial sums, and put the result into s. */
     sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
@@ -95,7 +95,7 @@ static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 }
 
 static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                            int line_size, int h)
+                            ptrdiff_t stride, int h)
 {
     int i;
     int __attribute__((aligned(16))) s = 0;
@@ -105,9 +105,9 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
     vector signed int sumdiffs;
 
-    uint8_t *pix3 = pix2 + line_size;
+    uint8_t *pix3 = pix2 + stride;
 
-    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
+    /* Due to the fact that pix3 = pix2 + stride, the pix3 of one
      * iteration becomes pix2 in the next iteration. We can use this
      * fact to avoid a potentially expensive unaligned read, each
      * time around the loop.
@@ -132,9 +132,9 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Add each 4 pixel group together and put 4 results into sad. */
         sad = vec_sum4s(t5, sad);
 
-        pix1 += line_size;
+        pix1 += stride;
         pix2v = pix3v;
-        pix3 += line_size;
+        pix3 += stride;
     }
 
     /* Sum up the four partial sums, and put the result into s. */
@@ -145,11 +145,11 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 }
 
 static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                             int line_size, int h)
+                             ptrdiff_t stride, int h)
 {
     int i;
     int __attribute__((aligned(16))) s = 0;
-    uint8_t *pix3 = pix2 + line_size;
+    uint8_t *pix3 = pix2 + stride;
     const vector unsigned char zero =
         (const vector unsigned char) vec_splat_u8(0);
     const vector unsigned short two =
@@ -163,7 +163,7 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     vector unsigned char perm1, perm2, pix2v, pix2iv;
     GET_PERM(perm1, perm2, pix2);
 
-    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
+    /* Due to the fact that pix3 = pix2 + stride, the pix3 of one
      * iteration becomes pix2 in the next iteration. We can use this
      * fact to avoid a potentially expensive unaligned read, as well
      * as some splitting, and vector addition each time around the loop.
@@ -219,8 +219,8 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Add each 4 pixel group together and put 4 results into sad. */
         sad = vec_sum4s(t5, sad);
 
-        pix1 += line_size;
-        pix3 += line_size;
+        pix1 += stride;
+        pix3 += stride;
         /* Transfer the calculated values for pix3 into pix2. */
         t1 = t3;
         t2 = t4;
@@ -234,7 +234,7 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 }
 
 static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                         int line_size, int h)
+                         ptrdiff_t stride, int h)
 {
     int i;
     int __attribute__((aligned(16))) s;
@@ -256,8 +256,8 @@ static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Add each 4 pixel group together and put 4 results into sad. */
         sad = vec_sum4s(t5, sad);
 
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
 
     /* Sum up the four partial sums, and put the result into s. */
@@ -269,7 +269,7 @@ static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 }
 
 static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                        int line_size, int h)
+                        ptrdiff_t stride, int h)
 {
     int i;
     int __attribute__((aligned(16))) s;
@@ -298,8 +298,8 @@ static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Add each 4 pixel group together and put 4 results into sad. */
         sad = vec_sum4s(t5, sad);
 
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
 
     /* Sum up the four partial sums, and put the result into s. */
@@ -313,7 +313,7 @@ static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 /* Sum of Squared Errors for an 8x8 block, AltiVec-enhanced.
  * It's the sad8_altivec code above w/ squaring added. */
 static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                        int line_size, int h)
+                        ptrdiff_t stride, int h)
 {
     int i;
     int __attribute__((aligned(16))) s;
@@ -343,8 +343,8 @@ static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Square the values and add them to our sum. */
         sum = vec_msum(t5, t5, sum);
 
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
 
     /* Sum up the four partial sums, and put the result into s. */
@@ -358,7 +358,7 @@ static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 /* Sum of Squared Errors for a 16x16 block, AltiVec-enhanced.
  * It's the sad16_altivec code above w/ squaring added. */
 static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                         int line_size, int h)
+                         ptrdiff_t stride, int h)
 {
     int i;
     int __attribute__((aligned(16))) s;
@@ -383,8 +383,8 @@ static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         /* Square the values and add them to our sum. */
         sum = vec_msum(t5, t5, sum);
 
-        pix1 += line_size;
-        pix2 += line_size;
+        pix1 += stride;
+        pix2 += stride;
     }
 
     /* Sum up the four partial sums, and put the result into s. */
@@ -396,7 +396,7 @@ static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 }
 
 static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
-                                     uint8_t *src, int stride, int h)
+                                     uint8_t *src, ptrdiff_t stride, int h)
 {
     int __attribute__((aligned(16))) sum;
     register const vector unsigned char vzero =
@@ -522,7 +522,7 @@ static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
  * but xlc goes to around 660 on the regular C code...
  */
 static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
-                                      uint8_t *src, int stride, int h)
+                                      uint8_t *src, ptrdiff_t stride, int h)
 {
     int __attribute__((aligned(16))) sum;
     register vector signed short
@@ -713,7 +713,7 @@ static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
 }
 
 static int hadamard8_diff16_altivec(MpegEncContext *s, uint8_t *dst,
-                                    uint8_t *src, int stride, int h)
+                                    uint8_t *src, ptrdiff_t stride, int h)
 {
     int score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
 
@@ -745,7 +745,7 @@ void ff_spatial_idwt(IDWTELEM *buffer, IDWTELEM *temp, int width, int height,
                          decomposition_count, y);
 }
 
-static inline int w_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size,
+static inline int w_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size,
                       int w, int h, int type)
 {
     int s, i, j;
@@ -814,32 +814,32 @@ static inline int w_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, in
     return s >> 9;
 }
 
-static int w53_8_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+static int w53_8_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 8, h, 1);
 }
 
-static int w97_8_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+static int w97_8_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 8, h, 0);
 }
 
-static int w53_16_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+static int w53_16_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 16, h, 1);
 }
 
-static int w97_16_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+static int w97_16_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 16, h, 0);
 }
 
-int ff_w53_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+int ff_w53_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 32, h, 1);
 }
 
-int ff_w97_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
+int ff_w97_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 32, h, 0);
 }
@@ -105,8 +105,8 @@ void ff_snow_inner_add_yblock(const uint8_t *obmc, const int obmc_stride,
                               int src_y, int src_stride, slice_buffer *sb,
                               int add, uint8_t *dst8);
 
-int ff_w53_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
-int ff_w97_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
+int ff_w53_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h);
+int ff_w97_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h);
 
 void ff_spatial_dwt(int *buffer, int *temp, int width, int height, int stride,
                     int type, int decomposition_count);
@@ -215,7 +215,7 @@ hadamard8_16_wrapper %1, 3
 %elif cpuflag(mmx)
 ALIGN 16
 ; int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1,
-;                               uint8_t *src2, int stride, int h)
+;                               uint8_t *src2, ptrdiff_t stride, int h)
 ; r0 = void *s = unused, int h = unused (always 8)
 ; note how r1, r2 and r3 are not clobbered in this function, so 16x16
 ; can simply call this 2x2x (and that's why we access rsp+gprsize
@@ -280,7 +280,7 @@ INIT_XMM ssse3
 HADAMARD8_DIFF 9
 
 ; int ff_sse*_*(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-;               int line_size, int h)
+;               ptrdiff_t line_size, int h)
 
 %macro SUM_SQUARED_ERRORS 1
 cglobal sse%1, 5,5,8, v, pix1, pix2, lsize, h
@@ -395,7 +395,7 @@ INIT_XMM ssse3
 SUM_ABS_DCTELEM 6, 2
 
 ;------------------------------------------------------------------------------
-; int ff_hf_noise*_mmx(uint8_t *pix1, int lsize, int h)
+; int ff_hf_noise*_mmx(uint8_t *pix1, ptrdiff_t lsize, int h)
 ;------------------------------------------------------------------------------
 ; %1 = 8/16. %2-5=m#
 %macro HF_NOISE_PART1 5
@@ -437,7 +437,6 @@ SUM_ABS_DCTELEM 6, 2
 ; %1 = 8/16
 %macro HF_NOISE 1
 cglobal hf_noise%1, 3,3,0, pix1, lsize, h
-    movsxdifnidn lsizeq, lsized
     sub      hd, 2
     pxor     m7, m7
     pxor     m6, m6
@@ -472,7 +471,7 @@ HF_NOISE 8
 HF_NOISE 16
 
 ;---------------------------------------------------------------------------------------
-;int ff_sad_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int stride, int h);
+;int ff_sad_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
 ;---------------------------------------------------------------------------------------
 ;%1 = 8/16
 %macro SAD 1
@@ -527,7 +526,7 @@ INIT_XMM sse2
 SAD 16
 
 ;------------------------------------------------------------------------------------------
-;int ff_sad_x2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int stride, int h);
+;int ff_sad_x2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
 ;------------------------------------------------------------------------------------------
 ;%1 = 8/16
 %macro SAD_X2 1
@@ -604,7 +603,7 @@ INIT_XMM sse2
 SAD_X2 16
 
 ;------------------------------------------------------------------------------------------
-;int ff_sad_y2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int stride, int h);
+;int ff_sad_y2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
 ;------------------------------------------------------------------------------------------
 ;%1 = 8/16
 %macro SAD_Y2 1
@@ -674,7 +673,7 @@ INIT_XMM sse2
 SAD_Y2 16
 
 ;-------------------------------------------------------------------------------------------
-;int ff_sad_approx_xy2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int stride, int h);
+;int ff_sad_approx_xy2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
 ;-------------------------------------------------------------------------------------------
 ;%1 = 8/16
 %macro SAD_APPROX_XY2 1
@@ -776,7 +775,7 @@ SAD_APPROX_XY2 16
 
 ;--------------------------------------------------------------------
 ;int ff_vsad_intra(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-;                  int line_size, int h);
+;                  ptrdiff_t line_size, int h);
 ;--------------------------------------------------------------------
 ; %1 = 8/16
 %macro VSAD_INTRA 1
@@ -837,7 +836,7 @@ VSAD_INTRA 16
 
 ;---------------------------------------------------------------------
 ;int ff_vsad_approx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-;                   int line_size, int h);
+;                   ptrdiff_t line_size, int h);
 ;---------------------------------------------------------------------
 ; %1 = 8/16
 %macro VSAD_APPROX 1
@@ -34,55 +34,55 @@ int ff_sum_abs_dctelem_mmxext(int16_t *block);
 int ff_sum_abs_dctelem_sse2(int16_t *block);
 int ff_sum_abs_dctelem_ssse3(int16_t *block);
 int ff_sse8_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                int line_size, int h);
+                ptrdiff_t stride, int h);
 int ff_sse16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                 int line_size, int h);
+                 ptrdiff_t stride, int h);
 int ff_sse16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                  int line_size, int h);
+                  ptrdiff_t stride, int h);
-int ff_hf_noise8_mmx(uint8_t *pix1, int lsize, int h);
+int ff_hf_noise8_mmx(uint8_t *pix1, ptrdiff_t stride, int h);
-int ff_hf_noise16_mmx(uint8_t *pix1, int lsize, int h);
+int ff_hf_noise16_mmx(uint8_t *pix1, ptrdiff_t stride, int h);
 int ff_sad8_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                   int stride, int h);
+                   ptrdiff_t stride, int h);
 int ff_sad16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                    int stride, int h);
+                    ptrdiff_t stride, int h);
 int ff_sad16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                  int stride, int h);
+                  ptrdiff_t stride, int h);
 int ff_sad8_x2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                      int stride, int h);
+                      ptrdiff_t stride, int h);
 int ff_sad16_x2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                       int stride, int h);
+                       ptrdiff_t stride, int h);
 int ff_sad16_x2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                     int stride, int h);
+                     ptrdiff_t stride, int h);
 int ff_sad8_y2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                      int stride, int h);
+                      ptrdiff_t stride, int h);
 int ff_sad16_y2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                       int stride, int h);
+                       ptrdiff_t stride, int h);
 int ff_sad16_y2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                     int stride, int h);
+                     ptrdiff_t stride, int h);
 int ff_sad8_approx_xy2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                              int stride, int h);
+                              ptrdiff_t stride, int h);
 int ff_sad16_approx_xy2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                               int stride, int h);
+                               ptrdiff_t stride, int h);
 int ff_sad16_approx_xy2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                             int stride, int h);
+                             ptrdiff_t stride, int h);
 int ff_vsad_intra8_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                          int line_size, int h);
+                          ptrdiff_t stride, int h);
 int ff_vsad_intra16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                           int line_size, int h);
+                           ptrdiff_t stride, int h);
 int ff_vsad_intra16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                         int line_size, int h);
+                         ptrdiff_t stride, int h);
 int ff_vsad8_approx_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                           int line_size, int h);
+                           ptrdiff_t stride, int h);
 int ff_vsad16_approx_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                            int line_size, int h);
+                            ptrdiff_t stride, int h);
 int ff_vsad16_approx_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                          int line_size, int h);
+                          ptrdiff_t stride, int h);
 
 #define hadamard_func(cpu) \
 int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1, \
-                              uint8_t *src2, int stride, int h); \
+                              uint8_t *src2, ptrdiff_t stride, int h); \
 int ff_hadamard8_diff16_ ## cpu(MpegEncContext *s, uint8_t *src1, \
-                                uint8_t *src2, int stride, int h);
+                                uint8_t *src2, ptrdiff_t stride, int h);
 
 hadamard_func(mmx)
 hadamard_func(mmxext)
@@ -91,16 +91,16 @@ hadamard_func(ssse3)
 
 #if HAVE_YASM
 static int nsse16_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
-                      int line_size, int h)
+                      ptrdiff_t stride, int h)
 {
     int score1, score2;
 
     if (c)
-        score1 = c->mecc.sse[0](c, pix1, pix2, line_size, h);
+        score1 = c->mecc.sse[0](c, pix1, pix2, stride, h);
     else
-        score1 = ff_sse16_mmx(c, pix1, pix2, line_size, h);
-    score2 = ff_hf_noise16_mmx(pix1, line_size, h) + ff_hf_noise8_mmx(pix1+8, line_size, h)
-           - ff_hf_noise16_mmx(pix2, line_size, h) - ff_hf_noise8_mmx(pix2+8, line_size, h);
+        score1 = ff_sse16_mmx(c, pix1, pix2, stride, h);
+    score2 = ff_hf_noise16_mmx(pix1, stride, h) + ff_hf_noise8_mmx(pix1+8, stride, h)
+           - ff_hf_noise16_mmx(pix2, stride, h) - ff_hf_noise8_mmx(pix2+8, stride, h);
 
     if (c)
         return score1 + FFABS(score2) * c->avctx->nsse_weight;
@@ -109,11 +109,11 @@ static int nsse16_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
 }
 
 static int nsse8_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
-                     int line_size, int h)
+                     ptrdiff_t stride, int h)
 {
-    int score1 = ff_sse8_mmx(c, pix1, pix2, line_size, h);
-    int score2 = ff_hf_noise8_mmx(pix1, line_size, h) -
-                 ff_hf_noise8_mmx(pix2, line_size, h);
+    int score1 = ff_sse8_mmx(c, pix1, pix2, stride, h);
+    int score2 = ff_hf_noise8_mmx(pix1, stride, h) -
+                 ff_hf_noise8_mmx(pix2, stride, h);
 
     if (c)
         return score1 + FFABS(score2) * c->avctx->nsse_weight;
@@ -126,12 +126,12 @@ static int nsse8_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
 #if HAVE_INLINE_ASM
 
 static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
-                            int line_size, int h)
+                            ptrdiff_t stride, int h)
 {
     int tmp;
 
     av_assert2((((int) pix) & 7) == 0);
-    av_assert2((line_size & 7) == 0);
+    av_assert2((stride & 7) == 0);
 
 #define SUM(in0, in1, out0, out1) \
     "movq (%0), %%mm2\n" \
@@ -182,7 +182,7 @@ static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
         "paddw %%mm6, %%mm0\n"
         "movd %%mm0, %1\n"
         : "+r" (pix), "=r" (tmp)
-        : "r" ((x86_reg) line_size), "m" (h)
+        : "r" (stride), "m" (h)
         : "%ecx");
 
     return tmp & 0xFFFF;
@@ -190,13 +190,13 @@ static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
 #undef SUM
 
 static int vsad16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
-                      int line_size, int h)
+                      ptrdiff_t stride, int h)
 {
     int tmp;
 
     av_assert2((((int) pix1) & 7) == 0);
     av_assert2((((int) pix2) & 7) == 0);
-    av_assert2((line_size & 7) == 0);
+    av_assert2((stride & 7) == 0);
 
 #define SUM(in0, in1, out0, out1) \
     "movq (%0), %%mm2\n" \
@@ -263,7 +263,7 @@ static int vsad16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
         "paddw %%mm6, %%mm0\n"
        "movd %%mm0, %2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
-        : "r" ((x86_reg) line_size), "m" (h)
+        : "r" (stride), "m" (h)
        : "%ecx");

    return tmp & 0x7FFF;
@@ -276,9 +276,10 @@ DECLARE_ASM_CONST(8, uint64_t, round_tab)[3] = {
     0x0002000200020002ULL,
 };
 
-static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
+static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2,
+                              ptrdiff_t stride, int h)
 {
-    x86_reg len = -(x86_reg)stride * h;
+    x86_reg len = -stride * h;
     __asm__ volatile (
         ".p2align 4 \n\t"
         "1: \n\t"
@@ -308,13 +309,13 @@ static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
         "add %3, %%"REG_a" \n\t"
         " js 1b \n\t"
         : "+a" (len)
-        : "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg) stride));
+        : "r" (blk1 - len), "r" (blk2 - len), "r" (stride));
 }
 
 static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
-                              int stride, int h)
+                              ptrdiff_t stride, int h)
 {
-    x86_reg len = -(x86_reg)stride * h;
+    x86_reg len = -stride * h;
     __asm__ volatile (
         ".p2align 4 \n\t"
         "1: \n\t"
@@ -347,12 +348,13 @@ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
         " js 1b \n\t"
         : "+a" (len)
         : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len),
-          "r" ((x86_reg) stride));
+          "r" (stride));
 }
 
-static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
+static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2,
+                              ptrdiff_t stride, int h)
 {
-    x86_reg len = -(x86_reg)stride * h;
+    x86_reg len = -stride * h;
     __asm__ volatile (
         "movq (%1, %%"REG_a"), %%mm0 \n\t"
         "movq 1(%1, %%"REG_a"), %%mm2 \n\t"
@@ -400,7 +402,7 @@ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
         " js 1b \n\t"
         : "+a" (len)
         : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len),
-          "r" ((x86_reg) stride), "m" (round_tab[2]));
+          "r" (stride), "m" (round_tab[2]));
 }
 
 static inline int sum_mmx(void)
@@ -418,19 +420,21 @@ static inline int sum_mmx(void)
     return ret & 0xFFFF;
 }
 
-static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
+static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2,
+                                ptrdiff_t stride, int h)
 {
     sad8_2_mmx(blk1, blk1 + 1, blk2, stride, h);
 }
 
-static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
+static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2,
+                                ptrdiff_t stride, int h)
 {
     sad8_2_mmx(blk1, blk1 + stride, blk2, stride, h);
 }
 
 #define PIX_SAD(suf) \
 static int sad8_ ## suf(MpegEncContext *v, uint8_t *blk2, \
-                        uint8_t *blk1, int stride, int h) \
+                        uint8_t *blk1, ptrdiff_t stride, int h) \
 { \
     av_assert2(h == 8); \
     __asm__ volatile ( \
@@ -444,7 +448,7 @@ static int sad8_ ## suf(MpegEncContext *v, uint8_t *blk2, \
 } \
 \
 static int sad8_x2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
-                           uint8_t *blk1, int stride, int h) \
+                           uint8_t *blk1, ptrdiff_t stride, int h) \
 { \
     av_assert2(h == 8); \
     __asm__ volatile ( \
@@ -459,7 +463,7 @@ static int sad8_x2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
 } \
 \
 static int sad8_y2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
-                           uint8_t *blk1, int stride, int h) \
+                           uint8_t *blk1, ptrdiff_t stride, int h) \
 { \
     av_assert2(h == 8); \
     __asm__ volatile ( \
@@ -474,7 +478,7 @@ static int sad8_y2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
 } \
 \
 static int sad8_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
-                            uint8_t *blk1, int stride, int h) \
+                            uint8_t *blk1, ptrdiff_t stride, int h) \
 { \
     av_assert2(h == 8); \
     __asm__ volatile ( \
@@ -488,7 +492,7 @@ static int sad8_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
 } \
 \
 static int sad16_ ## suf(MpegEncContext *v, uint8_t *blk2, \
-                         uint8_t *blk1, int stride, int h) \
+                         uint8_t *blk1, ptrdiff_t stride, int h) \
 { \
     __asm__ volatile ( \
         "pxor %%mm7, %%mm7 \n\t" \
@@ -502,7 +506,7 @@ static int sad16_ ## suf(MpegEncContext *v, uint8_t *blk2, \
 } \
 \
 static int sad16_x2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
-                            uint8_t *blk1, int stride, int h) \
+                            uint8_t *blk1, ptrdiff_t stride, int h) \
 { \
     __asm__ volatile ( \
         "pxor %%mm7, %%mm7 \n\t" \
@@ -517,7 +521,7 @@ static int sad16_x2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
 } \
 \
 static int sad16_y2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
-                            uint8_t *blk1, int stride, int h) \
+                            uint8_t *blk1, ptrdiff_t stride, int h) \
 { \
     __asm__ volatile ( \
         "pxor %%mm7, %%mm7 \n\t" \
@@ -532,7 +536,7 @@ static int sad16_y2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
 } \
 \
 static int sad16_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
-                             uint8_t *blk1, int stride, int h) \
+                             uint8_t *blk1, ptrdiff_t stride, int h) \
 { \
     __asm__ volatile ( \
         "pxor %%mm7, %%mm7 \n\t" \