
vp9mc/x86: rename ff_* to ff_vp9_*

Signed-off-by: Anton Khirnov <anton@khirnov.net>
Clément Bœsch authored on 2014-03-28 22:33:51 +01:00; committed by Anton Khirnov
parent 8be8444d01
commit 3cda179f18
2 changed files with 54 additions and 54 deletions

libavcodec/x86/vp9dsp_init.c

@@ -30,7 +30,7 @@
 #if HAVE_YASM
 #define fpel_func(avg, sz, opt) \
-void ff_ ## avg ## sz ## _ ## opt(uint8_t *dst, const uint8_t *src, \
+void ff_vp9_ ## avg ## sz ## _ ## opt(uint8_t *dst, const uint8_t *src, \
                                   ptrdiff_t dst_stride, \
                                   ptrdiff_t src_stride, \
                                   int h, int mx, int my)
@@ -49,7 +49,7 @@ fpel_func(avg, 64, sse2);
 #define mc_func(avg, sz, dir, opt) \
 void \
-ff_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \
+ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \
                                                        const uint8_t *src, \
                                                        ptrdiff_t dst_stride, \
                                                        ptrdiff_t src_stride, \
@@ -73,19 +73,19 @@ mc_funcs(16);
 #define mc_rep_func(avg, sz, hsz, dir, opt) \
 static av_always_inline void \
-ff_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \
+ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \
                                                        const uint8_t *src, \
                                                        ptrdiff_t dst_stride, \
                                                        ptrdiff_t src_stride, \
                                                        int h, \
                                                        const int8_t (*filter)[16]) \
 { \
-    ff_ ## avg ## _8tap_1d_ ## dir ## _ ## hsz ## _ ## opt(dst, src, \
+    ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## hsz ## _ ## opt(dst, src, \
                                                             dst_stride, \
                                                             src_stride, \
                                                             h, \
                                                             filter); \
-    ff_ ## avg ## _8tap_1d_ ## dir ## _ ## hsz ## _ ## opt(dst + hsz, \
+    ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## hsz ## _ ## opt(dst + hsz, \
                                                             src + hsz, \
                                                             dst_stride, \
                                                             src_stride, \
@@ -118,11 +118,11 @@ op ## _8tap_ ## fname ## _ ## sz ## hv_ssse3(uint8_t *dst, \
                                              int h, int mx, int my) \
 { \
     LOCAL_ALIGNED_16(uint8_t, temp, [71 * 64]); \
-    ff_put_8tap_1d_h_ ## sz ## _ssse3(temp, src - 3 * src_stride, \
+    ff_vp9_put_8tap_1d_h_ ## sz ## _ssse3(temp, src - 3 * src_stride, \
                                           64, src_stride, \
                                           h + 7, \
                                           ff_filters_ssse3[f][mx - 1]); \
-    ff_ ## op ## _8tap_1d_v_ ## sz ## _ssse3(dst, temp + 3 * 64, \
+    ff_vp9_ ## op ## _8tap_1d_v_ ## sz ## _ssse3(dst, temp + 3 * 64, \
                                                  dst_stride, 64, \
                                                  h, \
                                                  ff_filters_ssse3[f][my - 1]); \
@@ -156,9 +156,9 @@ op ## _8tap_ ## fname ## _ ## sz ## dir ## _ssse3(uint8_t *dst, \
                                                   int h, int mx, \
                                                   int my) \
 { \
-    ff_ ## op ## _8tap_1d_ ## dir ## _ ## sz ## _ssse3(dst, src, \
+    ff_vp9_ ## op ## _8tap_1d_ ## dir ## _ ## sz ## _ssse3(dst, src, \
                                                            dst_stride, \
                                                            src_stride, h, \
                                                            ff_filters_ssse3[f][dvar - 1]); \
 }
@@ -197,7 +197,7 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp)
     dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = \
     dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = \
     dsp->mc[idx1][FILTER_8TAP_SHARP  ][idx2][0][0] = \
-    dsp->mc[idx1][FILTER_BILINEAR    ][idx2][0][0] = ff_ ## type ## sz ## _ ## opt
+    dsp->mc[idx1][FILTER_BILINEAR    ][idx2][0][0] = ff_vp9_ ## type ## sz ## _ ## opt
 #define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type, opt) \
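
For reference, the rename only changes the generated symbol names; the ## token pasting is unchanged. As a sketch reproduced by hand (not part of the commit), the instantiation fpel_func(avg, 64, sse2) visible in the hunk context above now declares:

/* expansion of fpel_func(avg, 64, sse2); after this commit:
 * ff_vp9_ ## avg ## sz ## _ ## opt  ->  ff_vp9_avg64_sse2 */
void ff_vp9_avg64_sse2(uint8_t *dst, const uint8_t *src,
                       ptrdiff_t dst_stride,
                       ptrdiff_t src_stride,
                       int h, int mx, int my);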

libavcodec/x86/vp9mc.asm

@@ -86,7 +86,7 @@ SECTION .text
 %macro filter_h_fn 1
 %assign %%px mmsize/2
-cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, src, dstride, sstride, h, filtery
+cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, src, dstride, sstride, h, filtery
     mova        m6, [pw_256]
     mova        m7, [filteryq+ 0]
 %if ARCH_X86_64 && mmsize > 8
@@ -147,7 +147,7 @@ filter_h_fn avg
 %if ARCH_X86_64
 %macro filter_hx2_fn 1
 %assign %%px mmsize
-cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 14, dst, src, dstride, sstride, h, filtery
+cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 14, dst, src, dstride, sstride, h, filtery
     mova       m13, [pw_256]
     mova        m8, [filteryq+ 0]
     mova        m9, [filteryq+16]
@@ -203,9 +203,9 @@ filter_hx2_fn avg
 %macro filter_v_fn 1
 %assign %%px mmsize/2
 %if ARCH_X86_64
-cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, src, dstride, sstride, h, filtery, src4, sstride3
+cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, src, dstride, sstride, h, filtery, src4, sstride3
 %else
-cglobal %1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, src, dstride, sstride, filtery, src4, sstride3
+cglobal vp9_%1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, src, dstride, sstride, filtery, src4, sstride3
     mov   filteryq, r5mp
 %define hd r4mp
 %endif
@@ -276,7 +276,7 @@ filter_v_fn avg
 %macro filter_vx2_fn 1
 %assign %%px mmsize
-cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 14, dst, src, dstride, sstride, h, filtery, src4, sstride3
+cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 14, dst, src, dstride, sstride, h, filtery, src4, sstride3
     mova       m13, [pw_256]
     lea  sstride3q, [sstrideq*3]
     lea      src4q, [srcq+sstrideq]
@@ -346,11 +346,11 @@ filter_vx2_fn avg
 %endif
 %if %2 <= 16
-cglobal %1%2, 5, 7, 4, dst, src, dstride, sstride, h, dstride3, sstride3
+cglobal vp9_%1%2, 5, 7, 4, dst, src, dstride, sstride, h, dstride3, sstride3
     lea  sstride3q, [sstrideq*3]
     lea  dstride3q, [dstrideq*3]
 %else
-cglobal %1%2, 5, 5, 4, dst, src, dstride, sstride, h
+cglobal vp9_%1%2, 5, 5, 4, dst, src, dstride, sstride, h
 %endif
 .loop:
     %%srcfn     m0, [srcq]
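
The C and assembly sides stay in sync because cglobal mangles its argument with the build's symbol prefix and the active instruction-set suffix. A hand-written sketch of the resulting pairing, assuming x86inc.asm applies the ff_ prefix and an _ssse3 suffix, so that the filter_hx2_fn put instantiation under mmsize == 16 (assembled as cglobal vp9_put_8tap_1d_h_16) defines the symbol below; the exact C-side instantiation via mc_func is assumed, with the signature following mc_func/mc_rep_func in the first file:

/* symbol defined by "cglobal vp9_put_8tap_1d_h_16" and declared on the
 * C side by the renamed mc_func macros (sketch, not part of the commit) */
void ff_vp9_put_8tap_1d_h_16_ssse3(uint8_t *dst, const uint8_t *src,
                                   ptrdiff_t dst_stride,
                                   ptrdiff_t src_stride,
                                   int h, const int8_t (*filter)[16]);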