/*
 * Copyright (c) 2013 Seppo Tomperi
 * Copyright (c) 2013 - 2014 Pierre-Edouard Lepere
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"

#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/get_bits.h" /* required for hevcdsp.h GetBitContext */
#include "libavcodec/hevcdsp.h"
#include "libavcodec/x86/hevcdsp.h"

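/* Prototypes for the external (assembly) deblocking-filter functions:
 * LFC_* declares the chroma filters and LFL_* the luma filters, one
 * horizontal and one vertical variant per bit depth and instruction set. */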
#define LFC_FUNC(DIR, DEPTH, OPT) \
void ff_hevc_ ## DIR ## _loop_filter_chroma_ ## DEPTH ## _ ## OPT(uint8_t *pix, ptrdiff_t stride, int *tc, uint8_t *no_p, uint8_t *no_q);

#define LFL_FUNC(DIR, DEPTH, OPT) \
void ff_hevc_ ## DIR ## _loop_filter_luma_ ## DEPTH ## _ ## OPT(uint8_t *pix, ptrdiff_t stride, int beta, int *tc, uint8_t *no_p, uint8_t *no_q);

#define LFC_FUNCS(type, depth, opt) \
    LFC_FUNC(h, depth, opt)         \
    LFC_FUNC(v, depth, opt)

#define LFL_FUNCS(type, depth, opt) \
    LFL_FUNC(h, depth, opt)         \
    LFL_FUNC(v, depth, opt)

LFC_FUNCS(uint8_t,  8, sse2)
LFC_FUNCS(uint8_t, 10, sse2)
LFC_FUNCS(uint8_t, 12, sse2)
LFC_FUNCS(uint8_t,  8, avx)
LFC_FUNCS(uint8_t, 10, avx)
LFC_FUNCS(uint8_t, 12, avx)
LFL_FUNCS(uint8_t,  8, sse2)
LFL_FUNCS(uint8_t, 10, sse2)
LFL_FUNCS(uint8_t, 12, sse2)
LFL_FUNCS(uint8_t,  8, ssse3)
LFL_FUNCS(uint8_t, 10, ssse3)
LFL_FUNCS(uint8_t, 12, ssse3)
LFL_FUNCS(uint8_t,  8, avx)
LFL_FUNCS(uint8_t, 10, avx)
LFL_FUNCS(uint8_t, 12, avx)

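/* DC-only IDCT prototypes: one function per transform size, declared for
 * 8-, 10- and 12-bit depths and the given instruction set. */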
#define IDCT_FUNCS(W, opt) \
void ff_hevc_idct_ ## W ## _dc_8_ ## opt(int16_t *coeffs);  \
void ff_hevc_idct_ ## W ## _dc_10_ ## opt(int16_t *coeffs); \
void ff_hevc_idct_ ## W ## _dc_12_ ## opt(int16_t *coeffs)

IDCT_FUNCS(4x4,   mmxext);
IDCT_FUNCS(8x8,   mmxext);
IDCT_FUNCS(8x8,   sse2);
IDCT_FUNCS(16x16, sse2);
IDCT_FUNCS(32x32, sse2);
IDCT_FUNCS(16x16, avx2);
IDCT_FUNCS(32x32, avx2);

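/* The mc_rep_* wrappers synthesize motion-compensation functions for block
 * widths that have no dedicated kernel: they walk across the block in
 * 'step'-pixel columns and call the narrower kernel once per column.  The
 * plain, uni (unidirectional output) and bi (bidirectional, with an extra
 * int16_t source) variants are wrapped the same way. */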
#define mc_rep_func(name, bitd, step, W, opt) \
void ff_hevc_put_hevc_##name##W##_##bitd##_##opt(int16_t *_dst, \
                                                 uint8_t *_src, ptrdiff_t _srcstride, int height, \
                                                 intptr_t mx, intptr_t my, int width) \
{ \
    int i; \
    uint8_t *src; \
    int16_t *dst; \
    for (i = 0; i < W; i += step) { \
        src = _src + (i * ((bitd + 7) / 8)); \
        dst = _dst + i; \
        ff_hevc_put_hevc_##name##step##_##bitd##_##opt(dst, src, _srcstride, height, mx, my, width); \
    } \
}

#define mc_rep_uni_func(name, bitd, step, W, opt) \
void ff_hevc_put_hevc_uni_##name##W##_##bitd##_##opt(uint8_t *_dst, ptrdiff_t dststride, \
                                                     uint8_t *_src, ptrdiff_t _srcstride, int height, \
                                                     intptr_t mx, intptr_t my, int width) \
{ \
    int i; \
    uint8_t *src; \
    uint8_t *dst; \
    for (i = 0; i < W; i += step) { \
        src = _src + (i * ((bitd + 7) / 8)); \
        dst = _dst + (i * ((bitd + 7) / 8)); \
        ff_hevc_put_hevc_uni_##name##step##_##bitd##_##opt(dst, dststride, src, _srcstride, \
                                                           height, mx, my, width); \
    } \
}

#define mc_rep_bi_func(name, bitd, step, W, opt) \
void ff_hevc_put_hevc_bi_##name##W##_##bitd##_##opt(uint8_t *_dst, ptrdiff_t dststride, uint8_t *_src, \
                                                    ptrdiff_t _srcstride, int16_t *_src2, \
                                                    int height, intptr_t mx, intptr_t my, int width) \
{ \
    int i; \
    uint8_t *src; \
    uint8_t *dst; \
    int16_t *src2; \
    for (i = 0; i < W; i += step) { \
        src  = _src  + (i * ((bitd + 7) / 8)); \
        dst  = _dst  + (i * ((bitd + 7) / 8)); \
        src2 = _src2 + i; \
        ff_hevc_put_hevc_bi_##name##step##_##bitd##_##opt(dst, dststride, src, _srcstride, src2, \
                                                          height, mx, my, width); \
    } \
}

#define mc_rep_funcs(name, bitd, step, W, opt) \
    mc_rep_func(name, bitd, step, W, opt)      \
    mc_rep_uni_func(name, bitd, step, W, opt)  \
    mc_rep_bi_func(name, bitd, step, W, opt)

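/* mc_rep_*2 is the same idea for widths that split more naturally into two
 * unequal calls (step1 + step2) instead of a uniform loop. */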
#define mc_rep_func2(name, bitd, step1, step2, W, opt) \
void ff_hevc_put_hevc_##name##W##_##bitd##_##opt(int16_t *dst, \
                                                 uint8_t *src, ptrdiff_t _srcstride, int height, \
                                                 intptr_t mx, intptr_t my, int width) \
{ \
    ff_hevc_put_hevc_##name##step1##_##bitd##_##opt(dst, src, _srcstride, height, mx, my, width); \
    ff_hevc_put_hevc_##name##step2##_##bitd##_##opt(dst + step1, src + (step1 * ((bitd + 7) / 8)), \
                                                    _srcstride, height, mx, my, width); \
}

#define mc_rep_uni_func2(name, bitd, step1, step2, W, opt) \
void ff_hevc_put_hevc_uni_##name##W##_##bitd##_##opt(uint8_t *dst, ptrdiff_t dststride, \
                                                     uint8_t *src, ptrdiff_t _srcstride, int height, \
                                                     intptr_t mx, intptr_t my, int width) \
{ \
    ff_hevc_put_hevc_uni_##name##step1##_##bitd##_##opt(dst, dststride, src, _srcstride, height, mx, my, width); \
    ff_hevc_put_hevc_uni_##name##step2##_##bitd##_##opt(dst + (step1 * ((bitd + 7) / 8)), dststride, \
                                                        src + (step1 * ((bitd + 7) / 8)), _srcstride, \
                                                        height, mx, my, width); \
}

#define mc_rep_bi_func2(name, bitd, step1, step2, W, opt) \
void ff_hevc_put_hevc_bi_##name##W##_##bitd##_##opt(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
                                                    ptrdiff_t _srcstride, int16_t *src2, \
                                                    int height, intptr_t mx, intptr_t my, int width) \
{ \
    ff_hevc_put_hevc_bi_##name##step1##_##bitd##_##opt(dst, dststride, src, _srcstride, src2, height, mx, my, width); \
    ff_hevc_put_hevc_bi_##name##step2##_##bitd##_##opt(dst + (step1 * ((bitd + 7) / 8)), dststride, \
                                                       src + (step1 * ((bitd + 7) / 8)), _srcstride, \
                                                       src2 + step1, height, mx, my, width); \
}

#define mc_rep_funcs2(name, bitd, step1, step2, W, opt) \
    mc_rep_func2(name, bitd, step1, step2, W, opt)      \
    mc_rep_uni_func2(name, bitd, step1, step2, W, opt)  \
    mc_rep_bi_func2(name, bitd, step1, step2, W, opt)

#if ARCH_X86_64 && HAVE_SSE4_EXTERNAL
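
/* The mc_*_mix_* wrappers split one block between two instruction sets,
 * e.g. a 24-pixel-wide 10-bit block is handled as 16 pixels with opt1 (AVX2)
 * plus 8 pixels with opt2 (SSE4); width4 is the byte offset of the second
 * part within the source row. */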
#define mc_rep_mix_10(name, width1, width2, width3, opt1, opt2, width4) \
void ff_hevc_put_hevc_##name##width1##_10_##opt1(int16_t *dst, uint8_t *src, ptrdiff_t _srcstride, \
                                                 int height, intptr_t mx, intptr_t my, int width) \
{ \
    ff_hevc_put_hevc_##name##width2##_10_##opt1(dst, src, _srcstride, height, mx, my, width); \
    ff_hevc_put_hevc_##name##width3##_10_##opt2(dst + width2, src + width4, _srcstride, height, mx, my, width); \
}

#define mc_bi_rep_mix_10(name, width1, width2, width3, opt1, opt2, width4) \
void ff_hevc_put_hevc_bi_##name##width1##_10_##opt1(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
                                                    ptrdiff_t _srcstride, int16_t *src2, \
                                                    int height, intptr_t mx, intptr_t my, int width) \
{ \
    ff_hevc_put_hevc_bi_##name##width2##_10_##opt1(dst, dststride, src, _srcstride, src2, \
                                                   height, mx, my, width); \
    ff_hevc_put_hevc_bi_##name##width3##_10_##opt2(dst + width4, dststride, src + width4, _srcstride, src2 + width2, \
                                                   height, mx, my, width); \
}

#define mc_uni_rep_mix_10(name, width1, width2, width3, opt1, opt2, width4) \
void ff_hevc_put_hevc_uni_##name##width1##_10_##opt1(uint8_t *dst, ptrdiff_t dststride, \
                                                     uint8_t *src, ptrdiff_t _srcstride, int height, \
                                                     intptr_t mx, intptr_t my, int width) \
{ \
    ff_hevc_put_hevc_uni_##name##width2##_10_##opt1(dst, dststride, src, _srcstride, \
                                                    height, mx, my, width); \
    ff_hevc_put_hevc_uni_##name##width3##_10_##opt2(dst + width4, dststride, src + width4, _srcstride, \
                                                    height, mx, my, width); \
}

#define mc_rep_mixs_10(name, width1, width2, width3, opt1, opt2, width4) \
    mc_rep_mix_10(name, width1, width2, width3, opt1, opt2, width4)      \
    mc_bi_rep_mix_10(name, width1, width2, width3, opt1, opt2, width4)   \
    mc_uni_rep_mix_10(name, width1, width2, width3, opt1, opt2, width4)

#define mc_rep_mix_8(name, width1, width2, width3, opt1, opt2) \
void ff_hevc_put_hevc_##name##width1##_8_##opt1(int16_t *dst, uint8_t *src, ptrdiff_t _srcstride, \
                                                int height, intptr_t mx, intptr_t my, int width) \
{ \
    ff_hevc_put_hevc_##name##width2##_8_##opt1(dst, src, _srcstride, height, mx, my, width); \
    ff_hevc_put_hevc_##name##width3##_8_##opt2(dst + width2, src + width2, _srcstride, height, mx, my, width); \
}

#define mc_bi_rep_mix_8(name, width1, width2, width3, opt1, opt2) \
void ff_hevc_put_hevc_bi_##name##width1##_8_##opt1(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
                                                   ptrdiff_t _srcstride, int16_t *src2, \
                                                   int height, intptr_t mx, intptr_t my, int width) \
{ \
    ff_hevc_put_hevc_bi_##name##width2##_8_##opt1(dst, dststride, src, _srcstride, \
                                                  src2, height, mx, my, width); \
    ff_hevc_put_hevc_bi_##name##width3##_8_##opt2(dst + width2, dststride, src + width2, _srcstride, \
                                                  src2 + width2, height, mx, my, width); \
}

#define mc_uni_rep_mix_8(name, width1, width2, width3, opt1, opt2) \
void ff_hevc_put_hevc_uni_##name##width1##_8_##opt1(uint8_t *dst, ptrdiff_t dststride, \
                                                    uint8_t *src, ptrdiff_t _srcstride, int height, \
                                                    intptr_t mx, intptr_t my, int width) \
{ \
    ff_hevc_put_hevc_uni_##name##width2##_8_##opt1(dst, dststride, src, _srcstride, \
                                                   height, mx, my, width); \
    ff_hevc_put_hevc_uni_##name##width3##_8_##opt2(dst + width2, dststride, src + width2, _srcstride, \
                                                   height, mx, my, width); \
}

#define mc_rep_mixs_8(name, width1, width2, width3, opt1, opt2) \
    mc_rep_mix_8(name, width1, width2, width3, opt1, opt2)      \
    mc_bi_rep_mix_8(name, width1, width2, width3, opt1, opt2)   \
    mc_uni_rep_mix_8(name, width1, width2, width3, opt1, opt2)

#if HAVE_AVX2_EXTERNAL
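
/* AVX2 builds: wide blocks are mostly assembled from the 32-pixel (8-bit)
 * and 16-pixel (10-bit) AVX2 kernels, falling back to SSE4 for the leftover
 * parts of the 24- and 48-pixel widths. */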
mc_rep_mixs_8(pel_pixels, 48, 32, 16, avx2, sse4)
mc_rep_mixs_8(epel_hv,    48, 32, 16, avx2, sse4)
mc_rep_mixs_8(epel_h,     48, 32, 16, avx2, sse4)
mc_rep_mixs_8(epel_v,     48, 32, 16, avx2, sse4)

mc_rep_mix_10(pel_pixels,    24, 16, 8, avx2, sse4, 32)
mc_bi_rep_mix_10(pel_pixels, 24, 16, 8, avx2, sse4, 32)
mc_rep_mixs_10(epel_hv,      24, 16, 8, avx2, sse4, 32)
mc_rep_mixs_10(epel_h,       24, 16, 8, avx2, sse4, 32)
mc_rep_mixs_10(epel_v,       24, 16, 8, avx2, sse4, 32)

mc_rep_mixs_10(qpel_h,  24, 16, 8, avx2, sse4, 32)
mc_rep_mixs_10(qpel_v,  24, 16, 8, avx2, sse4, 32)
mc_rep_mixs_10(qpel_hv, 24, 16, 8, avx2, sse4, 32)

mc_rep_uni_func(pel_pixels, 8, 64, 128, avx2) // used for 10bit
mc_rep_uni_func(pel_pixels, 8, 32,  96, avx2) // used for 10bit

mc_rep_funcs(pel_pixels, 8, 32, 64, avx2)

mc_rep_func(pel_pixels, 10, 16, 32, avx2)
mc_rep_func(pel_pixels, 10, 16, 48, avx2)
mc_rep_func(pel_pixels, 10, 32, 64, avx2)

mc_rep_bi_func(pel_pixels, 10, 16, 32, avx2)
mc_rep_bi_func(pel_pixels, 10, 16, 48, avx2)
mc_rep_bi_func(pel_pixels, 10, 32, 64, avx2)

mc_rep_funcs(epel_h, 8, 32, 64, avx2)
mc_rep_funcs(epel_v, 8, 32, 64, avx2)

mc_rep_funcs(epel_h, 10, 16, 32, avx2)
mc_rep_funcs(epel_h, 10, 16, 48, avx2)
mc_rep_funcs(epel_h, 10, 32, 64, avx2)

mc_rep_funcs(epel_v, 10, 16, 32, avx2)
mc_rep_funcs(epel_v, 10, 16, 48, avx2)
mc_rep_funcs(epel_v, 10, 32, 64, avx2)

mc_rep_funcs(epel_hv, 8, 32, 64, avx2)

mc_rep_funcs(epel_hv, 10, 16, 32, avx2)
mc_rep_funcs(epel_hv, 10, 16, 48, avx2)
mc_rep_funcs(epel_hv, 10, 32, 64, avx2)

mc_rep_funcs(qpel_h, 8, 32, 64, avx2)
mc_rep_mixs_8(qpel_h, 48, 32, 16, avx2, sse4)

mc_rep_funcs(qpel_v, 8, 32, 64, avx2)
mc_rep_mixs_8(qpel_v, 48, 32, 16, avx2, sse4)

mc_rep_funcs(qpel_h, 10, 16, 32, avx2)
mc_rep_funcs(qpel_h, 10, 16, 48, avx2)
mc_rep_funcs(qpel_h, 10, 32, 64, avx2)

mc_rep_funcs(qpel_v, 10, 16, 32, avx2)
mc_rep_funcs(qpel_v, 10, 16, 48, avx2)
mc_rep_funcs(qpel_v, 10, 32, 64, avx2)

mc_rep_funcs(qpel_hv, 10, 16, 32, avx2)
mc_rep_funcs(qpel_hv, 10, 16, 48, avx2)
mc_rep_funcs(qpel_hv, 10, 32, 64, avx2)

#endif //AVX2
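
/* SSE4 replication wrappers.  As an illustration of the expansion,
 * mc_rep_funcs(pel_pixels, 8, 16, 64, sse4) below defines
 * ff_hevc_put_hevc_pel_pixels64_8_sse4() (plus its uni/bi variants), which
 * covers a 64-pixel-wide block with four calls to the 16-pixel SSE4 kernel. */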
mc_rep_funcs(pel_pixels,  8, 16, 64, sse4)
mc_rep_funcs(pel_pixels,  8, 16, 48, sse4)
mc_rep_funcs(pel_pixels,  8, 16, 32, sse4)
mc_rep_funcs(pel_pixels,  8,  8, 24, sse4)
mc_rep_funcs(pel_pixels, 10,  8, 64, sse4)
mc_rep_funcs(pel_pixels, 10,  8, 48, sse4)
mc_rep_funcs(pel_pixels, 10,  8, 32, sse4)
mc_rep_funcs(pel_pixels, 10,  8, 24, sse4)
mc_rep_funcs(pel_pixels, 10,  8, 16, sse4)
mc_rep_funcs(pel_pixels, 10,  4, 12, sse4)
mc_rep_funcs(pel_pixels, 12,  8, 64, sse4)
mc_rep_funcs(pel_pixels, 12,  8, 48, sse4)
mc_rep_funcs(pel_pixels, 12,  8, 32, sse4)
mc_rep_funcs(pel_pixels, 12,  8, 24, sse4)
mc_rep_funcs(pel_pixels, 12,  8, 16, sse4)
mc_rep_funcs(pel_pixels, 12,  4, 12, sse4)

mc_rep_funcs(epel_h,  8, 16, 64, sse4)
mc_rep_funcs(epel_h,  8, 16, 48, sse4)
mc_rep_funcs(epel_h,  8, 16, 32, sse4)
mc_rep_funcs(epel_h,  8,  8, 24, sse4)
mc_rep_funcs(epel_h, 10,  8, 64, sse4)
mc_rep_funcs(epel_h, 10,  8, 48, sse4)
mc_rep_funcs(epel_h, 10,  8, 32, sse4)
mc_rep_funcs(epel_h, 10,  8, 24, sse4)
mc_rep_funcs(epel_h, 10,  8, 16, sse4)
mc_rep_funcs(epel_h, 10,  4, 12, sse4)
mc_rep_funcs(epel_h, 12,  8, 64, sse4)
mc_rep_funcs(epel_h, 12,  8, 48, sse4)
mc_rep_funcs(epel_h, 12,  8, 32, sse4)
mc_rep_funcs(epel_h, 12,  8, 24, sse4)
mc_rep_funcs(epel_h, 12,  8, 16, sse4)
mc_rep_funcs(epel_h, 12,  4, 12, sse4)
mc_rep_funcs(epel_v,  8, 16, 64, sse4)
mc_rep_funcs(epel_v,  8, 16, 48, sse4)
mc_rep_funcs(epel_v,  8, 16, 32, sse4)
mc_rep_funcs(epel_v,  8,  8, 24, sse4)
mc_rep_funcs(epel_v, 10,  8, 64, sse4)
mc_rep_funcs(epel_v, 10,  8, 48, sse4)
mc_rep_funcs(epel_v, 10,  8, 32, sse4)
mc_rep_funcs(epel_v, 10,  8, 24, sse4)
mc_rep_funcs(epel_v, 10,  8, 16, sse4)
mc_rep_funcs(epel_v, 10,  4, 12, sse4)
mc_rep_funcs(epel_v, 12,  8, 64, sse4)
mc_rep_funcs(epel_v, 12,  8, 48, sse4)
mc_rep_funcs(epel_v, 12,  8, 32, sse4)
mc_rep_funcs(epel_v, 12,  8, 24, sse4)
mc_rep_funcs(epel_v, 12,  8, 16, sse4)
mc_rep_funcs(epel_v, 12,  4, 12, sse4)
mc_rep_funcs(epel_hv,  8, 16, 64, sse4)
mc_rep_funcs(epel_hv,  8, 16, 48, sse4)
mc_rep_funcs(epel_hv,  8, 16, 32, sse4)
mc_rep_funcs(epel_hv,  8,  8, 24, sse4)
mc_rep_funcs2(epel_hv, 8,  8,  4, 12, sse4)
mc_rep_funcs(epel_hv, 10,  8, 64, sse4)
mc_rep_funcs(epel_hv, 10,  8, 48, sse4)
mc_rep_funcs(epel_hv, 10,  8, 32, sse4)
mc_rep_funcs(epel_hv, 10,  8, 24, sse4)
mc_rep_funcs(epel_hv, 10,  8, 16, sse4)
mc_rep_funcs(epel_hv, 10,  4, 12, sse4)
mc_rep_funcs(epel_hv, 12,  8, 64, sse4)
mc_rep_funcs(epel_hv, 12,  8, 48, sse4)
mc_rep_funcs(epel_hv, 12,  8, 32, sse4)
mc_rep_funcs(epel_hv, 12,  8, 24, sse4)
mc_rep_funcs(epel_hv, 12,  8, 16, sse4)
mc_rep_funcs(epel_hv, 12,  4, 12, sse4)

mc_rep_funcs(qpel_h,  8, 16, 64, sse4)
mc_rep_funcs(qpel_h,  8, 16, 48, sse4)
mc_rep_funcs(qpel_h,  8, 16, 32, sse4)
mc_rep_funcs(qpel_h,  8,  8, 24, sse4)
mc_rep_funcs(qpel_h, 10,  8, 64, sse4)
mc_rep_funcs(qpel_h, 10,  8, 48, sse4)
mc_rep_funcs(qpel_h, 10,  8, 32, sse4)
mc_rep_funcs(qpel_h, 10,  8, 24, sse4)
mc_rep_funcs(qpel_h, 10,  8, 16, sse4)
mc_rep_funcs(qpel_h, 10,  4, 12, sse4)
mc_rep_funcs(qpel_h, 12,  8, 64, sse4)
mc_rep_funcs(qpel_h, 12,  8, 48, sse4)
mc_rep_funcs(qpel_h, 12,  8, 32, sse4)
mc_rep_funcs(qpel_h, 12,  8, 24, sse4)
mc_rep_funcs(qpel_h, 12,  8, 16, sse4)
mc_rep_funcs(qpel_h, 12,  4, 12, sse4)
mc_rep_funcs(qpel_v,  8, 16, 64, sse4)
mc_rep_funcs(qpel_v,  8, 16, 48, sse4)
mc_rep_funcs(qpel_v,  8, 16, 32, sse4)
mc_rep_funcs(qpel_v,  8,  8, 24, sse4)
mc_rep_funcs(qpel_v, 10,  8, 64, sse4)
mc_rep_funcs(qpel_v, 10,  8, 48, sse4)
mc_rep_funcs(qpel_v, 10,  8, 32, sse4)
mc_rep_funcs(qpel_v, 10,  8, 24, sse4)
mc_rep_funcs(qpel_v, 10,  8, 16, sse4)
mc_rep_funcs(qpel_v, 10,  4, 12, sse4)
mc_rep_funcs(qpel_v, 12,  8, 64, sse4)
mc_rep_funcs(qpel_v, 12,  8, 48, sse4)
mc_rep_funcs(qpel_v, 12,  8, 32, sse4)
mc_rep_funcs(qpel_v, 12,  8, 24, sse4)
mc_rep_funcs(qpel_v, 12,  8, 16, sse4)
mc_rep_funcs(qpel_v, 12,  4, 12, sse4)
mc_rep_funcs(qpel_hv,  8,  8, 64, sse4)
mc_rep_funcs(qpel_hv,  8,  8, 48, sse4)
mc_rep_funcs(qpel_hv,  8,  8, 32, sse4)
mc_rep_funcs(qpel_hv,  8,  8, 24, sse4)
mc_rep_funcs(qpel_hv,  8,  8, 16, sse4)
mc_rep_funcs2(qpel_hv, 8,  8,  4, 12, sse4)
mc_rep_funcs(qpel_hv, 10,  8, 64, sse4)
mc_rep_funcs(qpel_hv, 10,  8, 48, sse4)
mc_rep_funcs(qpel_hv, 10,  8, 32, sse4)
mc_rep_funcs(qpel_hv, 10,  8, 24, sse4)
mc_rep_funcs(qpel_hv, 10,  8, 16, sse4)
mc_rep_funcs(qpel_hv, 10,  4, 12, sse4)
mc_rep_funcs(qpel_hv, 12,  8, 64, sse4)
mc_rep_funcs(qpel_hv, 12,  8, 48, sse4)
mc_rep_funcs(qpel_hv, 12,  8, 32, sse4)
mc_rep_funcs(qpel_hv, 12,  8, 24, sse4)
mc_rep_funcs(qpel_hv, 12,  8, 16, sse4)
mc_rep_funcs(qpel_hv, 12,  4, 12, sse4)

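/* Same replication trick for the weighted-prediction store functions, which
 * read from the int16_t intermediate buffer and write (bi-)weighted pixels. */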
#define mc_rep_uni_w(bitd, step, W, opt) \
void ff_hevc_put_hevc_uni_w##W##_##bitd##_##opt(uint8_t *_dst, ptrdiff_t dststride, int16_t *_src, \
                                                int height, int denom, int _wx, int _ox) \
{ \
    int i; \
    int16_t *src; \
    uint8_t *dst; \
    for (i = 0; i < W; i += step) { \
        src = _src + i; \
        dst = _dst + (i * ((bitd + 7) / 8)); \
        ff_hevc_put_hevc_uni_w##step##_##bitd##_##opt(dst, dststride, src, \
                                                      height, denom, _wx, _ox); \
    } \
}

mc_rep_uni_w(8, 6, 12, sse4)
mc_rep_uni_w(8, 8, 16, sse4)
mc_rep_uni_w(8, 8, 24, sse4)
mc_rep_uni_w(8, 8, 32, sse4)
mc_rep_uni_w(8, 8, 48, sse4)
mc_rep_uni_w(8, 8, 64, sse4)

mc_rep_uni_w(10, 6, 12, sse4)
mc_rep_uni_w(10, 8, 16, sse4)
mc_rep_uni_w(10, 8, 24, sse4)
mc_rep_uni_w(10, 8, 32, sse4)
mc_rep_uni_w(10, 8, 48, sse4)
mc_rep_uni_w(10, 8, 64, sse4)

mc_rep_uni_w(12, 6, 12, sse4)
mc_rep_uni_w(12, 8, 16, sse4)
mc_rep_uni_w(12, 8, 24, sse4)
mc_rep_uni_w(12, 8, 32, sse4)
mc_rep_uni_w(12, 8, 48, sse4)
mc_rep_uni_w(12, 8, 64, sse4)

#define mc_rep_bi_w(bitd, step, W, opt) \
void ff_hevc_put_hevc_bi_w##W##_##bitd##_##opt(uint8_t *_dst, ptrdiff_t dststride, int16_t *_src, \
                                               int16_t *_src2, int height, \
                                               int denom, int _wx0, int _wx1, int _ox0, int _ox1) \
{ \
    int i; \
    int16_t *src; \
    int16_t *src2; \
    uint8_t *dst; \
    for (i = 0; i < W; i += step) { \
        src  = _src  + i; \
        src2 = _src2 + i; \
        dst  = _dst  + (i * ((bitd + 7) / 8)); \
        ff_hevc_put_hevc_bi_w##step##_##bitd##_##opt(dst, dststride, src, src2, \
                                                     height, denom, _wx0, _wx1, _ox0, _ox1); \
    } \
}

mc_rep_bi_w(8, 6, 12, sse4)
mc_rep_bi_w(8, 8, 16, sse4)
mc_rep_bi_w(8, 8, 24, sse4)
mc_rep_bi_w(8, 8, 32, sse4)
mc_rep_bi_w(8, 8, 48, sse4)
mc_rep_bi_w(8, 8, 64, sse4)

mc_rep_bi_w(10, 6, 12, sse4)
mc_rep_bi_w(10, 8, 16, sse4)
mc_rep_bi_w(10, 8, 24, sse4)
mc_rep_bi_w(10, 8, 32, sse4)
mc_rep_bi_w(10, 8, 48, sse4)
mc_rep_bi_w(10, 8, 64, sse4)

mc_rep_bi_w(12, 6, 12, sse4)
mc_rep_bi_w(12, 8, 16, sse4)
mc_rep_bi_w(12, 8, 24, sse4)
mc_rep_bi_w(12, 8, 32, sse4)
mc_rep_bi_w(12, 8, 48, sse4)
mc_rep_bi_w(12, 8, 64, sse4)

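/* Weighted MC is done in two stages: the plain interpolation writes into an
 * aligned temporary buffer, then the weighted store converts it to pixels. */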
#define mc_uni_w_func(name, bitd, W, opt) \
void ff_hevc_put_hevc_uni_w_##name##W##_##bitd##_##opt(uint8_t *_dst, ptrdiff_t _dststride, \
                                                       uint8_t *_src, ptrdiff_t _srcstride, \
                                                       int height, int denom, \
                                                       int _wx, int _ox, \
                                                       intptr_t mx, intptr_t my, int width) \
{ \
    LOCAL_ALIGNED_16(int16_t, temp, [71 * MAX_PB_SIZE]); \
    ff_hevc_put_hevc_##name##W##_##bitd##_##opt(temp, _src, _srcstride, height, mx, my, width); \
    ff_hevc_put_hevc_uni_w##W##_##bitd##_##opt(_dst, _dststride, temp, height, denom, _wx, _ox); \
}

#define mc_uni_w_funcs(name, bitd, opt) \
    mc_uni_w_func(name, bitd, 4, opt)   \
    mc_uni_w_func(name, bitd, 8, opt)   \
    mc_uni_w_func(name, bitd, 12, opt)  \
    mc_uni_w_func(name, bitd, 16, opt)  \
    mc_uni_w_func(name, bitd, 24, opt)  \
    mc_uni_w_func(name, bitd, 32, opt)  \
    mc_uni_w_func(name, bitd, 48, opt)  \
    mc_uni_w_func(name, bitd, 64, opt)

mc_uni_w_funcs(pel_pixels, 8, sse4)
mc_uni_w_func(pel_pixels, 8, 6, sse4)
mc_uni_w_funcs(epel_h, 8, sse4)
mc_uni_w_func(epel_h, 8, 6, sse4)
mc_uni_w_funcs(epel_v, 8, sse4)
mc_uni_w_func(epel_v, 8, 6, sse4)
mc_uni_w_funcs(epel_hv, 8, sse4)
mc_uni_w_func(epel_hv, 8, 6, sse4)
mc_uni_w_funcs(qpel_h, 8, sse4)
mc_uni_w_funcs(qpel_v, 8, sse4)
mc_uni_w_funcs(qpel_hv, 8, sse4)

mc_uni_w_funcs(pel_pixels, 10, sse4)
mc_uni_w_func(pel_pixels, 10, 6, sse4)
mc_uni_w_funcs(epel_h, 10, sse4)
mc_uni_w_func(epel_h, 10, 6, sse4)
mc_uni_w_funcs(epel_v, 10, sse4)
mc_uni_w_func(epel_v, 10, 6, sse4)
mc_uni_w_funcs(epel_hv, 10, sse4)
mc_uni_w_func(epel_hv, 10, 6, sse4)
mc_uni_w_funcs(qpel_h, 10, sse4)
mc_uni_w_funcs(qpel_v, 10, sse4)
mc_uni_w_funcs(qpel_hv, 10, sse4)

mc_uni_w_funcs(pel_pixels, 12, sse4)
mc_uni_w_func(pel_pixels, 12, 6, sse4)
mc_uni_w_funcs(epel_h, 12, sse4)
mc_uni_w_func(epel_h, 12, 6, sse4)
mc_uni_w_funcs(epel_v, 12, sse4)
mc_uni_w_func(epel_v, 12, 6, sse4)
mc_uni_w_funcs(epel_hv, 12, sse4)
mc_uni_w_func(epel_hv, 12, 6, sse4)
mc_uni_w_funcs(qpel_h, 12, sse4)
mc_uni_w_funcs(qpel_v, 12, sse4)
mc_uni_w_funcs(qpel_hv, 12, sse4)

#define mc_bi_w_func(name, bitd, W, opt) \
void ff_hevc_put_hevc_bi_w_##name##W##_##bitd##_##opt(uint8_t *_dst, ptrdiff_t _dststride, \
                                                      uint8_t *_src, ptrdiff_t _srcstride, \
                                                      int16_t *_src2, \
                                                      int height, int denom, \
                                                      int _wx0, int _wx1, int _ox0, int _ox1, \
                                                      intptr_t mx, intptr_t my, int width) \
{ \
    LOCAL_ALIGNED_16(int16_t, temp, [71 * MAX_PB_SIZE]); \
    ff_hevc_put_hevc_##name##W##_##bitd##_##opt(temp, _src, _srcstride, height, mx, my, width); \
    ff_hevc_put_hevc_bi_w##W##_##bitd##_##opt(_dst, _dststride, temp, _src2, \
                                              height, denom, _wx0, _wx1, _ox0, _ox1); \
}

#define mc_bi_w_funcs(name, bitd, opt) \
    mc_bi_w_func(name, bitd, 4, opt)   \
    mc_bi_w_func(name, bitd, 8, opt)   \
    mc_bi_w_func(name, bitd, 12, opt)  \
    mc_bi_w_func(name, bitd, 16, opt)  \
    mc_bi_w_func(name, bitd, 24, opt)  \
    mc_bi_w_func(name, bitd, 32, opt)  \
    mc_bi_w_func(name, bitd, 48, opt)  \
    mc_bi_w_func(name, bitd, 64, opt)

mc_bi_w_funcs(pel_pixels, 8, sse4)
mc_bi_w_func(pel_pixels, 8, 6, sse4)
mc_bi_w_funcs(epel_h, 8, sse4)
mc_bi_w_func(epel_h, 8, 6, sse4)
mc_bi_w_funcs(epel_v, 8, sse4)
mc_bi_w_func(epel_v, 8, 6, sse4)
mc_bi_w_funcs(epel_hv, 8, sse4)
mc_bi_w_func(epel_hv, 8, 6, sse4)
mc_bi_w_funcs(qpel_h, 8, sse4)
mc_bi_w_funcs(qpel_v, 8, sse4)
mc_bi_w_funcs(qpel_hv, 8, sse4)

mc_bi_w_funcs(pel_pixels, 10, sse4)
mc_bi_w_func(pel_pixels, 10, 6, sse4)
mc_bi_w_funcs(epel_h, 10, sse4)
mc_bi_w_func(epel_h, 10, 6, sse4)
mc_bi_w_funcs(epel_v, 10, sse4)
mc_bi_w_func(epel_v, 10, 6, sse4)
mc_bi_w_funcs(epel_hv, 10, sse4)
mc_bi_w_func(epel_hv, 10, 6, sse4)
mc_bi_w_funcs(qpel_h, 10, sse4)
mc_bi_w_funcs(qpel_v, 10, sse4)
mc_bi_w_funcs(qpel_hv, 10, sse4)

mc_bi_w_funcs(pel_pixels, 12, sse4)
mc_bi_w_func(pel_pixels, 12, 6, sse4)
mc_bi_w_funcs(epel_h, 12, sse4)
mc_bi_w_func(epel_h, 12, 6, sse4)
mc_bi_w_funcs(epel_v, 12, sse4)
mc_bi_w_func(epel_v, 12, 6, sse4)
mc_bi_w_funcs(epel_hv, 12, sse4)
mc_bi_w_func(epel_hv, 12, 6, sse4)
mc_bi_w_funcs(qpel_h, 12, sse4)
mc_bi_w_funcs(qpel_v, 12, sse4)
mc_bi_w_funcs(qpel_hv, 12, sse4)

#endif //ARCH_X86_64 && HAVE_SSE4_EXTERNAL
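
/* SAO (sample adaptive offset) band- and edge-filter assembly, one entry per
 * block-width class (8/16/32/48/64); the *_INIT macros below fill the
 * corresponding function-pointer tables in the HEVCDSPContext. */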
#define SAO_BAND_FILTER_FUNCS(bitd, opt) \
void ff_hevc_sao_band_filter_8_##bitd##_##opt(uint8_t *_dst, uint8_t *_src, ptrdiff_t _stride_dst, ptrdiff_t _stride_src, \
                                              int16_t *sao_offset_val, int sao_left_class, int width, int height); \
void ff_hevc_sao_band_filter_16_##bitd##_##opt(uint8_t *_dst, uint8_t *_src, ptrdiff_t _stride_dst, ptrdiff_t _stride_src, \
                                               int16_t *sao_offset_val, int sao_left_class, int width, int height); \
void ff_hevc_sao_band_filter_32_##bitd##_##opt(uint8_t *_dst, uint8_t *_src, ptrdiff_t _stride_dst, ptrdiff_t _stride_src, \
                                               int16_t *sao_offset_val, int sao_left_class, int width, int height); \
void ff_hevc_sao_band_filter_48_##bitd##_##opt(uint8_t *_dst, uint8_t *_src, ptrdiff_t _stride_dst, ptrdiff_t _stride_src, \
                                               int16_t *sao_offset_val, int sao_left_class, int width, int height); \
void ff_hevc_sao_band_filter_64_##bitd##_##opt(uint8_t *_dst, uint8_t *_src, ptrdiff_t _stride_dst, ptrdiff_t _stride_src, \
                                               int16_t *sao_offset_val, int sao_left_class, int width, int height);

SAO_BAND_FILTER_FUNCS(8,  sse2)
SAO_BAND_FILTER_FUNCS(10, sse2)
SAO_BAND_FILTER_FUNCS(12, sse2)
SAO_BAND_FILTER_FUNCS(8,  avx)
SAO_BAND_FILTER_FUNCS(10, avx)
SAO_BAND_FILTER_FUNCS(12, avx)
SAO_BAND_FILTER_FUNCS(8,  avx2)
SAO_BAND_FILTER_FUNCS(10, avx2)
SAO_BAND_FILTER_FUNCS(12, avx2)

#define SAO_BAND_INIT(bitd, opt) do {                                  \
    c->sao_band_filter[0] = ff_hevc_sao_band_filter_8_##bitd##_##opt;  \
    c->sao_band_filter[1] = ff_hevc_sao_band_filter_16_##bitd##_##opt; \
    c->sao_band_filter[2] = ff_hevc_sao_band_filter_32_##bitd##_##opt; \
    c->sao_band_filter[3] = ff_hevc_sao_band_filter_48_##bitd##_##opt; \
    c->sao_band_filter[4] = ff_hevc_sao_band_filter_64_##bitd##_##opt; \
} while (0)

#define SAO_EDGE_FILTER_FUNCS(bitd, opt) \
void ff_hevc_sao_edge_filter_8_##bitd##_##opt(uint8_t *_dst, uint8_t *_src, ptrdiff_t stride_dst, int16_t *sao_offset_val, \
                                              int eo, int width, int height); \
void ff_hevc_sao_edge_filter_16_##bitd##_##opt(uint8_t *_dst, uint8_t *_src, ptrdiff_t stride_dst, int16_t *sao_offset_val, \
                                               int eo, int width, int height); \
void ff_hevc_sao_edge_filter_32_##bitd##_##opt(uint8_t *_dst, uint8_t *_src, ptrdiff_t stride_dst, int16_t *sao_offset_val, \
                                               int eo, int width, int height); \
void ff_hevc_sao_edge_filter_48_##bitd##_##opt(uint8_t *_dst, uint8_t *_src, ptrdiff_t stride_dst, int16_t *sao_offset_val, \
                                               int eo, int width, int height); \
void ff_hevc_sao_edge_filter_64_##bitd##_##opt(uint8_t *_dst, uint8_t *_src, ptrdiff_t stride_dst, int16_t *sao_offset_val, \
                                               int eo, int width, int height);

SAO_EDGE_FILTER_FUNCS(8,  ssse3)
SAO_EDGE_FILTER_FUNCS(8,  avx2)
SAO_EDGE_FILTER_FUNCS(10, sse2)
SAO_EDGE_FILTER_FUNCS(10, avx2)
SAO_EDGE_FILTER_FUNCS(12, sse2)
SAO_EDGE_FILTER_FUNCS(12, avx2)

#define SAO_EDGE_INIT(bitd, opt) do {                                  \
    c->sao_edge_filter[0] = ff_hevc_sao_edge_filter_8_##bitd##_##opt;  \
    c->sao_edge_filter[1] = ff_hevc_sao_edge_filter_16_##bitd##_##opt; \
    c->sao_edge_filter[2] = ff_hevc_sao_edge_filter_32_##bitd##_##opt; \
    c->sao_edge_filter[3] = ff_hevc_sao_edge_filter_48_##bitd##_##opt; \
    c->sao_edge_filter[4] = ff_hevc_sao_edge_filter_64_##bitd##_##opt; \
} while (0)

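/* EPEL_LINKS/QPEL_LINKS fill one (vertical, horizontal) filtering slot of the
 * epel/qpel function-pointer tables for every supported block-width index. */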
#define EPEL_LINKS(pointer, my, mx, fname, bitd, opt)      \
    PEL_LINK(pointer, 1, my , mx , fname##4 , bitd, opt ); \
    PEL_LINK(pointer, 2, my , mx , fname##6 , bitd, opt ); \
    PEL_LINK(pointer, 3, my , mx , fname##8 , bitd, opt ); \
    PEL_LINK(pointer, 4, my , mx , fname##12, bitd, opt ); \
    PEL_LINK(pointer, 5, my , mx , fname##16, bitd, opt ); \
    PEL_LINK(pointer, 6, my , mx , fname##24, bitd, opt ); \
    PEL_LINK(pointer, 7, my , mx , fname##32, bitd, opt ); \
    PEL_LINK(pointer, 8, my , mx , fname##48, bitd, opt ); \
    PEL_LINK(pointer, 9, my , mx , fname##64, bitd, opt )

#define QPEL_LINKS(pointer, my, mx, fname, bitd, opt)      \
    PEL_LINK(pointer, 1, my , mx , fname##4 , bitd, opt ); \
    PEL_LINK(pointer, 3, my , mx , fname##8 , bitd, opt ); \
    PEL_LINK(pointer, 4, my , mx , fname##12, bitd, opt ); \
    PEL_LINK(pointer, 5, my , mx , fname##16, bitd, opt ); \
    PEL_LINK(pointer, 6, my , mx , fname##24, bitd, opt ); \
    PEL_LINK(pointer, 7, my , mx , fname##32, bitd, opt ); \
    PEL_LINK(pointer, 8, my , mx , fname##48, bitd, opt ); \
    PEL_LINK(pointer, 9, my , mx , fname##64, bitd, opt )

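/* Runtime dispatch: query the CPU flags once and hook up the fastest
 * implementation available for the requested bit depth. */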
void ff_hevc_dsp_init_x86(HEVCDSPContext *c, const int bit_depth)
|
2014-04-28 17:12:28 +03:00
|
|
|
{
|
2014-06-18 06:57:16 +03:00
|
|
|
int cpu_flags = av_get_cpu_flags();
|
2014-04-28 17:12:28 +03:00
|
|
|
|
|
|
|
if (bit_depth == 8) {
|
2014-07-27 01:19:25 +03:00
|
|
|
if (EXTERNAL_MMXEXT(cpu_flags)) {
|
2016-06-29 11:56:42 +02:00
|
|
|
c->idct_dc[0] = ff_hevc_idct_4x4_dc_8_mmxext;
|
|
|
|
c->idct_dc[1] = ff_hevc_idct_8x8_dc_8_mmxext;
|
2017-01-31 12:20:54 +02:00
|
|
|
c->add_residual[0] = ff_hevc_add_residual4_8_mmxext;
|
2014-06-13 14:29:17 +03:00
|
|
|
}
|
2014-06-18 06:57:16 +03:00
|
|
|
if (EXTERNAL_SSE2(cpu_flags)) {
|
2014-07-22 11:57:34 +03:00
|
|
|
c->hevc_v_loop_filter_chroma = ff_hevc_v_loop_filter_chroma_8_sse2;
|
|
|
|
c->hevc_h_loop_filter_chroma = ff_hevc_h_loop_filter_chroma_8_sse2;
|
|
|
|
if (ARCH_X86_64) {
|
|
|
|
c->hevc_v_loop_filter_luma = ff_hevc_v_loop_filter_luma_8_sse2;
|
|
|
|
c->hevc_h_loop_filter_luma = ff_hevc_h_loop_filter_luma_8_sse2;
|
x86/hevc: add ff_hevc_sao_band_filter_{8,10,12}_{sse2,avx,avx2}
Original x86 intrinsics code and initial 8bit yasm port by Pierre-Edouard Lepere.
10/12bit yasm ports, refactoring and optimizations by James Almer
Benchmarks of BQTerrace_1920x1080_60_qp22.bin with an Intel Core i5-4200U
width 32
40338 decicycles in sao_band_filter_0_8, 2048 runs, 0 skips
8056 decicycles in ff_hevc_sao_band_filter_8_32_sse2, 2048 runs, 0 skips
7458 decicycles in ff_hevc_sao_band_filter_8_32_avx, 2048 runs, 0 skips
4504 decicycles in ff_hevc_sao_band_filter_8_32_avx2, 2048 runs, 0 skips
width 64
136046 decicycles in sao_band_filter_0_8, 16384 runs, 0 skips
28576 decicycles in ff_hevc_sao_band_filter_8_32_sse2, 16384 runs, 0 skips
26707 decicycles in ff_hevc_sao_band_filter_8_32_avx, 16384 runs, 0 skips
14387 decicycles in ff_hevc_sao_band_filter_8_32_avx2, 16384 runs, 0 skips
Reviewed-by: Christophe Gisquet <christophe.gisquet@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2015-02-01 20:01:36 +02:00
|
|
|
|
2014-08-18 11:01:09 +03:00
|
|
|
}
|
2015-02-10 01:38:20 +02:00
|
|
|
SAO_BAND_INIT(8, sse2);
|
|
|
|
|
2016-06-29 11:56:42 +02:00
|
|
|
c->idct_dc[1] = ff_hevc_idct_8x8_dc_8_sse2;
|
|
|
|
c->idct_dc[2] = ff_hevc_idct_16x16_dc_8_sse2;
|
|
|
|
c->idct_dc[3] = ff_hevc_idct_32x32_dc_8_sse2;
|
2014-08-18 11:01:09 +03:00
|
|
|
|
2017-01-31 12:20:54 +02:00
|
|
|
c->add_residual[1] = ff_hevc_add_residual8_8_sse2;
|
|
|
|
c->add_residual[2] = ff_hevc_add_residual16_8_sse2;
|
|
|
|
c->add_residual[3] = ff_hevc_add_residual32_8_sse2;
|
2014-05-15 17:31:11 +03:00
|
|
|
}
|
x86/hevcdsp: add ff_hevc_sao_edge_filter_8_{ssse3,avx2}
Original x86 intrinsics code and initial yasm port by Pierre-Edouard Lepere.
Refactoring and optimizations by James Almer.
Benchmarks of BQTerrace_1920x1080_60_qp22.bin with an Intel Core i5-4200U
Width 32
158583 decicycles in edge, sao_edge_filter_8 runs, 0 skips
5205 decicycles in ff_hevc_sao_edge_filter_32_8_ssse3, 32767 runs, 1 skips
2942 decicycles in ff_hevc_sao_edge_filter_32_8_avx2, 32767 runs, 1 skips
Width 64
705639 decicycles in sao_edge_filter_8, 262144 runs, 0 skips
19224 decicycles in ff_hevc_sao_edge_filter_64_8_ssse3, 262111 runs, 33 skips
10433 decicycles in ff_hevc_sao_edge_filter_64_8_avx2, 262115 runs, 29 skips
Signed-off-by: James Almer <jamrial@gmail.com>
2015-02-05 01:21:56 +02:00
|
|
|
if (EXTERNAL_SSSE3(cpu_flags)) {
|
|
|
|
if(ARCH_X86_64) {
|
|
|
|
c->hevc_v_loop_filter_luma = ff_hevc_v_loop_filter_luma_8_ssse3;
|
|
|
|
c->hevc_h_loop_filter_luma = ff_hevc_h_loop_filter_luma_8_ssse3;
|
|
|
|
}
|
|
|
|
SAO_EDGE_INIT(8, ssse3);
|
2014-05-15 17:31:11 +03:00
|
|
|
}
|
2014-07-27 01:19:25 +03:00
|
|
|
if (EXTERNAL_SSE4(cpu_flags) && ARCH_X86_64) {
|
2014-04-28 17:12:28 +03:00
|
|
|
|
2014-05-07 10:58:34 +03:00
|
|
|
EPEL_LINKS(c->put_hevc_epel, 0, 0, pel_pixels, 8, sse4);
|
|
|
|
EPEL_LINKS(c->put_hevc_epel, 0, 1, epel_h, 8, sse4);
|
|
|
|
EPEL_LINKS(c->put_hevc_epel, 1, 0, epel_v, 8, sse4);
|
|
|
|
EPEL_LINKS(c->put_hevc_epel, 1, 1, epel_hv, 8, sse4);
|
2014-04-28 17:12:28 +03:00
|
|
|
|
2014-05-07 10:58:34 +03:00
|
|
|
QPEL_LINKS(c->put_hevc_qpel, 0, 0, pel_pixels, 8, sse4);
|
|
|
|
QPEL_LINKS(c->put_hevc_qpel, 0, 1, qpel_h, 8, sse4);
|
|
|
|
QPEL_LINKS(c->put_hevc_qpel, 1, 0, qpel_v, 8, sse4);
|
|
|
|
QPEL_LINKS(c->put_hevc_qpel, 1, 1, qpel_hv, 8, sse4);
|
2014-04-28 17:12:28 +03:00
|
|
|
}
|
2014-07-29 10:30:13 +03:00
|
|
|
if (EXTERNAL_AVX(cpu_flags)) {
|
|
|
|
c->hevc_v_loop_filter_chroma = ff_hevc_v_loop_filter_chroma_8_avx;
|
|
|
|
c->hevc_h_loop_filter_chroma = ff_hevc_h_loop_filter_chroma_8_avx;
|
|
|
|
if (ARCH_X86_64) {
|
|
|
|
c->hevc_v_loop_filter_luma = ff_hevc_v_loop_filter_luma_8_avx;
|
|
|
|
c->hevc_h_loop_filter_luma = ff_hevc_h_loop_filter_luma_8_avx;
|
|
|
|
}
|
2015-02-10 01:38:20 +02:00
|
|
|
SAO_BAND_INIT(8, avx);
|
|
|
|
|
2017-01-31 12:20:54 +02:00
|
|
|
c->add_residual[1] = ff_hevc_add_residual8_8_avx;
|
|
|
|
c->add_residual[2] = ff_hevc_add_residual16_8_avx;
|
|
|
|
c->add_residual[3] = ff_hevc_add_residual32_8_avx;
|
2014-07-29 10:30:13 +03:00
|
|
|
}
|
2014-07-27 01:19:25 +03:00
|
|
|
if (EXTERNAL_AVX2(cpu_flags)) {
|
2016-02-07 05:52:06 +02:00
|
|
|
c->sao_band_filter[0] = ff_hevc_sao_band_filter_8_8_avx2;
|
|
|
|
c->sao_band_filter[1] = ff_hevc_sao_band_filter_16_8_avx2;
|
|
|
|
}
|
|
|
|
        if (EXTERNAL_AVX2_FAST(cpu_flags)) {
            c->idct_dc[2] = ff_hevc_idct_16x16_dc_8_avx2;
            c->idct_dc[3] = ff_hevc_idct_32x32_dc_8_avx2;
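            /* the AVX2 MC code only covers the widest 8-bit blocks
             * (indices 7-9: 32, 48 and 64 pixels) */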
            if (ARCH_X86_64) {
                c->put_hevc_epel[7][0][0] = ff_hevc_put_hevc_pel_pixels32_8_avx2;
                c->put_hevc_epel[8][0][0] = ff_hevc_put_hevc_pel_pixels48_8_avx2;
                c->put_hevc_epel[9][0][0] = ff_hevc_put_hevc_pel_pixels64_8_avx2;

                c->put_hevc_qpel[7][0][0] = ff_hevc_put_hevc_pel_pixels32_8_avx2;
                c->put_hevc_qpel[8][0][0] = ff_hevc_put_hevc_pel_pixels48_8_avx2;
                c->put_hevc_qpel[9][0][0] = ff_hevc_put_hevc_pel_pixels64_8_avx2;

                c->put_hevc_epel_uni[7][0][0] = ff_hevc_put_hevc_uni_pel_pixels32_8_avx2;
                c->put_hevc_epel_uni[8][0][0] = ff_hevc_put_hevc_uni_pel_pixels48_8_avx2;
                c->put_hevc_epel_uni[9][0][0] = ff_hevc_put_hevc_uni_pel_pixels64_8_avx2;

                c->put_hevc_qpel_uni[7][0][0] = ff_hevc_put_hevc_uni_pel_pixels32_8_avx2;
                c->put_hevc_qpel_uni[8][0][0] = ff_hevc_put_hevc_uni_pel_pixels48_8_avx2;
                c->put_hevc_qpel_uni[9][0][0] = ff_hevc_put_hevc_uni_pel_pixels64_8_avx2;

                c->put_hevc_qpel_bi[7][0][0] = ff_hevc_put_hevc_bi_pel_pixels32_8_avx2;
                c->put_hevc_qpel_bi[8][0][0] = ff_hevc_put_hevc_bi_pel_pixels48_8_avx2;
                c->put_hevc_qpel_bi[9][0][0] = ff_hevc_put_hevc_bi_pel_pixels64_8_avx2;

                c->put_hevc_epel_bi[7][0][0] = ff_hevc_put_hevc_bi_pel_pixels32_8_avx2;
                c->put_hevc_epel_bi[8][0][0] = ff_hevc_put_hevc_bi_pel_pixels48_8_avx2;
                c->put_hevc_epel_bi[9][0][0] = ff_hevc_put_hevc_bi_pel_pixels64_8_avx2;

                c->put_hevc_epel[7][0][1] = ff_hevc_put_hevc_epel_h32_8_avx2;
                c->put_hevc_epel[8][0][1] = ff_hevc_put_hevc_epel_h48_8_avx2;
                c->put_hevc_epel[9][0][1] = ff_hevc_put_hevc_epel_h64_8_avx2;

                c->put_hevc_epel_uni[7][0][1] = ff_hevc_put_hevc_uni_epel_h32_8_avx2;
                c->put_hevc_epel_uni[8][0][1] = ff_hevc_put_hevc_uni_epel_h48_8_avx2;
                c->put_hevc_epel_uni[9][0][1] = ff_hevc_put_hevc_uni_epel_h64_8_avx2;

                c->put_hevc_epel_bi[7][0][1] = ff_hevc_put_hevc_bi_epel_h32_8_avx2;
                c->put_hevc_epel_bi[8][0][1] = ff_hevc_put_hevc_bi_epel_h48_8_avx2;
                c->put_hevc_epel_bi[9][0][1] = ff_hevc_put_hevc_bi_epel_h64_8_avx2;

                c->put_hevc_epel[7][1][0] = ff_hevc_put_hevc_epel_v32_8_avx2;
                c->put_hevc_epel[8][1][0] = ff_hevc_put_hevc_epel_v48_8_avx2;
                c->put_hevc_epel[9][1][0] = ff_hevc_put_hevc_epel_v64_8_avx2;

                c->put_hevc_epel_uni[7][1][0] = ff_hevc_put_hevc_uni_epel_v32_8_avx2;
                c->put_hevc_epel_uni[8][1][0] = ff_hevc_put_hevc_uni_epel_v48_8_avx2;
                c->put_hevc_epel_uni[9][1][0] = ff_hevc_put_hevc_uni_epel_v64_8_avx2;

                c->put_hevc_epel_bi[7][1][0] = ff_hevc_put_hevc_bi_epel_v32_8_avx2;
                c->put_hevc_epel_bi[8][1][0] = ff_hevc_put_hevc_bi_epel_v48_8_avx2;
                c->put_hevc_epel_bi[9][1][0] = ff_hevc_put_hevc_bi_epel_v64_8_avx2;

                c->put_hevc_epel[7][1][1] = ff_hevc_put_hevc_epel_hv32_8_avx2;
                c->put_hevc_epel[8][1][1] = ff_hevc_put_hevc_epel_hv48_8_avx2;
                c->put_hevc_epel[9][1][1] = ff_hevc_put_hevc_epel_hv64_8_avx2;

                c->put_hevc_epel_uni[7][1][1] = ff_hevc_put_hevc_uni_epel_hv32_8_avx2;
                c->put_hevc_epel_uni[8][1][1] = ff_hevc_put_hevc_uni_epel_hv48_8_avx2;
                c->put_hevc_epel_uni[9][1][1] = ff_hevc_put_hevc_uni_epel_hv64_8_avx2;

                c->put_hevc_epel_bi[7][1][1] = ff_hevc_put_hevc_bi_epel_hv32_8_avx2;
                c->put_hevc_epel_bi[8][1][1] = ff_hevc_put_hevc_bi_epel_hv48_8_avx2;
                c->put_hevc_epel_bi[9][1][1] = ff_hevc_put_hevc_bi_epel_hv64_8_avx2;

                c->put_hevc_qpel[7][0][1] = ff_hevc_put_hevc_qpel_h32_8_avx2;
                c->put_hevc_qpel[8][0][1] = ff_hevc_put_hevc_qpel_h48_8_avx2;
                c->put_hevc_qpel[9][0][1] = ff_hevc_put_hevc_qpel_h64_8_avx2;

                c->put_hevc_qpel[7][1][0] = ff_hevc_put_hevc_qpel_v32_8_avx2;
                c->put_hevc_qpel[8][1][0] = ff_hevc_put_hevc_qpel_v48_8_avx2;
                c->put_hevc_qpel[9][1][0] = ff_hevc_put_hevc_qpel_v64_8_avx2;

                c->put_hevc_qpel_uni[7][0][1] = ff_hevc_put_hevc_uni_qpel_h32_8_avx2;
                c->put_hevc_qpel_uni[8][0][1] = ff_hevc_put_hevc_uni_qpel_h48_8_avx2;
                c->put_hevc_qpel_uni[9][0][1] = ff_hevc_put_hevc_uni_qpel_h64_8_avx2;

                c->put_hevc_qpel_uni[7][1][0] = ff_hevc_put_hevc_uni_qpel_v32_8_avx2;
                c->put_hevc_qpel_uni[8][1][0] = ff_hevc_put_hevc_uni_qpel_v48_8_avx2;
                c->put_hevc_qpel_uni[9][1][0] = ff_hevc_put_hevc_uni_qpel_v64_8_avx2;

                c->put_hevc_qpel_bi[7][0][1] = ff_hevc_put_hevc_bi_qpel_h32_8_avx2;
                c->put_hevc_qpel_bi[8][0][1] = ff_hevc_put_hevc_bi_qpel_h48_8_avx2;
                c->put_hevc_qpel_bi[9][0][1] = ff_hevc_put_hevc_bi_qpel_h64_8_avx2;

                c->put_hevc_qpel_bi[7][1][0] = ff_hevc_put_hevc_bi_qpel_v32_8_avx2;
                c->put_hevc_qpel_bi[8][1][0] = ff_hevc_put_hevc_bi_qpel_v48_8_avx2;
                c->put_hevc_qpel_bi[9][1][0] = ff_hevc_put_hevc_bi_qpel_v64_8_avx2;
            }
            SAO_BAND_INIT(8, avx2);

            c->sao_edge_filter[2] = ff_hevc_sao_edge_filter_32_8_avx2;
            c->sao_edge_filter[3] = ff_hevc_sao_edge_filter_48_8_avx2;
            c->sao_edge_filter[4] = ff_hevc_sao_edge_filter_64_8_avx2;

            c->add_residual[3] = ff_hevc_add_residual32_8_avx2;
        }
    } else if (bit_depth == 10) {
        if (EXTERNAL_MMXEXT(cpu_flags)) {
            c->add_residual[0] = ff_hevc_add_residual4_10_mmxext;

            c->idct_dc[0] = ff_hevc_idct_4x4_dc_10_mmxext;
            c->idct_dc[1] = ff_hevc_idct_8x8_dc_10_mmxext;
        }
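        /* SSE2: 10-bit deblocking, SAO, IDCT DC and add_residual */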
        if (EXTERNAL_SSE2(cpu_flags)) {
            c->hevc_v_loop_filter_chroma = ff_hevc_v_loop_filter_chroma_10_sse2;
            c->hevc_h_loop_filter_chroma = ff_hevc_h_loop_filter_chroma_10_sse2;
            if (ARCH_X86_64) {
                c->hevc_v_loop_filter_luma = ff_hevc_v_loop_filter_luma_10_sse2;
                c->hevc_h_loop_filter_luma = ff_hevc_h_loop_filter_luma_10_sse2;
            }
            SAO_BAND_INIT(10, sse2);
            SAO_EDGE_INIT(10, sse2);

            c->idct_dc[1] = ff_hevc_idct_8x8_dc_10_sse2;
            c->idct_dc[2] = ff_hevc_idct_16x16_dc_10_sse2;
            c->idct_dc[3] = ff_hevc_idct_32x32_dc_10_sse2;

            c->add_residual[1] = ff_hevc_add_residual8_10_sse2;
            c->add_residual[2] = ff_hevc_add_residual16_10_sse2;
            c->add_residual[3] = ff_hevc_add_residual32_10_sse2;
        }
        if (EXTERNAL_SSSE3(cpu_flags) && ARCH_X86_64) {
            c->hevc_v_loop_filter_luma = ff_hevc_v_loop_filter_luma_10_ssse3;
            c->hevc_h_loop_filter_luma = ff_hevc_h_loop_filter_luma_10_ssse3;
        }
        if (EXTERNAL_SSE4(cpu_flags) && ARCH_X86_64) {
            EPEL_LINKS(c->put_hevc_epel, 0, 0, pel_pixels, 10, sse4);
            EPEL_LINKS(c->put_hevc_epel, 0, 1, epel_h, 10, sse4);
            EPEL_LINKS(c->put_hevc_epel, 1, 0, epel_v, 10, sse4);
            EPEL_LINKS(c->put_hevc_epel, 1, 1, epel_hv, 10, sse4);

            QPEL_LINKS(c->put_hevc_qpel, 0, 0, pel_pixels, 10, sse4);
            QPEL_LINKS(c->put_hevc_qpel, 0, 1, qpel_h, 10, sse4);
            QPEL_LINKS(c->put_hevc_qpel, 1, 0, qpel_v, 10, sse4);
            QPEL_LINKS(c->put_hevc_qpel, 1, 1, qpel_hv, 10, sse4);
        }
        if (EXTERNAL_AVX(cpu_flags)) {
            c->hevc_v_loop_filter_chroma = ff_hevc_v_loop_filter_chroma_10_avx;
            c->hevc_h_loop_filter_chroma = ff_hevc_h_loop_filter_chroma_10_avx;
            if (ARCH_X86_64) {
                c->hevc_v_loop_filter_luma = ff_hevc_v_loop_filter_luma_10_avx;
                c->hevc_h_loop_filter_luma = ff_hevc_h_loop_filter_luma_10_avx;
            }
            SAO_BAND_INIT(10, avx);
        }
        if (EXTERNAL_AVX2(cpu_flags)) {
            c->sao_band_filter[0] = ff_hevc_sao_band_filter_8_10_avx2;
        }
        if (EXTERNAL_AVX2_FAST(cpu_flags)) {
            c->idct_dc[2] = ff_hevc_idct_16x16_dc_10_avx2;
            c->idct_dc[3] = ff_hevc_idct_32x32_dc_10_avx2;
            if (ARCH_X86_64) {
                c->put_hevc_epel[5][0][0] = ff_hevc_put_hevc_pel_pixels16_10_avx2;
                c->put_hevc_epel[6][0][0] = ff_hevc_put_hevc_pel_pixels24_10_avx2;
                c->put_hevc_epel[7][0][0] = ff_hevc_put_hevc_pel_pixels32_10_avx2;
                c->put_hevc_epel[8][0][0] = ff_hevc_put_hevc_pel_pixels48_10_avx2;
                c->put_hevc_epel[9][0][0] = ff_hevc_put_hevc_pel_pixels64_10_avx2;

                c->put_hevc_qpel[5][0][0] = ff_hevc_put_hevc_pel_pixels16_10_avx2;
                c->put_hevc_qpel[6][0][0] = ff_hevc_put_hevc_pel_pixels24_10_avx2;
                c->put_hevc_qpel[7][0][0] = ff_hevc_put_hevc_pel_pixels32_10_avx2;
                c->put_hevc_qpel[8][0][0] = ff_hevc_put_hevc_pel_pixels48_10_avx2;
                c->put_hevc_qpel[9][0][0] = ff_hevc_put_hevc_pel_pixels64_10_avx2;
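                /* uni pel copies are bit-depth agnostic: the 10-bit entries reuse the
                 * 8-bit functions at twice the width, since a width-N 10-bit copy moves
                 * the same number of bytes as a width-2N 8-bit one */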
                c->put_hevc_epel_uni[5][0][0] = ff_hevc_put_hevc_uni_pel_pixels32_8_avx2;
                c->put_hevc_epel_uni[6][0][0] = ff_hevc_put_hevc_uni_pel_pixels48_8_avx2;
                c->put_hevc_epel_uni[7][0][0] = ff_hevc_put_hevc_uni_pel_pixels64_8_avx2;
                c->put_hevc_epel_uni[8][0][0] = ff_hevc_put_hevc_uni_pel_pixels96_8_avx2;
                c->put_hevc_epel_uni[9][0][0] = ff_hevc_put_hevc_uni_pel_pixels128_8_avx2;

                c->put_hevc_qpel_uni[5][0][0] = ff_hevc_put_hevc_uni_pel_pixels32_8_avx2;
                c->put_hevc_qpel_uni[6][0][0] = ff_hevc_put_hevc_uni_pel_pixels48_8_avx2;
                c->put_hevc_qpel_uni[7][0][0] = ff_hevc_put_hevc_uni_pel_pixels64_8_avx2;
                c->put_hevc_qpel_uni[8][0][0] = ff_hevc_put_hevc_uni_pel_pixels96_8_avx2;
                c->put_hevc_qpel_uni[9][0][0] = ff_hevc_put_hevc_uni_pel_pixels128_8_avx2;

                c->put_hevc_epel_bi[5][0][0] = ff_hevc_put_hevc_bi_pel_pixels16_10_avx2;
                c->put_hevc_epel_bi[6][0][0] = ff_hevc_put_hevc_bi_pel_pixels24_10_avx2;
                c->put_hevc_epel_bi[7][0][0] = ff_hevc_put_hevc_bi_pel_pixels32_10_avx2;
                c->put_hevc_epel_bi[8][0][0] = ff_hevc_put_hevc_bi_pel_pixels48_10_avx2;
                c->put_hevc_epel_bi[9][0][0] = ff_hevc_put_hevc_bi_pel_pixels64_10_avx2;

                c->put_hevc_qpel_bi[5][0][0] = ff_hevc_put_hevc_bi_pel_pixels16_10_avx2;
                c->put_hevc_qpel_bi[6][0][0] = ff_hevc_put_hevc_bi_pel_pixels24_10_avx2;
                c->put_hevc_qpel_bi[7][0][0] = ff_hevc_put_hevc_bi_pel_pixels32_10_avx2;
                c->put_hevc_qpel_bi[8][0][0] = ff_hevc_put_hevc_bi_pel_pixels48_10_avx2;
                c->put_hevc_qpel_bi[9][0][0] = ff_hevc_put_hevc_bi_pel_pixels64_10_avx2;

                c->put_hevc_epel[5][0][1] = ff_hevc_put_hevc_epel_h16_10_avx2;
                c->put_hevc_epel[6][0][1] = ff_hevc_put_hevc_epel_h24_10_avx2;
                c->put_hevc_epel[7][0][1] = ff_hevc_put_hevc_epel_h32_10_avx2;
                c->put_hevc_epel[8][0][1] = ff_hevc_put_hevc_epel_h48_10_avx2;
                c->put_hevc_epel[9][0][1] = ff_hevc_put_hevc_epel_h64_10_avx2;

                c->put_hevc_epel_uni[5][0][1] = ff_hevc_put_hevc_uni_epel_h16_10_avx2;
                c->put_hevc_epel_uni[6][0][1] = ff_hevc_put_hevc_uni_epel_h24_10_avx2;
                c->put_hevc_epel_uni[7][0][1] = ff_hevc_put_hevc_uni_epel_h32_10_avx2;
                c->put_hevc_epel_uni[8][0][1] = ff_hevc_put_hevc_uni_epel_h48_10_avx2;
                c->put_hevc_epel_uni[9][0][1] = ff_hevc_put_hevc_uni_epel_h64_10_avx2;

                c->put_hevc_epel_bi[5][0][1] = ff_hevc_put_hevc_bi_epel_h16_10_avx2;
                c->put_hevc_epel_bi[6][0][1] = ff_hevc_put_hevc_bi_epel_h24_10_avx2;
                c->put_hevc_epel_bi[7][0][1] = ff_hevc_put_hevc_bi_epel_h32_10_avx2;
                c->put_hevc_epel_bi[8][0][1] = ff_hevc_put_hevc_bi_epel_h48_10_avx2;
                c->put_hevc_epel_bi[9][0][1] = ff_hevc_put_hevc_bi_epel_h64_10_avx2;

                c->put_hevc_epel[5][1][0] = ff_hevc_put_hevc_epel_v16_10_avx2;
                c->put_hevc_epel[6][1][0] = ff_hevc_put_hevc_epel_v24_10_avx2;
                c->put_hevc_epel[7][1][0] = ff_hevc_put_hevc_epel_v32_10_avx2;
                c->put_hevc_epel[8][1][0] = ff_hevc_put_hevc_epel_v48_10_avx2;
                c->put_hevc_epel[9][1][0] = ff_hevc_put_hevc_epel_v64_10_avx2;

                c->put_hevc_epel_uni[5][1][0] = ff_hevc_put_hevc_uni_epel_v16_10_avx2;
                c->put_hevc_epel_uni[6][1][0] = ff_hevc_put_hevc_uni_epel_v24_10_avx2;
                c->put_hevc_epel_uni[7][1][0] = ff_hevc_put_hevc_uni_epel_v32_10_avx2;
                c->put_hevc_epel_uni[8][1][0] = ff_hevc_put_hevc_uni_epel_v48_10_avx2;
                c->put_hevc_epel_uni[9][1][0] = ff_hevc_put_hevc_uni_epel_v64_10_avx2;

                c->put_hevc_epel_bi[5][1][0] = ff_hevc_put_hevc_bi_epel_v16_10_avx2;
                c->put_hevc_epel_bi[6][1][0] = ff_hevc_put_hevc_bi_epel_v24_10_avx2;
                c->put_hevc_epel_bi[7][1][0] = ff_hevc_put_hevc_bi_epel_v32_10_avx2;
                c->put_hevc_epel_bi[8][1][0] = ff_hevc_put_hevc_bi_epel_v48_10_avx2;
                c->put_hevc_epel_bi[9][1][0] = ff_hevc_put_hevc_bi_epel_v64_10_avx2;

                c->put_hevc_epel[5][1][1] = ff_hevc_put_hevc_epel_hv16_10_avx2;
                c->put_hevc_epel[6][1][1] = ff_hevc_put_hevc_epel_hv24_10_avx2;
                c->put_hevc_epel[7][1][1] = ff_hevc_put_hevc_epel_hv32_10_avx2;
                c->put_hevc_epel[8][1][1] = ff_hevc_put_hevc_epel_hv48_10_avx2;
                c->put_hevc_epel[9][1][1] = ff_hevc_put_hevc_epel_hv64_10_avx2;

                c->put_hevc_epel_uni[5][1][1] = ff_hevc_put_hevc_uni_epel_hv16_10_avx2;
                c->put_hevc_epel_uni[6][1][1] = ff_hevc_put_hevc_uni_epel_hv24_10_avx2;
                c->put_hevc_epel_uni[7][1][1] = ff_hevc_put_hevc_uni_epel_hv32_10_avx2;
                c->put_hevc_epel_uni[8][1][1] = ff_hevc_put_hevc_uni_epel_hv48_10_avx2;
                c->put_hevc_epel_uni[9][1][1] = ff_hevc_put_hevc_uni_epel_hv64_10_avx2;

                c->put_hevc_epel_bi[5][1][1] = ff_hevc_put_hevc_bi_epel_hv16_10_avx2;
                c->put_hevc_epel_bi[6][1][1] = ff_hevc_put_hevc_bi_epel_hv24_10_avx2;
                c->put_hevc_epel_bi[7][1][1] = ff_hevc_put_hevc_bi_epel_hv32_10_avx2;
                c->put_hevc_epel_bi[8][1][1] = ff_hevc_put_hevc_bi_epel_hv48_10_avx2;
                c->put_hevc_epel_bi[9][1][1] = ff_hevc_put_hevc_bi_epel_hv64_10_avx2;

                c->put_hevc_qpel[5][0][1] = ff_hevc_put_hevc_qpel_h16_10_avx2;
                c->put_hevc_qpel[6][0][1] = ff_hevc_put_hevc_qpel_h24_10_avx2;
                c->put_hevc_qpel[7][0][1] = ff_hevc_put_hevc_qpel_h32_10_avx2;
                c->put_hevc_qpel[8][0][1] = ff_hevc_put_hevc_qpel_h48_10_avx2;
                c->put_hevc_qpel[9][0][1] = ff_hevc_put_hevc_qpel_h64_10_avx2;

                c->put_hevc_qpel_uni[5][0][1] = ff_hevc_put_hevc_uni_qpel_h16_10_avx2;
                c->put_hevc_qpel_uni[6][0][1] = ff_hevc_put_hevc_uni_qpel_h24_10_avx2;
                c->put_hevc_qpel_uni[7][0][1] = ff_hevc_put_hevc_uni_qpel_h32_10_avx2;
                c->put_hevc_qpel_uni[8][0][1] = ff_hevc_put_hevc_uni_qpel_h48_10_avx2;
                c->put_hevc_qpel_uni[9][0][1] = ff_hevc_put_hevc_uni_qpel_h64_10_avx2;

                c->put_hevc_qpel_bi[5][0][1] = ff_hevc_put_hevc_bi_qpel_h16_10_avx2;
                c->put_hevc_qpel_bi[6][0][1] = ff_hevc_put_hevc_bi_qpel_h24_10_avx2;
                c->put_hevc_qpel_bi[7][0][1] = ff_hevc_put_hevc_bi_qpel_h32_10_avx2;
                c->put_hevc_qpel_bi[8][0][1] = ff_hevc_put_hevc_bi_qpel_h48_10_avx2;
                c->put_hevc_qpel_bi[9][0][1] = ff_hevc_put_hevc_bi_qpel_h64_10_avx2;

                c->put_hevc_qpel[5][1][0] = ff_hevc_put_hevc_qpel_v16_10_avx2;
                c->put_hevc_qpel[6][1][0] = ff_hevc_put_hevc_qpel_v24_10_avx2;
                c->put_hevc_qpel[7][1][0] = ff_hevc_put_hevc_qpel_v32_10_avx2;
                c->put_hevc_qpel[8][1][0] = ff_hevc_put_hevc_qpel_v48_10_avx2;
                c->put_hevc_qpel[9][1][0] = ff_hevc_put_hevc_qpel_v64_10_avx2;

                c->put_hevc_qpel_uni[5][1][0] = ff_hevc_put_hevc_uni_qpel_v16_10_avx2;
                c->put_hevc_qpel_uni[6][1][0] = ff_hevc_put_hevc_uni_qpel_v24_10_avx2;
                c->put_hevc_qpel_uni[7][1][0] = ff_hevc_put_hevc_uni_qpel_v32_10_avx2;
                c->put_hevc_qpel_uni[8][1][0] = ff_hevc_put_hevc_uni_qpel_v48_10_avx2;
                c->put_hevc_qpel_uni[9][1][0] = ff_hevc_put_hevc_uni_qpel_v64_10_avx2;

                c->put_hevc_qpel_bi[5][1][0] = ff_hevc_put_hevc_bi_qpel_v16_10_avx2;
                c->put_hevc_qpel_bi[6][1][0] = ff_hevc_put_hevc_bi_qpel_v24_10_avx2;
                c->put_hevc_qpel_bi[7][1][0] = ff_hevc_put_hevc_bi_qpel_v32_10_avx2;
                c->put_hevc_qpel_bi[8][1][0] = ff_hevc_put_hevc_bi_qpel_v48_10_avx2;
                c->put_hevc_qpel_bi[9][1][0] = ff_hevc_put_hevc_bi_qpel_v64_10_avx2;

                c->put_hevc_qpel[5][1][1] = ff_hevc_put_hevc_qpel_hv16_10_avx2;
                c->put_hevc_qpel[6][1][1] = ff_hevc_put_hevc_qpel_hv24_10_avx2;
                c->put_hevc_qpel[7][1][1] = ff_hevc_put_hevc_qpel_hv32_10_avx2;
                c->put_hevc_qpel[8][1][1] = ff_hevc_put_hevc_qpel_hv48_10_avx2;
                c->put_hevc_qpel[9][1][1] = ff_hevc_put_hevc_qpel_hv64_10_avx2;

                c->put_hevc_qpel_uni[5][1][1] = ff_hevc_put_hevc_uni_qpel_hv16_10_avx2;
                c->put_hevc_qpel_uni[6][1][1] = ff_hevc_put_hevc_uni_qpel_hv24_10_avx2;
                c->put_hevc_qpel_uni[7][1][1] = ff_hevc_put_hevc_uni_qpel_hv32_10_avx2;
                c->put_hevc_qpel_uni[8][1][1] = ff_hevc_put_hevc_uni_qpel_hv48_10_avx2;
                c->put_hevc_qpel_uni[9][1][1] = ff_hevc_put_hevc_uni_qpel_hv64_10_avx2;

                c->put_hevc_qpel_bi[5][1][1] = ff_hevc_put_hevc_bi_qpel_hv16_10_avx2;
                c->put_hevc_qpel_bi[6][1][1] = ff_hevc_put_hevc_bi_qpel_hv24_10_avx2;
                c->put_hevc_qpel_bi[7][1][1] = ff_hevc_put_hevc_bi_qpel_hv32_10_avx2;
                c->put_hevc_qpel_bi[8][1][1] = ff_hevc_put_hevc_bi_qpel_hv48_10_avx2;
                c->put_hevc_qpel_bi[9][1][1] = ff_hevc_put_hevc_bi_qpel_hv64_10_avx2;
            }
            SAO_BAND_INIT(10, avx2);
            SAO_EDGE_INIT(10, avx2);

            c->add_residual[2] = ff_hevc_add_residual16_10_avx2;
            c->add_residual[3] = ff_hevc_add_residual32_10_avx2;
        }
    } else if (bit_depth == 12) {
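        /* 12-bit mirrors the 10-bit coverage (deblocking, SAO, IDCT DC, SSE4.1 MC),
         * minus the AVX2 MC and add_residual versions */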
        if (EXTERNAL_MMXEXT(cpu_flags)) {
            c->idct_dc[0] = ff_hevc_idct_4x4_dc_12_mmxext;
            c->idct_dc[1] = ff_hevc_idct_8x8_dc_12_mmxext;
        }
        if (EXTERNAL_SSE2(cpu_flags)) {
            c->hevc_v_loop_filter_chroma = ff_hevc_v_loop_filter_chroma_12_sse2;
            c->hevc_h_loop_filter_chroma = ff_hevc_h_loop_filter_chroma_12_sse2;
            if (ARCH_X86_64) {
                c->hevc_v_loop_filter_luma = ff_hevc_v_loop_filter_luma_12_sse2;
                c->hevc_h_loop_filter_luma = ff_hevc_h_loop_filter_luma_12_sse2;
            }
            SAO_BAND_INIT(12, sse2);
            SAO_EDGE_INIT(12, sse2);

            c->idct_dc[1] = ff_hevc_idct_8x8_dc_12_sse2;
            c->idct_dc[2] = ff_hevc_idct_16x16_dc_12_sse2;
            c->idct_dc[3] = ff_hevc_idct_32x32_dc_12_sse2;
        }
        if (EXTERNAL_SSSE3(cpu_flags) && ARCH_X86_64) {
            c->hevc_v_loop_filter_luma = ff_hevc_v_loop_filter_luma_12_ssse3;
            c->hevc_h_loop_filter_luma = ff_hevc_h_loop_filter_luma_12_ssse3;
        }
        if (EXTERNAL_SSE4(cpu_flags) && ARCH_X86_64) {
            EPEL_LINKS(c->put_hevc_epel, 0, 0, pel_pixels, 12, sse4);
            EPEL_LINKS(c->put_hevc_epel, 0, 1, epel_h, 12, sse4);
            EPEL_LINKS(c->put_hevc_epel, 1, 0, epel_v, 12, sse4);
            EPEL_LINKS(c->put_hevc_epel, 1, 1, epel_hv, 12, sse4);

            QPEL_LINKS(c->put_hevc_qpel, 0, 0, pel_pixels, 12, sse4);
            QPEL_LINKS(c->put_hevc_qpel, 0, 1, qpel_h, 12, sse4);
            QPEL_LINKS(c->put_hevc_qpel, 1, 0, qpel_v, 12, sse4);
            QPEL_LINKS(c->put_hevc_qpel, 1, 1, qpel_hv, 12, sse4);
        }
        if (EXTERNAL_AVX(cpu_flags)) {
            c->hevc_v_loop_filter_chroma = ff_hevc_v_loop_filter_chroma_12_avx;
            c->hevc_h_loop_filter_chroma = ff_hevc_h_loop_filter_chroma_12_avx;
            if (ARCH_X86_64) {
                c->hevc_v_loop_filter_luma = ff_hevc_v_loop_filter_luma_12_avx;
                c->hevc_h_loop_filter_luma = ff_hevc_h_loop_filter_luma_12_avx;
            }
            SAO_BAND_INIT(12, avx);
        }
        if (EXTERNAL_AVX2(cpu_flags)) {
            c->sao_band_filter[0] = ff_hevc_sao_band_filter_8_12_avx2;
        }
        if (EXTERNAL_AVX2_FAST(cpu_flags)) {
            c->idct_dc[2] = ff_hevc_idct_16x16_dc_12_avx2;
            c->idct_dc[3] = ff_hevc_idct_32x32_dc_12_avx2;

            SAO_BAND_INIT(12, avx2);
            SAO_EDGE_INIT(12, avx2);
        }
    }
}