
Merge remote-tracking branch 'qatar/master'

* qatar/master:
  ARM: allow runtime masking of CPU features
  dsputil: remove unused functions
  mov: Treat keyframe indexes as 1-origin if starting at non-zero.
  mov: Take stps entries into consideration also about key_off.
  Remove lowres video decoding

Conflicts:
	ffmpeg.c
	ffplay.c
	libavcodec/arm/vp8dsp_init_arm.c
	libavcodec/libopenjpegdec.c
	libavcodec/mjpegdec.c
	libavcodec/mpegvideo.c
	libavcodec/utils.c
	libavformat/mov.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer 2012-04-22 22:26:42 +02:00
commit 92ef4be4ab
52 changed files with 219 additions and 841 deletions
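The ARM changes in this merge all apply one pattern: compile-time HAVE_ARMV5TE/HAVE_ARMV6/HAVE_NEON checks in the per-codec init functions are replaced by a query of the runtime CPU flags, so individual features can be masked at run time (the new entries in the cmdutils -cpuflags table below expose exactly these flags). A minimal sketch of that pattern, using only calls visible in this diff (av_get_cpu_flags() and the have_*() helpers from libavutil/arm/cpu.h); the MyDSPContext type and the function names are hypothetical stand-ins for the real AC3DSPContext/DCADSPContext/... initializers:

#include "libavutil/cpu.h"      /* av_get_cpu_flags() */
#include "libavutil/arm/cpu.h"  /* have_armv6(), have_neon(), ... */

/* Hypothetical context struct standing in for AC3DSPContext, DCADSPContext, ... */
typedef struct MyDSPContext {
    void (*fast_path)(void);
} MyDSPContext;

static void fast_path_c(void)     { /* portable C fallback */ }
static void fast_path_armv6(void) { /* would dispatch to ARMv6 asm */ }
static void fast_path_neon(void)  { /* would dispatch to NEON asm */ }

/* Same shape as the ff_*_init_arm() functions below: read the (maskable)
 * runtime flag set once, then install the best implementation the flags
 * allow.  Because this is a runtime value rather than a #if, a
 * NEON-capable binary can still be told not to use NEON. */
static void init_my_dsp_arm(MyDSPContext *c)
{
    int cpu_flags = av_get_cpu_flags();

    c->fast_path = fast_path_c;
    if (have_armv6(cpu_flags))
        c->fast_path = fast_path_armv6;
    if (have_neon(cpu_flags))
        c->fast_path = fast_path_neon;
}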

View File

@@ -565,6 +565,14 @@ int opt_cpuflags(const char *opt, const char *arg)
{ "fma4" , NULL, 0, AV_OPT_TYPE_CONST, { AV_CPU_FLAG_FMA4 }, .unit = "flags" },
{ "3dnow" , NULL, 0, AV_OPT_TYPE_CONST, { AV_CPU_FLAG_3DNOW }, .unit = "flags" },
{ "3dnowext", NULL, 0, AV_OPT_TYPE_CONST, { AV_CPU_FLAG_3DNOWEXT }, .unit = "flags" },
+ { "armv5te", NULL, 0, AV_OPT_TYPE_CONST, { AV_CPU_FLAG_ARMV5TE }, .unit = "flags" },
+ { "armv6", NULL, 0, AV_OPT_TYPE_CONST, { AV_CPU_FLAG_ARMV6 }, .unit = "flags" },
+ { "armv6t2", NULL, 0, AV_OPT_TYPE_CONST, { AV_CPU_FLAG_ARMV6T2 }, .unit = "flags" },
+ { "vfp", NULL, 0, AV_OPT_TYPE_CONST, { AV_CPU_FLAG_VFP }, .unit = "flags" },
+ { "vfpv3", NULL, 0, AV_OPT_TYPE_CONST, { AV_CPU_FLAG_VFPV3 }, .unit = "flags" },
+ { "neon", NULL, 0, AV_OPT_TYPE_CONST, { AV_CPU_FLAG_NEON }, .unit = "flags" },
{ NULL },
};
static const AVClass class = {

View File

@@ -260,7 +260,6 @@ static int64_t duration = AV_NOPTS_VALUE;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
- static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame = AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct = AVDISCARD_DEFAULT;
@@ -1325,7 +1324,7 @@ static void alloc_picture(void *opaque)
/* SDL allocates a buffer smaller than requested if the video
* overlay hardware is unable to support the requested size. */
fprintf(stderr, "Error: the video system does not support an image\n"
- "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
+ "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
"to reduce the image size.\n", vp->width, vp->height );
do_exit(is);
}
@@ -3085,7 +3084,6 @@ static const OptionDef options[] = {
{ "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non spec compliant optimizations", "" },
{ "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
{ "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
- { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&lowres }, "", "" },
{ "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "", "" },
{ "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "", "" },
{ "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "", "" },

View File

@@ -336,7 +336,7 @@ void ff_dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx)
put_pixels_clamped_axp_p = c->put_pixels_clamped;
add_pixels_clamped_axp_p = c->add_pixels_clamped;
- if (!avctx->lowres && avctx->bits_per_raw_sample <= 8 &&
+ if (avctx->bits_per_raw_sample <= 8 &&
(avctx->idct_algo == FF_IDCT_AUTO ||
avctx->idct_algo == FF_IDCT_SIMPLEALPHA)) {
c->idct_put = ff_simple_idct_put_axp;

View File

@@ -19,6 +19,8 @@
*/
#include <stdint.h>
+ #include "libavutil/arm/cpu.h"
#include "libavutil/attributes.h"
#include "libavcodec/ac3dsp.h"
#include "config.h"
@@ -47,13 +49,15 @@ void ff_ac3_update_bap_counts_arm(uint16_t mant_cnt[16], uint8_t *bap, int len);
av_cold void ff_ac3dsp_init_arm(AC3DSPContext *c, int bit_exact)
{
+ int cpu_flags = av_get_cpu_flags();
c->update_bap_counts = ff_ac3_update_bap_counts_arm;
- if (HAVE_ARMV6) {
+ if (have_armv6(cpu_flags)) {
c->bit_alloc_calc_bap = ff_ac3_bit_alloc_calc_bap_armv6;
}
- if (HAVE_NEON) {
+ if (have_neon(cpu_flags)) {
c->ac3_exponent_min = ff_ac3_exponent_min_neon;
c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_neon;
c->ac3_lshift_int16 = ff_ac3_lshift_int16_neon;

View File

@@ -19,6 +19,8 @@
*/
#include "config.h"
+ #include "libavutil/arm/cpu.h"
#include "libavutil/attributes.h"
#include "libavcodec/dcadsp.h"
@@ -27,6 +29,8 @@ void ff_dca_lfe_fir_neon(float *out, const float *in, const float *coefs,
av_cold void ff_dcadsp_init_arm(DCADSPContext *s)
{
- if (HAVE_NEON)
+ int cpu_flags = av_get_cpu_flags();
+ if (have_neon(cpu_flags))
s->lfe_fir = ff_dca_lfe_fir_neon;
}

View File

@@ -19,6 +19,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+ #include "libavutil/arm/cpu.h"
#include "libavcodec/dsputil.h"
#include "dsputil_arm.h"
@@ -76,11 +77,12 @@ static void simple_idct_arm_add(uint8_t *dest, int line_size, DCTELEM *block)
void ff_dsputil_init_arm(DSPContext* c, AVCodecContext *avctx)
{
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
+ int cpu_flags = av_get_cpu_flags();
ff_put_pixels_clamped = c->put_pixels_clamped;
ff_add_pixels_clamped = c->add_pixels_clamped;
- if (!avctx->lowres && avctx->bits_per_raw_sample <= 8) {
+ if (avctx->bits_per_raw_sample <= 8) {
if(avctx->idct_algo == FF_IDCT_AUTO ||
avctx->idct_algo == FF_IDCT_ARM){
c->idct_put = j_rev_dct_arm_put;
@@ -117,8 +119,8 @@ void ff_dsputil_init_arm(DSPContext* c, AVCodecContext *avctx)
c->put_no_rnd_pixels_tab[1][3] = ff_put_no_rnd_pixels8_xy2_arm;
}
- if (HAVE_ARMV5TE) ff_dsputil_init_armv5te(c, avctx);
+ if (have_armv5te(cpu_flags)) ff_dsputil_init_armv5te(c, avctx);
- if (HAVE_ARMV6) ff_dsputil_init_armv6(c, avctx);
+ if (have_armv6(cpu_flags)) ff_dsputil_init_armv6(c, avctx);
- if (HAVE_ARMVFP) ff_dsputil_init_vfp(c, avctx);
+ if (have_vfp(cpu_flags)) ff_dsputil_init_vfp(c, avctx);
- if (HAVE_NEON) ff_dsputil_init_neon(c, avctx);
+ if (have_neon(cpu_flags)) ff_dsputil_init_neon(c, avctx);
}

View File

@@ -29,7 +29,7 @@ void ff_prefetch_arm(void *mem, int stride, int h);
av_cold void ff_dsputil_init_armv5te(DSPContext *c, AVCodecContext *avctx)
{
- if (!avctx->lowres && avctx->bits_per_raw_sample <= 8 &&
+ if (avctx->bits_per_raw_sample <= 8 &&
(avctx->idct_algo == FF_IDCT_AUTO ||
avctx->idct_algo == FF_IDCT_SIMPLEARMV5TE)) {
c->idct_put = ff_simple_idct_put_armv5te;

View File

@@ -74,7 +74,7 @@ av_cold void ff_dsputil_init_armv6(DSPContext *c, AVCodecContext *avctx)
{
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
- if (!avctx->lowres && avctx->bits_per_raw_sample <= 8 &&
+ if (avctx->bits_per_raw_sample <= 8 &&
(avctx->idct_algo == FF_IDCT_AUTO ||
avctx->idct_algo == FF_IDCT_SIMPLEARMV6)) {
c->idct_put = ff_simple_idct_put_armv6;

View File

@@ -182,7 +182,7 @@ void ff_dsputil_init_neon(DSPContext *c, AVCodecContext *avctx)
{
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
- if (!avctx->lowres && avctx->bits_per_raw_sample <= 8) {
+ if (avctx->bits_per_raw_sample <= 8) {
if (avctx->idct_algo == FF_IDCT_AUTO ||
avctx->idct_algo == FF_IDCT_SIMPLENEON) {
c->idct_put = ff_simple_idct_put_neon;

View File

@@ -18,6 +18,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+ #include "libavutil/arm/cpu.h"
#include "libavcodec/dsputil.h"
#include "dsputil_arm.h"
@@ -28,7 +29,9 @@ void ff_vector_fmul_reverse_vfp(float *dst, const float *src0,
void ff_dsputil_init_vfp(DSPContext* c, AVCodecContext *avctx)
{
- if (!HAVE_VFPV3)
+ int cpu_flags = av_get_cpu_flags();
+ if (!have_vfpv3(cpu_flags))
c->vector_fmul = ff_vector_fmul_vfp;
c->vector_fmul_reverse = ff_vector_fmul_reverse_vfp;
}

View File

@@ -18,6 +18,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+ #include "libavutil/arm/cpu.h"
#define CONFIG_FFT_FLOAT 0
#include "libavcodec/fft.h"
@@ -27,7 +29,9 @@ void ff_mdct_fixed_calcw_neon(FFTContext *s, FFTDouble *o, const FFTSample *i);
av_cold void ff_fft_fixed_init_arm(FFTContext *s)
{
- if (HAVE_NEON) {
+ int cpu_flags = av_get_cpu_flags();
+ if (have_neon(cpu_flags)) {
s->fft_permutation = FF_FFT_PERM_SWAP_LSBS;
s->fft_calc = ff_fft_fixed_calc_neon;

View File

@@ -18,6 +18,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+ #include "libavutil/arm/cpu.h"
#include "libavcodec/fft.h"
#include "libavcodec/rdft.h"
#include "libavcodec/synth_filter.h"
@@ -39,7 +40,9 @@ void ff_synth_filter_float_neon(FFTContext *imdct,
av_cold void ff_fft_init_arm(FFTContext *s)
{
- if (HAVE_NEON) {
+ int cpu_flags = av_get_cpu_flags();
+ if (have_neon(cpu_flags)) {
s->fft_permute = ff_fft_permute_neon;
s->fft_calc = ff_fft_calc_neon;
#if CONFIG_MDCT
@@ -54,7 +57,9 @@ av_cold void ff_fft_init_arm(FFTContext *s)
#if CONFIG_RDFT
av_cold void ff_rdft_init_arm(RDFTContext *s)
{
- if (HAVE_NEON)
+ int cpu_flags = av_get_cpu_flags();
+ if (have_neon(cpu_flags))
s->rdft_calc = ff_rdft_calc_neon;
}
#endif
@@ -62,7 +67,9 @@ av_cold void ff_rdft_init_arm(RDFTContext *s)
#if CONFIG_DCA_DECODER
av_cold void ff_synth_filter_init_arm(SynthFilterContext *s)
{
- if (HAVE_NEON)
+ int cpu_flags = av_get_cpu_flags();
+ if (have_neon(cpu_flags))
s->synth_filter_float = ff_synth_filter_float_neon;
}
#endif

View File

@@ -20,6 +20,7 @@
#include <stdint.h>
+ #include "libavutil/arm/cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/fmtconvert.h"
@@ -33,11 +34,13 @@ void ff_float_to_int16_vfp(int16_t *dst, const float *src, long len);
void ff_fmt_convert_init_arm(FmtConvertContext *c, AVCodecContext *avctx)
{
- if (HAVE_ARMVFP && HAVE_ARMV6) {
+ int cpu_flags = av_get_cpu_flags();
+ if (have_vfp(cpu_flags) && have_armv6(cpu_flags)) {
c->float_to_int16 = ff_float_to_int16_vfp;
}
- if (HAVE_NEON) {
+ if (have_neon(cpu_flags)) {
c->int32_to_float_fmul_scalar = ff_int32_to_float_fmul_scalar_neon;
if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {

View File

@@ -20,6 +20,7 @@
#include <stdint.h>
+ #include "libavutil/arm/cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
@@ -99,5 +100,8 @@ static void ff_h264dsp_init_neon(H264DSPContext *c, const int bit_depth, const i
void ff_h264dsp_init_arm(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
{
- if (HAVE_NEON) ff_h264dsp_init_neon(c, bit_depth, chroma_format_idc);
+ int cpu_flags = av_get_cpu_flags();
+ if (have_neon(cpu_flags))
+ ff_h264dsp_init_neon(c, bit_depth, chroma_format_idc);
}

View File

@@ -20,6 +20,7 @@
#include <stdint.h>
+ #include "libavutil/arm/cpu.h"
#include "libavcodec/h264pred.h"
void ff_pred16x16_vert_neon(uint8_t *src, int stride);
@@ -77,5 +78,8 @@ static void ff_h264_pred_init_neon(H264PredContext *h, int codec_id, const int b
void ff_h264_pred_init_arm(H264PredContext *h, int codec_id, int bit_depth, const int chroma_format_idc)
{
- if (HAVE_NEON) ff_h264_pred_init_neon(h, codec_id, bit_depth, chroma_format_idc);
+ int cpu_flags = av_get_cpu_flags();
+ if (have_neon(cpu_flags))
+ ff_h264_pred_init_neon(h, codec_id, bit_depth, chroma_format_idc);
}

View File

@@ -19,6 +19,8 @@
*/
#include <stdint.h>
+ #include "libavutil/arm/cpu.h"
#include "libavcodec/mpegaudiodsp.h"
#include "config.h"
@@ -27,7 +29,9 @@ void ff_mpadsp_apply_window_fixed_armv6(int32_t *synth_buf, int32_t *window,
void ff_mpadsp_init_arm(MPADSPContext *s)
{
- if (HAVE_ARMV6) {
+ int cpu_flags = av_get_cpu_flags();
+ if (have_armv6(cpu_flags)) {
s->apply_window_fixed = ff_mpadsp_apply_window_fixed_armv6;
}
}

View File

@@ -18,6 +18,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+ #include "libavutil/arm/cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
@@ -40,11 +41,12 @@ void ff_dct_unquantize_h263_intra_neon(MpegEncContext *s, DCTELEM *block,
void ff_MPV_common_init_arm(MpegEncContext *s)
{
- #if HAVE_ARMV5TE
- ff_MPV_common_init_armv5te(s);
- #endif
- if (HAVE_NEON) {
+ int cpu_flags = av_get_cpu_flags();
+ if (have_armv5te(cpu_flags))
+ ff_MPV_common_init_armv5te(s);
+ if (have_neon(cpu_flags)) {
s->dct_unquantize_h263_intra = ff_dct_unquantize_h263_intra_neon;
s->dct_unquantize_h263_inter = ff_dct_unquantize_h263_inter_neon;
}

View File

@@ -19,6 +19,7 @@
*/
#include "config.h"
+ #include "libavutil/arm/cpu.h"
#include "libavutil/attributes.h"
#include "libavcodec/sbrdsp.h"
@@ -51,7 +52,9 @@ void ff_sbr_hf_apply_noise_3_neon(float Y[64][2], const float *s_m,
av_cold void ff_sbrdsp_init_arm(SBRDSPContext *s)
{
- if (HAVE_NEON) {
+ int cpu_flags = av_get_cpu_flags();
+ if (have_neon(cpu_flags)) {
s->sum64x5 = ff_sbr_sum64x5_neon;
s->sum_square = ff_sbr_sum_square_neon;
s->neg_odd_64 = ff_sbr_neg_odd_64_neon;

View File

@@ -19,6 +19,8 @@
*/
#include <stdint.h>
+ #include "libavutil/arm/cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/vp56dsp.h"
@@ -27,7 +29,9 @@ void ff_vp6_edge_filter_ver_neon(uint8_t *yuv, int stride, int t);
void ff_vp56dsp_init_arm(VP56DSPContext *s, enum CodecID codec)
{
- if (codec != CODEC_ID_VP5 && HAVE_NEON) {
+ int cpu_flags = av_get_cpu_flags();
+ if (codec != CODEC_ID_VP5 && have_neon(cpu_flags)) {
s->edge_filter_hor = ff_vp6_edge_filter_hor_neon;
s->edge_filter_ver = ff_vp6_edge_filter_ver_neon;
}

View File

@@ -17,6 +17,8 @@
*/
#include <stdint.h>
+ #include "libavutil/arm/cpu.h"
#include "libavcodec/vp8dsp.h"
void ff_vp8_luma_dc_wht_dc_armv6(DCTELEM block[4][4][16], DCTELEM dc[16]);
@@ -301,7 +303,9 @@ av_cold void ff_vp8dsp_init_arm(VP8DSPContext *dsp)
dsp->put_vp8_bilinear_pixels_tab[2][2][0] = ff_put_vp8_bilin4_v_##opt; \
dsp->put_vp8_bilinear_pixels_tab[2][2][1] = ff_put_vp8_bilin4_hv_##opt; \
dsp->put_vp8_bilinear_pixels_tab[2][2][2] = ff_put_vp8_bilin4_hv_##opt
- if (HAVE_NEON) {
+ int cpu_flags = av_get_cpu_flags();
+ if (have_neon(cpu_flags)) {
set_func_ptrs(neon);
} else if (HAVE_ARMV6) {
set_func_ptrs(armv6);

View File

@@ -1451,7 +1451,7 @@ typedef struct AVCodecContext {
int width, height;
/**
- * Bitstream width / height, may be different from width/height if lowres enabled.
+ * Bitstream width / height, may be different from width/height.
* - encoding: unused
* - decoding: Set by user before init if known. Codec should override / dynamically change if needed.
*/
@@ -2649,7 +2649,7 @@ typedef struct AVCodecContext {
* - encoding: unused
* - decoding: Set by user.
*/
- int lowres;
+ attribute_deprecated int lowres;
/**
* the picture in the bitstream
@@ -2920,7 +2920,7 @@ typedef struct AVCodec {
const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. array is terminated by 0
- uint8_t max_lowres; ///< maximum value for lowres supported by the decoder
+ attribute_deprecated uint8_t max_lowres; ///< maximum value for lowres supported by the decoder
const AVClass *priv_class; ///< AVClass for the private context
const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
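With lowres now tagged attribute_deprecated in avcodec.h, API users that relied on it for cheap half/quarter-size decoding are expected to decode at full size and scale the result instead (the ffplay hunk above points at -vf "scale=w:h" for the same reason). A hedged sketch of that replacement using libswscale follows; it is not part of this commit, and the frame dimensions, target size and YUV 4:2:0 source format are assumptions:

#include "libavcodec/avcodec.h"   /* AVFrame */
#include "libswscale/swscale.h"
#include "libavutil/imgutils.h"   /* av_image_alloc() */

/* Scale a decoded frame down to dst_w x dst_h instead of decoding with lowres.
 * Returns 0 on success, <0 on error.  Assumes a YUV420P source frame. */
static int scale_decoded_frame(const AVFrame *in, int src_w, int src_h,
                               int dst_w, int dst_h,
                               uint8_t *dst_data[4], int dst_linesize[4])
{
    struct SwsContext *sws = sws_getContext(src_w, src_h, PIX_FMT_YUV420P,
                                            dst_w, dst_h, PIX_FMT_YUV420P,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    if (!sws)
        return -1;

    if (av_image_alloc(dst_data, dst_linesize, dst_w, dst_h,
                       PIX_FMT_YUV420P, 16) < 0) {
        sws_freeContext(sws);
        return -1;
    }

    /* One full-frame scale; with lowres the decoder itself produced the
     * smaller picture, here the decoder output is reduced afterwards. */
    sws_scale(sws, (const uint8_t * const *)in->data, in->linesize,
              0, src_h, dst_data, dst_linesize);

    sws_freeContext(sws);
    return 0;
}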

View File

@@ -381,38 +381,6 @@ void ff_put_pixels_clamped_c(const DCTELEM *block, uint8_t *restrict pixels,
}
}
static void put_pixels_clamped4_c(const DCTELEM *block, uint8_t *restrict pixels,
int line_size)
{
int i;
/* read the pixels */
for(i=0;i<4;i++) {
pixels[0] = av_clip_uint8(block[0]);
pixels[1] = av_clip_uint8(block[1]);
pixels[2] = av_clip_uint8(block[2]);
pixels[3] = av_clip_uint8(block[3]);
pixels += line_size;
block += 8;
}
}
static void put_pixels_clamped2_c(const DCTELEM *block, uint8_t *restrict pixels,
int line_size)
{
int i;
/* read the pixels */
for(i=0;i<2;i++) {
pixels[0] = av_clip_uint8(block[0]);
pixels[1] = av_clip_uint8(block[1]);
pixels += line_size;
block += 8;
}
}
void ff_put_signed_pixels_clamped_c(const DCTELEM *block,
uint8_t *restrict pixels,
int line_size)
@@ -454,36 +422,6 @@ void ff_add_pixels_clamped_c(const DCTELEM *block, uint8_t *restrict pixels,
}
}
static void add_pixels_clamped4_c(const DCTELEM *block, uint8_t *restrict pixels,
int line_size)
{
int i;
/* read the pixels */
for(i=0;i<4;i++) {
pixels[0] = av_clip_uint8(pixels[0] + block[0]);
pixels[1] = av_clip_uint8(pixels[1] + block[1]);
pixels[2] = av_clip_uint8(pixels[2] + block[2]);
pixels[3] = av_clip_uint8(pixels[3] + block[3]);
pixels += line_size;
block += 8;
}
}
static void add_pixels_clamped2_c(const DCTELEM *block, uint8_t *restrict pixels,
int line_size)
{
int i;
/* read the pixels */
for(i=0;i<2;i++) {
pixels[0] = av_clip_uint8(pixels[0] + block[0]);
pixels[1] = av_clip_uint8(pixels[1] + block[1]);
pixels += line_size;
block += 8;
}
}
static int sum_abs_dctelem_c(DCTELEM *block)
{
int sum=0, i;
@@ -2746,37 +2684,6 @@ static void ff_jref_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
ff_add_pixels_clamped_c(block, dest, line_size);
}
static void ff_jref_idct4_put(uint8_t *dest, int line_size, DCTELEM *block)
{
ff_j_rev_dct4 (block);
put_pixels_clamped4_c(block, dest, line_size);
}
static void ff_jref_idct4_add(uint8_t *dest, int line_size, DCTELEM *block)
{
ff_j_rev_dct4 (block);
add_pixels_clamped4_c(block, dest, line_size);
}
static void ff_jref_idct2_put(uint8_t *dest, int line_size, DCTELEM *block)
{
ff_j_rev_dct2 (block);
put_pixels_clamped2_c(block, dest, line_size);
}
static void ff_jref_idct2_add(uint8_t *dest, int line_size, DCTELEM *block)
{
ff_j_rev_dct2 (block);
add_pixels_clamped2_c(block, dest, line_size);
}
static void ff_jref_idct1_put(uint8_t *dest, int line_size, DCTELEM *block)
{
dest[0] = av_clip_uint8((block[0] + 4)>>3);
}
static void ff_jref_idct1_add(uint8_t *dest, int line_size, DCTELEM *block)
{
dest[0] = av_clip_uint8(dest[0] + ((block[0] + 4)>>3));
}
static void just_return(void *mem av_unused, int stride av_unused, int h av_unused) { return; }
/* init static data */
@@ -2843,22 +2750,6 @@ av_cold void ff_dsputil_init(DSPContext* c, AVCodecContext *avctx)
}
#endif //CONFIG_ENCODERS
if(avctx->lowres==1){
c->idct_put= ff_jref_idct4_put;
c->idct_add= ff_jref_idct4_add;
c->idct = ff_j_rev_dct4;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}else if(avctx->lowres==2){
c->idct_put= ff_jref_idct2_put;
c->idct_add= ff_jref_idct2_add;
c->idct = ff_j_rev_dct2;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}else if(avctx->lowres==3){
c->idct_put= ff_jref_idct1_put;
c->idct_add= ff_jref_idct1_add;
c->idct = ff_j_rev_dct1;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}else{
if (avctx->bits_per_raw_sample == 10) {
c->idct_put = ff_simple_idct_put_10;
c->idct_add = ff_simple_idct_add_10;
@@ -2896,7 +2787,6 @@ av_cold void ff_dsputil_init(DSPContext* c, AVCodecContext *avctx)
c->idct_permutation_type= FF_NO_IDCT_PERM;
}
}
- }
c->diff_pixels = diff_pixels_c;
c->put_pixels_clamped = ff_put_pixels_clamped_c;

View File

@@ -46,9 +46,6 @@ void ff_fdct248_islow_8(DCTELEM *data);
void ff_fdct248_islow_10(DCTELEM *data);
void ff_j_rev_dct (DCTELEM *data);
- void ff_j_rev_dct4 (DCTELEM *data);
- void ff_j_rev_dct2 (DCTELEM *data);
- void ff_j_rev_dct1 (DCTELEM *data);
void ff_wmv2_idct_c(DCTELEM *data);
void ff_fdct_mmx(DCTELEM *block);

View File

@@ -311,12 +311,6 @@ av_cold int ff_dvvideo_init(AVCodecContext *avctx)
/* 248DCT setup */
s->fdct[1] = dsp.fdct248;
s->idct_put[1] = ff_simple_idct248_put; // FIXME: need to add it to DSP
- if (avctx->lowres){
- for (i = 0; i < 64; i++){
- int j = ff_zigzag248_direct[i];
- s->dv_zigzag[1][i] = dsp.idct_permutation[(j & 7) + (j & 8) * 4 + (j & 48) / 2];
- }
- }else
memcpy(s->dv_zigzag[1], ff_zigzag248_direct, 64);
avctx->coded_frame = &s->picture;

View File

@@ -144,7 +144,7 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg)
LOCAL_ALIGNED_16(DCTELEM, sblock, [5*DV_MAX_BPM], [64]);
LOCAL_ALIGNED_16(uint8_t, mb_bit_buffer, [ 80 + FF_INPUT_BUFFER_PADDING_SIZE]); /* allow some slack */
LOCAL_ALIGNED_16(uint8_t, vs_bit_buffer, [5*80 + FF_INPUT_BUFFER_PADDING_SIZE]); /* allow some slack */
- const int log2_blocksize = 3-s->avctx->lowres;
+ const int log2_blocksize = 3;
int is_field_mode[5];
assert((((int)mb_bit_buffer) & 7) == 0);
@@ -382,6 +382,5 @@ AVCodec ff_dvvideo_decoder = {
.close = dvvideo_close,
.decode = dvvideo_decode_frame,
.capabilities = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
- .max_lowres = 3,
.long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
};

View File

@@ -907,7 +907,7 @@ void ff_er_frame_end(MpegEncContext *s)
/* We do not support ER of field pictures yet,
* though it should not crash if enabled. */
- if (!s->err_recognition || s->error_count == 0 || s->avctx->lowres ||
+ if (!s->err_recognition || s->error_count == 0 ||
s->avctx->hwaccel ||
s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
s->picture_structure != PICT_FRAME ||

View File

@@ -127,7 +127,6 @@ AVCodec ff_flv_decoder = {
.close = ff_h263_decode_end,
.decode = ff_h263_decode_frame,
.capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
- .max_lowres = 3,
.long_name = NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"),
.pix_fmts = ff_pixfmt_list_420,
};

View File

@@ -658,6 +658,5 @@ AVCodec ff_h261_decoder = {
.close = h261_decode_end,
.decode = h261_decode_frame,
.capabilities = CODEC_CAP_DR1,
- .max_lowres = 3,
.long_name = NULL_IF_CONFIG_SMALL("H.261"),
};

View File

@@ -151,7 +151,7 @@ static int get_consumed_bytes(MpegEncContext *s, int buf_size){
static int decode_slice(MpegEncContext *s){
const int part_mask= s->partitioned_frame ? (ER_AC_END|ER_AC_ERROR) : 0x7F;
- const int mb_size= 16>>s->avctx->lowres;
+ const int mb_size = 16;
s->last_resync_gb= s->gb;
s->first_slice_line= 1;
@@ -764,7 +764,6 @@ AVCodec ff_h263_decoder = {
.capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 |
CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
.flush = ff_mpeg_flush,
- .max_lowres = 3,
.long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996, H.263+ / H.263-1998 / H.263 version 2"),
.pix_fmts = ff_hwaccel_pixfmt_list_420,
};

View File

@@ -716,7 +716,6 @@ av_cold void ff_intrax8_common_end(IntraX8Context * w)
* The parent codec must call MPV_frame_start(), ff_er_frame_start() before calling this function.
* The parent codec must call ff_er_frame_end(), MPV_frame_end() after calling this function.
* This function does not use MPV_decode_mb().
- * lowres decoding is theoretically impossible.
* @param w pointer to IntraX8Context
* @param dquant doubled quantizer, it would be odd in case of VC-1 halfpq==1.
* @param quant_offset offset away from zero

View File

@@ -940,216 +940,3 @@ void ff_j_rev_dct(DCTBLOCK data)
dataptr++; /* advance pointer to next column */
}
}
#undef DCTSIZE
#define DCTSIZE 4
#define DCTSTRIDE 8
void ff_j_rev_dct4(DCTBLOCK data)
{
int32_t tmp0, tmp1, tmp2, tmp3;
int32_t tmp10, tmp11, tmp12, tmp13;
int32_t z1;
int32_t d0, d2, d4, d6;
register DCTELEM *dataptr;
int rowctr;
/* Pass 1: process rows. */
/* Note results are scaled up by sqrt(8) compared to a true IDCT; */
/* furthermore, we scale the results by 2**PASS1_BITS. */
data[0] += 4;
dataptr = data;
for (rowctr = DCTSIZE-1; rowctr >= 0; rowctr--) {
/* Due to quantization, we will usually find that many of the input
* coefficients are zero, especially the AC terms. We can exploit this
* by short-circuiting the IDCT calculation for any row in which all
* the AC terms are zero. In that case each output is equal to the
* DC coefficient (with scale factor as needed).
* With typical images and quantization tables, half or more of the
* row DCT calculations can be simplified this way.
*/
register int *idataptr = (int*)dataptr;
d0 = dataptr[0];
d2 = dataptr[1];
d4 = dataptr[2];
d6 = dataptr[3];
if ((d2 | d4 | d6) == 0) {
/* AC terms all zero */
if (d0) {
/* Compute a 32 bit value to assign. */
DCTELEM dcval = (DCTELEM) (d0 << PASS1_BITS);
register int v = (dcval & 0xffff) | ((dcval << 16) & 0xffff0000);
idataptr[0] = v;
idataptr[1] = v;
}
dataptr += DCTSTRIDE; /* advance pointer to next row */
continue;
}
/* Even part: reverse the even part of the forward DCT. */
/* The rotator is sqrt(2)*c(-6). */
if (d6) {
if (d2) {
/* d0 != 0, d2 != 0, d4 != 0, d6 != 0 */
z1 = MULTIPLY(d2 + d6, FIX_0_541196100);
tmp2 = z1 + MULTIPLY(-d6, FIX_1_847759065);
tmp3 = z1 + MULTIPLY(d2, FIX_0_765366865);
tmp0 = (d0 + d4) << CONST_BITS;
tmp1 = (d0 - d4) << CONST_BITS;
tmp10 = tmp0 + tmp3;
tmp13 = tmp0 - tmp3;
tmp11 = tmp1 + tmp2;
tmp12 = tmp1 - tmp2;
} else {
/* d0 != 0, d2 == 0, d4 != 0, d6 != 0 */
tmp2 = MULTIPLY(-d6, FIX_1_306562965);
tmp3 = MULTIPLY(d6, FIX_0_541196100);
tmp0 = (d0 + d4) << CONST_BITS;
tmp1 = (d0 - d4) << CONST_BITS;
tmp10 = tmp0 + tmp3;
tmp13 = tmp0 - tmp3;
tmp11 = tmp1 + tmp2;
tmp12 = tmp1 - tmp2;
}
} else {
if (d2) {
/* d0 != 0, d2 != 0, d4 != 0, d6 == 0 */
tmp2 = MULTIPLY(d2, FIX_0_541196100);
tmp3 = MULTIPLY(d2, FIX_1_306562965);
tmp0 = (d0 + d4) << CONST_BITS;
tmp1 = (d0 - d4) << CONST_BITS;
tmp10 = tmp0 + tmp3;
tmp13 = tmp0 - tmp3;
tmp11 = tmp1 + tmp2;
tmp12 = tmp1 - tmp2;
} else {
/* d0 != 0, d2 == 0, d4 != 0, d6 == 0 */
tmp10 = tmp13 = (d0 + d4) << CONST_BITS;
tmp11 = tmp12 = (d0 - d4) << CONST_BITS;
}
}
/* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
dataptr[0] = (DCTELEM) DESCALE(tmp10, CONST_BITS-PASS1_BITS);
dataptr[1] = (DCTELEM) DESCALE(tmp11, CONST_BITS-PASS1_BITS);
dataptr[2] = (DCTELEM) DESCALE(tmp12, CONST_BITS-PASS1_BITS);
dataptr[3] = (DCTELEM) DESCALE(tmp13, CONST_BITS-PASS1_BITS);
dataptr += DCTSTRIDE; /* advance pointer to next row */
}
/* Pass 2: process columns. */
/* Note that we must descale the results by a factor of 8 == 2**3, */
/* and also undo the PASS1_BITS scaling. */
dataptr = data;
for (rowctr = DCTSIZE-1; rowctr >= 0; rowctr--) {
/* Columns of zeroes can be exploited in the same way as we did with rows.
* However, the row calculation has created many nonzero AC terms, so the
* simplification applies less often (typically 5% to 10% of the time).
* On machines with very fast multiplication, it's possible that the
* test takes more time than it's worth. In that case this section
* may be commented out.
*/
d0 = dataptr[DCTSTRIDE*0];
d2 = dataptr[DCTSTRIDE*1];
d4 = dataptr[DCTSTRIDE*2];
d6 = dataptr[DCTSTRIDE*3];
/* Even part: reverse the even part of the forward DCT. */
/* The rotator is sqrt(2)*c(-6). */
if (d6) {
if (d2) {
/* d0 != 0, d2 != 0, d4 != 0, d6 != 0 */
z1 = MULTIPLY(d2 + d6, FIX_0_541196100);
tmp2 = z1 + MULTIPLY(-d6, FIX_1_847759065);
tmp3 = z1 + MULTIPLY(d2, FIX_0_765366865);
tmp0 = (d0 + d4) << CONST_BITS;
tmp1 = (d0 - d4) << CONST_BITS;
tmp10 = tmp0 + tmp3;
tmp13 = tmp0 - tmp3;
tmp11 = tmp1 + tmp2;
tmp12 = tmp1 - tmp2;
} else {
/* d0 != 0, d2 == 0, d4 != 0, d6 != 0 */
tmp2 = MULTIPLY(-d6, FIX_1_306562965);
tmp3 = MULTIPLY(d6, FIX_0_541196100);
tmp0 = (d0 + d4) << CONST_BITS;
tmp1 = (d0 - d4) << CONST_BITS;
tmp10 = tmp0 + tmp3;
tmp13 = tmp0 - tmp3;
tmp11 = tmp1 + tmp2;
tmp12 = tmp1 - tmp2;
}
} else {
if (d2) {
/* d0 != 0, d2 != 0, d4 != 0, d6 == 0 */
tmp2 = MULTIPLY(d2, FIX_0_541196100);
tmp3 = MULTIPLY(d2, FIX_1_306562965);
tmp0 = (d0 + d4) << CONST_BITS;
tmp1 = (d0 - d4) << CONST_BITS;
tmp10 = tmp0 + tmp3;
tmp13 = tmp0 - tmp3;
tmp11 = tmp1 + tmp2;
tmp12 = tmp1 - tmp2;
} else {
/* d0 != 0, d2 == 0, d4 != 0, d6 == 0 */
tmp10 = tmp13 = (d0 + d4) << CONST_BITS;
tmp11 = tmp12 = (d0 - d4) << CONST_BITS;
}
}
/* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
dataptr[DCTSTRIDE*0] = tmp10 >> (CONST_BITS+PASS1_BITS+3);
dataptr[DCTSTRIDE*1] = tmp11 >> (CONST_BITS+PASS1_BITS+3);
dataptr[DCTSTRIDE*2] = tmp12 >> (CONST_BITS+PASS1_BITS+3);
dataptr[DCTSTRIDE*3] = tmp13 >> (CONST_BITS+PASS1_BITS+3);
dataptr++; /* advance pointer to next column */
}
}
void ff_j_rev_dct2(DCTBLOCK data){
int d00, d01, d10, d11;
data[0] += 4;
d00 = data[0+0*DCTSTRIDE] + data[1+0*DCTSTRIDE];
d01 = data[0+0*DCTSTRIDE] - data[1+0*DCTSTRIDE];
d10 = data[0+1*DCTSTRIDE] + data[1+1*DCTSTRIDE];
d11 = data[0+1*DCTSTRIDE] - data[1+1*DCTSTRIDE];
data[0+0*DCTSTRIDE]= (d00 + d10)>>3;
data[1+0*DCTSTRIDE]= (d01 + d11)>>3;
data[0+1*DCTSTRIDE]= (d00 - d10)>>3;
data[1+1*DCTSTRIDE]= (d01 - d11)>>3;
}
void ff_j_rev_dct1(DCTBLOCK data){
data[0] = (data[0] + 4)>>3;
}
#undef FIX
#undef CONST_BITS

View File

@@ -286,7 +286,6 @@ static int libopenjpeg_decode_frame(AVCodecContext *avctx,
}
ctx->dec_params.cp_limit_decoding = NO_LIMITATION;
- ctx->dec_params.cp_reduce = avctx->lowres;
// Tie decoder with decoding parameters
opj_setup_decoder(dec, &ctx->dec_params);
stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size);

View File

@@ -161,6 +161,5 @@ AVCodec ff_mjpegb_decoder = {
.close = ff_mjpeg_decode_end,
.decode = mjpegb_decode_frame,
.capabilities = CODEC_CAP_DR1,
- .max_lowres = 3,
.long_name = NULL_IF_CONFIG_SMALL("Apple MJPEG-B"),
};

View File

@@ -941,21 +941,6 @@ static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
return 0;
}
- static av_always_inline void mjpeg_copy_block(uint8_t *dst, const uint8_t *src,
- int linesize, int lowres)
- {
- switch (lowres) {
- case 0: copy_block8(dst, src, linesize, linesize, 8);
- break;
- case 1: copy_block4(dst, src, linesize, linesize, 4);
- break;
- case 2: copy_block2(dst, src, linesize, linesize, 2);
- break;
- case 3: *dst = *src;
- break;
- }
- }
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
int Al, const uint8_t *mb_bitmask,
const AVFrame *reference)
@@ -1018,16 +1003,16 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
x = 0;
y = 0;
for (j = 0; j < n; j++) {
- block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
- (h * mb_x + x) * 8) >> s->avctx->lowres);
+ block_offset = ((linesize[c] * (v * mb_y + y) * 8) +
+ (h * mb_x + x) * 8);
if (s->interlaced && s->bottom_field)
block_offset += linesize[c] >> 1;
ptr = data[c] + block_offset;
if (!s->progressive) {
if (copy_mb)
- mjpeg_copy_block(ptr, reference_data[c] + block_offset,
- linesize[c], s->avctx->lowres);
+ copy_block8(ptr, reference_data[c] + block_offset,
+ linesize[c], linesize[c], 8);
else {
s->dsp.clear_block(s->block);
if (decode_block(s, s->block, i,
@@ -1829,7 +1814,6 @@ AVCodec ff_mjpeg_decoder = {
.close = ff_mjpeg_decode_end,
.decode = ff_mjpeg_decode_frame,
.capabilities = CODEC_CAP_DR1,
- .max_lowres = 3,
.long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
.priv_class = &mjpegdec_class,
};
@@ -1843,6 +1827,5 @@ AVCodec ff_thp_decoder = {
.close = ff_mjpeg_decode_end,
.decode = ff_mjpeg_decode_frame,
.capabilities = CODEC_CAP_DR1,
- .max_lowres = 3,
.long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"),
};

View File

@@ -1676,7 +1676,6 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
const uint8_t **buf, int buf_size)
{
AVCodecContext *avctx = s->avctx;
- const int lowres = s->avctx->lowres;
const int field_pic = s->picture_structure != PICT_FRAME;
s->resync_mb_x =
@@ -1797,14 +1796,14 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
}
}
- s->dest[0] += 16 >> lowres;
- s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
- s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
+ s->dest[0] += 16;
+ s->dest[1] += 16 >> s->chroma_x_shift;
+ s->dest[2] += 16 >> s->chroma_x_shift;
ff_MPV_decode_mb(s, s->block);
if (++s->mb_x >= s->mb_width) {
- const int mb_size = 16 >> s->avctx->lowres;
+ const int mb_size = 16;
ff_draw_horiz_band(s, mb_size*(s->mb_y >> field_pic), mb_size);
ff_MPV_report_decode_progress(s);
@@ -2579,7 +2578,6 @@ AVCodec ff_mpeg1video_decoder = {
CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
CODEC_CAP_SLICE_THREADS,
.flush = flush,
- .max_lowres = 3,
.long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
.update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg_decode_update_thread_context)
};
@@ -2596,7 +2594,6 @@ AVCodec ff_mpeg2video_decoder = {
CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
CODEC_CAP_SLICE_THREADS,
.flush = flush,
- .max_lowres = 3,
.long_name = NULL_IF_CONFIG_SMALL("MPEG-2 video"),
.profiles = NULL_IF_CONFIG_SMALL(mpeg2_video_profiles),
};

View File

@@ -2327,7 +2327,6 @@ AVCodec ff_mpeg4_decoder = {
CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
CODEC_CAP_FRAME_THREADS,
.flush = ff_mpeg_flush,
- .max_lowres = 3,
.long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
.pix_fmts = ff_hwaccel_pixfmt_list_420,
.profiles = NULL_IF_CONFIG_SMALL(mpeg4_video_profiles),

View File

@@ -1784,381 +1784,6 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
}
}
static inline int hpel_motion_lowres(MpegEncContext *s,
uint8_t *dest, uint8_t *src,
int field_based, int field_select,
int src_x, int src_y,
int width, int height, int stride,
int h_edge_pos, int v_edge_pos,
int w, int h, h264_chroma_mc_func *pix_op,
int motion_x, int motion_y)
{
const int lowres = s->avctx->lowres;
const int op_index = FFMIN(lowres, 2);
const int s_mask = (2 << lowres) - 1;
int emu = 0;
int sx, sy;
if (s->quarter_sample) {
motion_x /= 2;
motion_y /= 2;
}
sx = motion_x & s_mask;
sy = motion_y & s_mask;
src_x += motion_x >> lowres + 1;
src_y += motion_y >> lowres + 1;
src += src_y * stride + src_x;
if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
(unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
(h + 1) << field_based, src_x,
src_y << field_based,
h_edge_pos,
v_edge_pos);
src = s->edge_emu_buffer;
emu = 1;
}
sx = (sx << 2) >> lowres;
sy = (sy << 2) >> lowres;
if (field_select)
src += s->linesize;
pix_op[op_index](dest, src, stride, h, sx, sy);
return emu;
}
/* apply one mpeg motion vector to the three components */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
uint8_t *dest_y,
uint8_t *dest_cb,
uint8_t *dest_cr,
int field_based,
int bottom_field,
int field_select,
uint8_t **ref_picture,
h264_chroma_mc_func *pix_op,
int motion_x, int motion_y,
int h, int mb_y)
{
uint8_t *ptr_y, *ptr_cb, *ptr_cr;
int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
uvsx, uvsy;
const int lowres = s->avctx->lowres;
const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 2);
const int block_s = 8>>lowres;
const int s_mask = (2 << lowres) - 1;
const int h_edge_pos = s->h_edge_pos >> lowres;
const int v_edge_pos = s->v_edge_pos >> lowres;
linesize = s->current_picture.f.linesize[0] << field_based;
uvlinesize = s->current_picture.f.linesize[1] << field_based;
// FIXME obviously not perfect but qpel will not work in lowres anyway
if (s->quarter_sample) {
motion_x /= 2;
motion_y /= 2;
}
if(field_based){
motion_y += (bottom_field - field_select)*((1 << lowres)-1);
}
sx = motion_x & s_mask;
sy = motion_y & s_mask;
src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
if (s->out_format == FMT_H263) {
uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
uvsrc_x = src_x >> 1;
uvsrc_y = src_y >> 1;
} else if (s->out_format == FMT_H261) {
// even chroma mv's are full pel in H261
mx = motion_x / 4;
my = motion_y / 4;
uvsx = (2 * mx) & s_mask;
uvsy = (2 * my) & s_mask;
uvsrc_x = s->mb_x * block_s + (mx >> lowres);
uvsrc_y = mb_y * block_s + (my >> lowres);
} else {
if(s->chroma_y_shift){
mx = motion_x / 2;
my = motion_y / 2;
uvsx = mx & s_mask;
uvsy = my & s_mask;
uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
} else {
if(s->chroma_x_shift){
//Chroma422
mx = motion_x / 2;
uvsx = mx & s_mask;
uvsy = motion_y & s_mask;
uvsrc_y = src_y;
uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
} else {
//Chroma444
uvsx = motion_x & s_mask;
uvsy = motion_y & s_mask;
uvsrc_x = src_x;
uvsrc_y = src_y;
}
}
}
ptr_y = ref_picture[0] + src_y * linesize + src_x;
ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
(unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
s->linesize, 17, 17 + field_based,
src_x, src_y << field_based, h_edge_pos,
v_edge_pos);
ptr_y = s->edge_emu_buffer;
if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
9 + field_based,
uvsrc_x, uvsrc_y << field_based,
h_edge_pos >> 1, v_edge_pos >> 1);
s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
9 + field_based,
uvsrc_x, uvsrc_y << field_based,
h_edge_pos >> 1, v_edge_pos >> 1);
ptr_cb = uvbuf;
ptr_cr = uvbuf + 16;
}
}
// FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
if (bottom_field) {
dest_y += s->linesize;
dest_cb += s->uvlinesize;
dest_cr += s->uvlinesize;
}
if (field_select) {
ptr_y += s->linesize;
ptr_cb += s->uvlinesize;
ptr_cr += s->uvlinesize;
}
sx = (sx << 2) >> lowres;
sy = (sy << 2) >> lowres;
pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
uvsx = (uvsx << 2) >> lowres;
uvsy = (uvsy << 2) >> lowres;
if (h >> s->chroma_y_shift) {
pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
}
}
// FIXME h261 lowres loop filter
}
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
uint8_t *dest_cb, uint8_t *dest_cr,
uint8_t **ref_picture,
h264_chroma_mc_func * pix_op,
int mx, int my)
{
const int lowres = s->avctx->lowres;
const int op_index = FFMIN(lowres, 2);
const int block_s = 8 >> lowres;
const int s_mask = (2 << lowres) - 1;
const int h_edge_pos = s->h_edge_pos >> lowres + 1;
const int v_edge_pos = s->v_edge_pos >> lowres + 1;
int emu = 0, src_x, src_y, offset, sx, sy;
uint8_t *ptr;
if (s->quarter_sample) {
mx /= 2;
my /= 2;
}
/* In case of 8X8, we construct a single chroma motion vector
with a special rounding */
mx = ff_h263_round_chroma(mx);
my = ff_h263_round_chroma(my);
sx = mx & s_mask;
sy = my & s_mask;
src_x = s->mb_x * block_s + (mx >> lowres + 1);
src_y = s->mb_y * block_s + (my >> lowres + 1);
offset = src_y * s->uvlinesize + src_x;
ptr = ref_picture[1] + offset;
if (s->flags & CODEC_FLAG_EMU_EDGE) {
if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
(unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
ptr = s->edge_emu_buffer;
emu = 1;
}
}
sx = (sx << 2) >> lowres;
sy = (sy << 2) >> lowres;
pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
ptr = ref_picture[2] + offset;
if (emu) {
s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
src_x, src_y, h_edge_pos, v_edge_pos);
ptr = s->edge_emu_buffer;
}
pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
/**
* motion compensation of a single macroblock
* @param s context
* @param dest_y luma destination pointer
* @param dest_cb chroma cb/u destination pointer
* @param dest_cr chroma cr/v destination pointer
* @param dir direction (0->forward, 1->backward)
* @param ref_picture array[3] of pointers to the 3 planes of the reference picture
* @param pix_op halfpel motion compensation function (average or put normally)
* the motion vectors are taken from s->mv and the MV type from s->mv_type
*/
static inline void MPV_motion_lowres(MpegEncContext *s,
uint8_t *dest_y, uint8_t *dest_cb,
uint8_t *dest_cr,
int dir, uint8_t **ref_picture,
h264_chroma_mc_func *pix_op)
{
int mx, my;
int mb_x, mb_y, i;
const int lowres = s->avctx->lowres;
const int block_s = 8 >>lowres;
mb_x = s->mb_x;
mb_y = s->mb_y;
switch (s->mv_type) {
case MV_TYPE_16X16:
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
0, 0, 0,
ref_picture, pix_op,
s->mv[dir][0][0], s->mv[dir][0][1],
2 * block_s, mb_y);
break;
case MV_TYPE_8X8:
mx = 0;
my = 0;
for (i = 0; i < 4; i++) {
hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
s->linesize) * block_s,
ref_picture[0], 0, 0,
(2 * mb_x + (i & 1)) * block_s,
(2 * mb_y + (i >> 1)) * block_s,
s->width, s->height, s->linesize,
s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
block_s, block_s, pix_op,
s->mv[dir][i][0], s->mv[dir][i][1]);
mx += s->mv[dir][i][0];
my += s->mv[dir][i][1];
}
if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
pix_op, mx, my);
break;
case MV_TYPE_FIELD:
if (s->picture_structure == PICT_FRAME) {
/* top field */
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1, 0, s->field_select[dir][0],
ref_picture, pix_op,
s->mv[dir][0][0], s->mv[dir][0][1],
block_s, mb_y);
/* bottom field */
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1, 1, s->field_select[dir][1],
ref_picture, pix_op,
s->mv[dir][1][0], s->mv[dir][1][1],
block_s, mb_y);
} else {
if (s->picture_structure != s->field_select[dir][0] + 1 &&
s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
ref_picture = s->current_picture_ptr->f.data;
}
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
0, 0, s->field_select[dir][0],
ref_picture, pix_op,
s->mv[dir][0][0],
s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
}
break;
case MV_TYPE_16X8:
for (i = 0; i < 2; i++) {
uint8_t **ref2picture;
if (s->picture_structure == s->field_select[dir][i] + 1 ||
s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
ref2picture = ref_picture;
} else {
ref2picture = s->current_picture_ptr->f.data;
}
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
0, 0, s->field_select[dir][i],
ref2picture, pix_op,
s->mv[dir][i][0], s->mv[dir][i][1] +
2 * block_s * i, block_s, mb_y >> 1);
dest_y += 2 * block_s * s->linesize;
dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
}
break;
case MV_TYPE_DMV:
if (s->picture_structure == PICT_FRAME) {
for (i = 0; i < 2; i++) {
int j;
for (j = 0; j < 2; j++) {
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1, j, j ^ i,
ref_picture, pix_op,
s->mv[dir][2 * i + j][0],
s->mv[dir][2 * i + j][1],
block_s, mb_y);
}
pix_op = s->dsp.avg_h264_chroma_pixels_tab;
}
} else {
for (i = 0; i < 2; i++) {
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
0, 0, s->picture_structure != i + 1,
ref_picture, pix_op,
s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2 * block_s, mb_y >> 1);
// after put we make avg of the same block
pix_op = s->dsp.avg_h264_chroma_pixels_tab;
// opposite parity is always in the same
// frame if this is second field
if (!s->first_field) {
ref_picture = s->current_picture_ptr->f.data;
}
}
}
break;
default:
assert(0);
}
}
/**
* find the lowest MB row referenced in the MVs
*/
@@ -2268,7 +1893,7 @@
*/
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
- int lowres_flag, int is_mpeg12)
+ int is_mpeg12)
{
const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
@@ -2313,8 +1938,8 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
qpel_mc_func (*op_qpix)[16];
const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
const int uvlinesize = s->current_picture.f.linesize[1];
- const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
- const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
+ const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
+ const int block_size = 8;
/* avoid copy if macroblock skipped in last frame too */
/* skip only during decoding as we might trash the buffers during encoding a bit */
@@ -2363,17 +1988,6 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
}
}
- if(lowres_flag){
- h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
- if (s->mv_dir & MV_DIR_FORWARD) {
- MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
- op_pix = s->dsp.avg_h264_chroma_pixels_tab;
- }
- if (s->mv_dir & MV_DIR_BACKWARD) {
- MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
- }
- }else{
op_qpix= s->me.qpel_put;
if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
op_pix = s->dsp.put_pixels_tab;
@@ -2389,7 +2003,6 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
}
}
- }
/* skip dequant / idct if we are really late ;) */
if(s->avctx->skip_idct){
@@ -2513,12 +2126,10 @@ skip_idct:
void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
#if !CONFIG_SMALL
if(s->out_format == FMT_MPEG1) {
- if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
- else                 MPV_decode_mb_internal(s, block, 0, 1);
+ MPV_decode_mb_internal(s, block, 1);
} else
#endif
- if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
- else                 MPV_decode_mb_internal(s, block, 0, 0);
+ MPV_decode_mb_internal(s, block, 0);
}
/**
@@ -2593,7 +2204,7 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
const int uvlinesize = s->current_picture.f.linesize[1];
- const int mb_size= 4 - s->avctx->lowres;
+ const int mb_size= 4;
s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;

@@ -793,7 +793,7 @@ extern const enum PixelFormat ff_pixfmt_list_420[];
extern const enum PixelFormat ff_hwaccel_pixfmt_list_420[];
static inline void ff_update_block_index(MpegEncContext *s){
-    const int block_size= 8>>s->avctx->lowres;
+    const int block_size = 8;
    s->block_index[0]+=2;
    s->block_index[1]+=2;


@@ -1216,7 +1216,6 @@ AVCodec ff_msmpeg4v1_decoder = {
    .close          = ff_h263_decode_end,
    .decode         = ff_h263_decode_frame,
    .capabilities   = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
-    .max_lowres     = 3,
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 1"),
    .pix_fmts       = ff_pixfmt_list_420,
};
@@ -1230,7 +1229,6 @@ AVCodec ff_msmpeg4v2_decoder = {
    .close          = ff_h263_decode_end,
    .decode         = ff_h263_decode_frame,
    .capabilities   = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
-    .max_lowres     = 3,
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
    .pix_fmts       = ff_pixfmt_list_420,
};
@@ -1244,7 +1242,6 @@ AVCodec ff_msmpeg4v3_decoder = {
    .close          = ff_h263_decode_end,
    .decode         = ff_h263_decode_frame,
    .capabilities   = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
-    .max_lowres     = 3,
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
    .pix_fmts       = ff_pixfmt_list_420,
};
@@ -1258,7 +1255,6 @@ AVCodec ff_wmv1_decoder = {
    .close          = ff_h263_decode_end,
    .decode         = ff_h263_decode_frame,
    .capabilities   = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
-    .max_lowres     = 3,
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
    .pix_fmts       = ff_pixfmt_list_420,
};


@@ -339,5 +339,4 @@ AVCodec ff_mxpeg_decoder = {
    .close          = mxpeg_decode_end,
    .decode         = mxpeg_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
-    .max_lowres     = 3,
};


@@ -326,7 +326,6 @@ static const AVOption options[]={
{"dts_hd_ma", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_PROFILE_DTS_HD_MA }, INT_MIN, INT_MAX, A|E, "profile"},
{"level", NULL, OFFSET(level), AV_OPT_TYPE_INT, {.dbl = FF_LEVEL_UNKNOWN }, INT_MIN, INT_MAX, V|A|E, "level"},
{"unknown", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_LEVEL_UNKNOWN }, INT_MIN, INT_MAX, V|A|E, "level"},
-{"lowres", "decode at 1= 1/2, 2=1/4, 3=1/8 resolutions", OFFSET(lowres), AV_OPT_TYPE_INT, {.dbl = 0 }, 0, INT_MAX, V|A|D},
{"skip_threshold", "frame skip threshold", OFFSET(frame_skip_threshold), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"skip_factor", "frame skip factor", OFFSET(frame_skip_factor), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"skip_exp", "frame skip exponent", OFFSET(frame_skip_exp), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},


@@ -187,7 +187,7 @@ void ff_dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
    }
#endif //CONFIG_ENCODERS
-    if (avctx->lowres == 0 && avctx->bits_per_raw_sample <= 8) {
+    if (avctx->bits_per_raw_sample <= 8) {
        if ((avctx->idct_algo == FF_IDCT_AUTO) ||
            (avctx->idct_algo == FF_IDCT_ALTIVEC)) {
            c->idct_put = ff_idct_put_altivec;


@@ -435,7 +435,6 @@ av_log(s->avctx, AV_LOG_DEBUG, "\n");*/
//    s->obmc=1;
//    s->umvplus=1;
    s->modified_quant=1;
-    if(!s->avctx->lowres)
    s->loop_filter=1;
    if(s->avctx->debug & FF_DEBUG_PICT_INFO){
@@ -755,7 +754,6 @@ AVCodec ff_rv10_decoder = {
    .close          = rv10_decode_end,
    .decode         = rv10_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
-    .max_lowres     = 3,
    .long_name      = NULL_IF_CONFIG_SMALL("RealVideo 1.0"),
    .pix_fmts       = ff_pixfmt_list_420,
};
@@ -770,7 +768,6 @@ AVCodec ff_rv20_decoder = {
    .decode         = rv10_decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .flush          = ff_mpeg_flush,
-    .max_lowres     = 3,
    .long_name      = NULL_IF_CONFIG_SMALL("RealVideo 2.0"),
    .pix_fmts       = ff_pixfmt_list_420,
};


@@ -102,7 +102,6 @@ AVCodec ff_sp5x_decoder = {
    .close          = ff_mjpeg_decode_end,
    .decode         = sp5x_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
-    .max_lowres     = 3,
    .long_name      = NULL_IF_CONFIG_SMALL("Sunplus JPEG (SP5X)"),
};


@@ -149,8 +149,8 @@ unsigned avcodec_get_edge_width(void)
void avcodec_set_dimensions(AVCodecContext *s, int width, int height){
    s->coded_width = width;
    s->coded_height= height;
-    s->width = -((-width )>>s->lowres);
-    s->height= -((-height)>>s->lowres);
+    s->width = width;
+    s->height = height;
}
#define INTERNAL_BUFFER_SIZE (32+1)
@@ -239,9 +239,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
    *width = FFALIGN(*width , w_align);
    *height= FFALIGN(*height, h_align);
-    if(s->codec_id == CODEC_ID_H264 || s->lowres)
+    if (s->codec_id == CODEC_ID_H264)
        *height+=2; // some of the optimized chroma MC reads one line too much
-                    // which is also done in mpeg decoders with lowres > 0
    for (i = 0; i < 4; i++)
        linesize_align[i] = STRIDE_ALIGN;
@@ -844,6 +843,7 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVD
        ret = AVERROR(EINVAL);
        goto free_and_end;
    }
    if (av_codec_is_encoder(avctx->codec)) {
        int i;
        if (avctx->codec->sample_fmts) {


@@ -3203,7 +3203,7 @@ void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
    if (mm_flags & AV_CPU_FLAG_MMX) {
        const int idct_algo = avctx->idct_algo;
-        if (avctx->lowres == 0 && avctx->bits_per_raw_sample <= 8) {
+        if (avctx->bits_per_raw_sample <= 8) {
            if (idct_algo == FF_IDCT_AUTO || idct_algo == FF_IDCT_SIMPLEMMX) {
                c->idct_put = ff_simple_idct_put_mmx;
                c->idct_add = ff_simple_idct_add_mmx;


@@ -1852,7 +1852,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
    unsigned int stts_sample = 0;
    unsigned int sample_size;
    unsigned int distance = 0;
-    int key_off = sc->keyframe_count && sc->keyframes[0] == 1;
+    int key_off = (sc->keyframe_count && sc->keyframes[0] > 0) || (sc->stps_data && sc->stps_data[0] > 0);
    current_dts -= sc->dts_shift;

libavutil/arm/Makefile (new file)

@@ -0,0 +1 @@
OBJS += arm/cpu.o

libavutil/arm/cpu.c (new file)

@@ -0,0 +1,30 @@
/*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/cpu.h"
#include "config.h"
int ff_get_cpu_flags_arm(void)
{
return AV_CPU_FLAG_ARMV5TE * HAVE_ARMV5TE |
AV_CPU_FLAG_ARMV6 * HAVE_ARMV6 |
AV_CPU_FLAG_ARMV6T2 * HAVE_ARMV6T2 |
AV_CPU_FLAG_VFP * HAVE_ARMVFP |
AV_CPU_FLAG_VFPV3 * HAVE_VFPV3 |
AV_CPU_FLAG_NEON * HAVE_NEON;
}
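The table above only reports features whose support was compiled in (each HAVE_* macro is 0 or 1), so a flag never appears at runtime on a build that lacks the corresponding code. As a minimal caller-side sketch, not part of this commit, here is how that detection is consumed through the public libavutil API; it assumes a standalone program with the libavutil headers on the include path:

/* Sketch only: report NEON availability, then mask it off so the plain C
 * code paths are exercised.  av_get_cpu_flags() and av_force_cpu_flags()
 * are existing public entry points; AV_CPU_FLAG_NEON is added by this merge. */
#include <stdio.h>
#include "libavutil/cpu.h"

int main(void)
{
    int flags = av_get_cpu_flags();

    printf("NEON detected: %s\n", (flags & AV_CPU_FLAG_NEON) ? "yes" : "no");

    /* Pretend NEON is absent; subsequent flag queries see the forced value,
     * so codec init code falls back to the scalar implementations. */
    av_force_cpu_flags(flags & ~AV_CPU_FLAG_NEON);
    return 0;
}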

libavutil/arm/cpu.h (new file)

@@ -0,0 +1,32 @@
/*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_ARM_CPU_H
#define AVUTIL_ARM_CPU_H
#include "config.h"
#include "libavutil/cpu.h"
#define have_armv5te(flags) (HAVE_ARMV5TE && ((flags) & AV_CPU_FLAG_ARMV5TE))
#define have_armv6(flags) (HAVE_ARMV6 && ((flags) & AV_CPU_FLAG_ARMV6))
#define have_armv6t2(flags) (HAVE_ARMV6T2 && ((flags) & AV_CPU_FLAG_ARMV6T2))
#define have_vfp(flags) (HAVE_ARMVFP && ((flags) & AV_CPU_FLAG_VFP))
#define have_vfpv3(flags) (HAVE_VFPV3 && ((flags) & AV_CPU_FLAG_VFPV3))
#define have_neon(flags) (HAVE_NEON && ((flags) & AV_CPU_FLAG_NEON))
#endif
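A hedged sketch (not taken from the patch) of how per-codec ARM init code is expected to use these helpers: each have_*() macro folds the compile-time HAVE_* result into the runtime flag test, so a build configured without NEON compiles the whole branch away while a NEON build can still be masked at runtime. The function names below are hypothetical.

#include "libavutil/arm/cpu.h"

void ff_exampledsp_init_neon(void);   /* hypothetical NEON init, only built when HAVE_NEON */

void ff_exampledsp_init_arm(void)     /* hypothetical per-arch init hook */
{
    int cpu_flags = av_get_cpu_flags();

    /* Taken only when NEON support was compiled in AND detected (or forced)
     * at runtime -- the runtime masking this merge enables. */
    if (have_neon(cpu_flags))
        ff_exampledsp_init_neon();
}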


@@ -31,6 +31,7 @@ int av_get_cpu_flags(void)
    if (checked)
        return flags;
+    if (ARCH_ARM) flags = ff_get_cpu_flags_arm();
    if (ARCH_PPC) flags = ff_get_cpu_flags_ppc();
    if (ARCH_X86) flags = ff_get_cpu_flags_x86();
@@ -54,7 +55,14 @@ static const struct {
    int flag;
    const char *name;
} cpu_flag_tab[] = {
-#if   ARCH_PPC
+#if   ARCH_ARM
+    { AV_CPU_FLAG_ARMV5TE, "armv5te" },
+    { AV_CPU_FLAG_ARMV6,   "armv6"   },
+    { AV_CPU_FLAG_ARMV6T2, "armv6t2" },
+    { AV_CPU_FLAG_VFP,     "vfp"     },
+    { AV_CPU_FLAG_VFPV3,   "vfpv3"   },
+    { AV_CPU_FLAG_NEON,    "neon"    },
+#elif ARCH_PPC
    { AV_CPU_FLAG_ALTIVEC, "altivec" },
#elif ARCH_X86
    { AV_CPU_FLAG_MMX, "mmx" },


@@ -45,6 +45,13 @@
#define AV_CPU_FLAG_FMA4         0x0800 ///< Bulldozer FMA4 functions
#define AV_CPU_FLAG_ALTIVEC      0x0001 ///< standard
+#define AV_CPU_FLAG_ARMV5TE      (1 << 0)
+#define AV_CPU_FLAG_ARMV6        (1 << 1)
+#define AV_CPU_FLAG_ARMV6T2      (1 << 2)
+#define AV_CPU_FLAG_VFP          (1 << 3)
+#define AV_CPU_FLAG_VFPV3        (1 << 4)
+#define AV_CPU_FLAG_NEON         (1 << 5)
/**
 * Return the flags which specify extensions supported by the CPU.
 */
@@ -66,6 +73,7 @@ void av_force_cpu_flags(int flags);
attribute_deprecated void av_set_cpu_flags_mask(int mask);
/* The following CPU-specific functions shall not be called directly. */
+int ff_get_cpu_flags_arm(void);
int ff_get_cpu_flags_ppc(void);
int ff_get_cpu_flags_x86(void);
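Note that the new ARM bits start at (1 << 0) and so reuse the numeric values of the per-architecture PPC and x86 flags (AV_CPU_FLAG_ALTIVEC above is also 0x0001); a flag mask is therefore only meaningful for the architecture it was built for. A small illustrative helper, assuming only the public header shown here:

#include "libavutil/cpu.h"

/* Keep only the ARM SIMD/FPU feature bits from a detected flag set;
 * everything else is dropped.  Illustrative only, not part of the merge. */
static int keep_arm_simd(int flags)
{
    return flags & (AV_CPU_FLAG_VFP | AV_CPU_FLAG_VFPV3 | AV_CPU_FLAG_NEON);
}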