
Merge remote-tracking branch 'qatar/master'

* qatar/master: (31 commits)
  cdxl demux: do not create packets with uninitialized data at EOF.
  Replace computations of remaining bits with calls to get_bits_left().
  amrnb/amrwb: Remove get_bits usage.
  cosmetics: reindent
  avformat: do not require a pixel/sample format if there is no decoder
  avformat: do not fill-in audio packet duration in compute_pkt_fields()
  lavf: Use av_get_audio_frame_duration() in get_audio_frame_size()
  dca_parser: parse the sample rate and frame durations
  libspeexdec: do not set AVCodecContext.frame_size
  libopencore-amr: do not set AVCodecContext.frame_size
  alsdec: do not set AVCodecContext.frame_size
  siff: do not set AVCodecContext.frame_size
  amr demuxer: do not set AVCodecContext.frame_size.
  aiffdec: do not set AVCodecContext.frame_size
  mov: do not set AVCodecContext.frame_size
  ape: do not set AVCodecContext.frame_size.
  rdt: remove workaround for infinite loop with aac
  avformat: do not require frame_size in avformat_find_stream_info() for CELT
  avformat: do not require frame_size in avformat_find_stream_info() for MP1/2/3
  avformat: do not require frame_size in avformat_find_stream_info() for AAC
  ...

Conflicts:
	doc/APIchanges
	libavcodec/Makefile
	libavcodec/avcodec.h
	libavcodec/h264.c
	libavcodec/h264_ps.c
	libavcodec/utils.c
	libavcodec/version.h
	libavcodec/x86/dsputil_mmx.c
	libavformat/utils.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit f095391a14
Michael Niedermayer, 2012-03-06 03:56:25 +01:00
43 changed files with 1219 additions and 885 deletions


@ -35,6 +35,10 @@ API changes, most recent first:
2012-01-24 - xxxxxxx - lavfi 2.60.100
Add avfilter_graph_dump.
2012-xx-xx - lavc 54.8.0
xxxxxxx Add av_get_exact_bits_per_sample()
xxxxxxx Add av_get_audio_frame_duration()
2012-03-xx - xxxxxxx - lavc 54.7.0 - avcodec.h
Add av_codec_is_encoder/decoder().


@ -118,7 +118,8 @@ OBJS-$(CONFIG_CLJR_ENCODER) += cljr.o
OBJS-$(CONFIG_COOK_DECODER) += cook.o
OBJS-$(CONFIG_CSCD_DECODER) += cscd.o
OBJS-$(CONFIG_CYUV_DECODER) += cyuv.o
OBJS-$(CONFIG_DCA_DECODER) += dca.o synth_filter.o dcadsp.o
OBJS-$(CONFIG_DCA_DECODER) += dca.o synth_filter.o dcadsp.o \
dca_parser.o
OBJS-$(CONFIG_DCA_ENCODER) += dcaenc.o
OBJS-$(CONFIG_DIRAC_DECODER) += diracdec.o dirac.o diracdsp.o \
dirac_arith.o mpeg12data.o dwt.o


@ -1695,7 +1695,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
ctx->reverted_channels = NULL;
}
avctx->frame_size = sconf->frame_length;
channel_size = sconf->frame_length + sconf->max_order;
ctx->prev_raw_samples = av_malloc (sizeof(*ctx->prev_raw_samples) * sconf->max_order);


@ -44,7 +44,6 @@
#include <math.h>
#include "avcodec.h"
#include "get_bits.h"
#include "libavutil/common.h"
#include "celp_math.h"
#include "celp_filters.h"
@ -189,16 +188,11 @@ static av_cold int amrnb_decode_init(AVCodecContext *avctx)
static enum Mode unpack_bitstream(AMRContext *p, const uint8_t *buf,
int buf_size)
{
GetBitContext gb;
enum Mode mode;
init_get_bits(&gb, buf, buf_size * 8);
// Decode the first octet.
skip_bits(&gb, 1); // padding bit
mode = get_bits(&gb, 4); // frame type
p->bad_frame_indicator = !get_bits1(&gb); // quality bit
skip_bits(&gb, 2); // two padding bits
mode = buf[0] >> 3 & 0x0F; // frame type
p->bad_frame_indicator = (buf[0] & 0x4) != 0x4; // quality bit
if (mode >= N_MODES || buf_size < frame_sizes_nb[mode] + 1) {
return NO_DATA;


@ -27,7 +27,6 @@
#include "libavutil/lfg.h"
#include "avcodec.h"
#include "get_bits.h"
#include "lsp.h"
#include "celp_math.h"
#include "celp_filters.h"
@ -120,14 +119,9 @@ static av_cold int amrwb_decode_init(AVCodecContext *avctx)
*/
static int decode_mime_header(AMRWBContext *ctx, const uint8_t *buf)
{
GetBitContext gb;
init_get_bits(&gb, buf, 8);
/* Decode frame header (1st octet) */
skip_bits(&gb, 1); // padding bit
ctx->fr_cur_mode = get_bits(&gb, 4);
ctx->fr_quality = get_bits1(&gb);
skip_bits(&gb, 2); // padding bits
ctx->fr_cur_mode = buf[0] >> 3 & 0x0F;
ctx->fr_quality = (buf[0] & 0x4) != 0x4;
return 1;
}


@ -4128,6 +4128,26 @@ int av_get_bits_per_sample(enum CodecID codec_id);
*/
enum CodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be);
/**
* Return codec bits per sample.
* Only return non-zero if the bits per sample is exactly correct, not an
* approximation.
*
* @param[in] codec_id the codec
* @return Number of bits per sample or zero if unknown for the given codec.
*/
int av_get_exact_bits_per_sample(enum CodecID codec_id);
/**
* Return audio frame duration.
*
* @param avctx codec context
* @param frame_bytes size of the frame, or 0 if unknown
* @return frame duration, in samples, if known. 0 if not able to
* determine.
*/
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes);
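/* Usage sketch (illustrative, not part of this commit; "st" and "pkt" are
 * hypothetical demuxer variables): a caller can derive a packet duration
 * from the new helper instead of relying on AVCodecContext.frame_size:
 *
 *     int duration = av_get_audio_frame_duration(st->codec, pkt->size);
 *     if (duration > 0)
 *         pkt->duration = duration; // in samples at st->codec->sample_rate
 */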
/* frame parsing */
typedef struct AVCodecParserContext {
void *priv_data;


@ -38,6 +38,7 @@
#include "dcadata.h"
#include "dcahuff.h"
#include "dca.h"
#include "dca_parser.h"
#include "synth_filter.h"
#include "dcadsp.h"
#include "fmtconvert.h"
@ -1381,47 +1382,6 @@ static int dca_decode_block(DCAContext *s, int base_channel, int block_index)
return 0;
}
/**
* Convert bitstream to one representation based on sync marker
*/
static int dca_convert_bitstream(const uint8_t *src, int src_size, uint8_t *dst,
int max_size)
{
uint32_t mrk;
int i, tmp;
const uint16_t *ssrc = (const uint16_t *) src;
uint16_t *sdst = (uint16_t *) dst;
PutBitContext pb;
if ((unsigned) src_size > (unsigned) max_size) {
// av_log(NULL, AV_LOG_ERROR, "Input frame size larger than DCA_MAX_FRAME_SIZE!\n");
// return -1;
src_size = max_size;
}
mrk = AV_RB32(src);
switch (mrk) {
case DCA_MARKER_RAW_BE:
memcpy(dst, src, src_size);
return src_size;
case DCA_MARKER_RAW_LE:
for (i = 0; i < (src_size + 1) >> 1; i++)
*sdst++ = av_bswap16(*ssrc++);
return src_size;
case DCA_MARKER_14B_BE:
case DCA_MARKER_14B_LE:
init_put_bits(&pb, dst, max_size);
for (i = 0; i < (src_size + 1) >> 1; i++, src += 2) {
tmp = ((mrk == DCA_MARKER_14B_BE) ? AV_RB16(src) : AV_RL16(src)) & 0x3FFF;
put_bits(&pb, 14, tmp);
}
flush_put_bits(&pb);
return (put_bits_count(&pb) + 7) >> 3;
default:
return AVERROR_INVALIDDATA;
}
}
/**
* Return the number of channels in an ExSS speaker mask (HD)
*/
@ -1709,8 +1669,8 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
s->xch_present = 0;
s->dca_buffer_size = dca_convert_bitstream(buf, buf_size, s->dca_buffer,
DCA_MAX_FRAME_SIZE + DCA_MAX_EXSS_HEADER_SIZE);
s->dca_buffer_size = ff_dca_convert_bitstream(buf, buf_size, s->dca_buffer,
DCA_MAX_FRAME_SIZE + DCA_MAX_EXSS_HEADER_SIZE);
if (s->dca_buffer_size == AVERROR_INVALIDDATA) {
av_log(avctx, AV_LOG_ERROR, "Not a valid DCA frame\n");
return AVERROR_INVALIDDATA;
@ -1724,7 +1684,6 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
//set AVCodec values with parsed data
avctx->sample_rate = s->sample_rate;
avctx->bit_rate = s->bit_rate;
avctx->frame_size = s->sample_blocks * 32;
s->profile = FF_PROFILE_DTS;


@ -24,6 +24,10 @@
#include "parser.h"
#include "dca.h"
#include "dcadata.h"
#include "dca_parser.h"
#include "get_bits.h"
#include "put_bits.h"
typedef struct DCAParseContext {
ParseContext pc;
@ -98,6 +102,71 @@ static av_cold int dca_parse_init(AVCodecParserContext * s)
return 0;
}
int ff_dca_convert_bitstream(const uint8_t *src, int src_size, uint8_t *dst,
int max_size)
{
uint32_t mrk;
int i, tmp;
const uint16_t *ssrc = (const uint16_t *) src;
uint16_t *sdst = (uint16_t *) dst;
PutBitContext pb;
if ((unsigned) src_size > (unsigned) max_size)
src_size = max_size;
mrk = AV_RB32(src);
switch (mrk) {
case DCA_MARKER_RAW_BE:
memcpy(dst, src, src_size);
return src_size;
case DCA_MARKER_RAW_LE:
for (i = 0; i < (src_size + 1) >> 1; i++)
*sdst++ = av_bswap16(*ssrc++);
return src_size;
case DCA_MARKER_14B_BE:
case DCA_MARKER_14B_LE:
init_put_bits(&pb, dst, max_size);
for (i = 0; i < (src_size + 1) >> 1; i++, src += 2) {
tmp = ((mrk == DCA_MARKER_14B_BE) ? AV_RB16(src) : AV_RL16(src)) & 0x3FFF;
put_bits(&pb, 14, tmp);
}
flush_put_bits(&pb);
return (put_bits_count(&pb) + 7) >> 3;
default:
return AVERROR_INVALIDDATA;
}
}
static int dca_parse_params(const uint8_t *buf, int buf_size, int *duration,
int *sample_rate)
{
GetBitContext gb;
uint8_t hdr[12 + FF_INPUT_BUFFER_PADDING_SIZE] = { 0 };
int ret, sample_blocks, sr_code;
if (buf_size < 12)
return AVERROR_INVALIDDATA;
if ((ret = ff_dca_convert_bitstream(buf, 12, hdr, 12)) < 0)
return ret;
init_get_bits(&gb, hdr, 96);
skip_bits_long(&gb, 39);
sample_blocks = get_bits(&gb, 7) + 1;
if (sample_blocks < 8)
return AVERROR_INVALIDDATA;
*duration = 256 * (sample_blocks / 8);
skip_bits(&gb, 20);
sr_code = get_bits(&gb, 4);
*sample_rate = dca_sample_rates[sr_code];
if (*sample_rate == 0)
return AVERROR_INVALIDDATA;
return 0;
}
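/* Worked example (illustrative): a typical DTS core frame signals
 * sample_blocks = 16, i.e. 16 blocks of 32 samples, so the parser reports
 * a duration of 256 * (16 / 8) = 512 samples. */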
static int dca_parse(AVCodecParserContext * s,
AVCodecContext * avctx,
const uint8_t ** poutbuf, int *poutbuf_size,
@ -105,7 +174,7 @@ static int dca_parse(AVCodecParserContext * s,
{
DCAParseContext *pc1 = s->priv_data;
ParseContext *pc = &pc1->pc;
int next;
int next, duration, sample_rate;
if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
next = buf_size;
@ -118,6 +187,15 @@ static int dca_parse(AVCodecParserContext * s,
return buf_size;
}
}
/* read the duration and sample rate from the frame header */
if (!dca_parse_params(buf, buf_size, &duration, &sample_rate)) {
s->duration = duration;
if (!avctx->sample_rate)
avctx->sample_rate = sample_rate;
} else
s->duration = 0;
*poutbuf = buf;
*poutbuf_size = buf_size;
return next;

libavcodec/dca_parser.h (new file, 36 lines)

@ -0,0 +1,36 @@
/*
* DCA parser
* Copyright (C) 2004 Gildas Bazin
* Copyright (C) 2004 Benjamin Zores
* Copyright (C) 2006 Benjamin Larsson
* Copyright (C) 2007 Konstantin Shishkov
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_DCA_PARSER_H
#define AVCODEC_DCA_PARSER_H
#include <stdint.h>
/**
* Convert bitstream to one representation based on sync marker
*/
int ff_dca_convert_bitstream(const uint8_t *src, int src_size, uint8_t *dst,
int max_size);
#endif /* AVCODEC_DCA_PARSER_H */


@ -49,7 +49,7 @@ typedef struct Escape124Context {
} Escape124Context;
static int can_safely_read(GetBitContext* gb, int bits) {
return get_bits_count(gb) + bits <= gb->size_in_bits;
return get_bits_left(gb) >= bits;
}
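/* Note (illustrative, not part of the diff): get_bits_left(gb) equals
 * gb->size_in_bits - get_bits_count(gb), so a bound such as
 * "get_bits_count(gb) + bits <= gb->size_in_bits" rewrites directly to
 * "get_bits_left(gb) >= bits"; the following hunks apply the same identity. */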
/**


@ -265,7 +265,7 @@ static int h261_decode_mb(H261Context *h){
while( h->mba_diff == MBA_STUFFING ); // stuffing
if ( h->mba_diff < 0 ){
if ( get_bits_count(&s->gb) + 7 >= s->gb.size_in_bits )
if (get_bits_left(&s->gb) <= 7)
return SLICE_END;
av_log(s->avctx, AV_LOG_ERROR, "illegal mba at %d %d\n", s->mb_x, s->mb_y);


@ -664,7 +664,7 @@ retry:
ret = decode_slice(s);
while(s->mb_y<s->mb_height){
if(s->msmpeg4_version){
if(s->slice_height==0 || s->mb_x!=0 || (s->mb_y%s->slice_height)!=0 || get_bits_count(&s->gb) > s->gb.size_in_bits)
if(s->slice_height==0 || s->mb_x!=0 || (s->mb_y%s->slice_height)!=0 || get_bits_left(&s->gb)<0)
break;
}else{
int prev_x=s->mb_x, prev_y=s->mb_y;


@ -3687,8 +3687,8 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg){
if(s->mb_y >= s->mb_height){
tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
if( get_bits_count(&s->gb) == s->gb.size_in_bits
|| get_bits_count(&s->gb) < s->gb.size_in_bits && !(s->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
if ( get_bits_left(&s->gb) == 0
|| get_bits_left(&s->gb) > 0 && !(s->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END&part_mask);
return 0;
@ -3700,9 +3700,9 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg){
}
}
if(get_bits_count(&s->gb) >= s->gb.size_in_bits && s->mb_skip_run<=0){
if (get_bits_left(&s->gb) <= 0 && s->mb_skip_run <= 0){
tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
if(get_bits_count(&s->gb) == s->gb.size_in_bits ){
if (get_bits_left(&s->gb) == 0) {
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END&part_mask);
if (s->mb_x > lf_x_start) loop_filter(h, lf_x_start, s->mb_x);


@ -241,7 +241,7 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps){
sps->num_reorder_frames= get_ue_golomb(&s->gb);
get_ue_golomb(&s->gb); /*max_dec_frame_buffering*/
if(get_bits_left(&s->gb) < 0){
if (get_bits_left(&s->gb) < 0) {
sps->num_reorder_frames=0;
sps->bitstream_restriction_flag= 0;
}


@ -164,7 +164,7 @@ static int decode_buffering_period(H264Context *h){
int ff_h264_decode_sei(H264Context *h){
MpegEncContext * const s = &h->s;
while(get_bits_count(&s->gb) + 16 < s->gb.size_in_bits){
while (get_bits_left(&s->gb) > 16) {
int size, type;
type=0;


@ -752,7 +752,7 @@ static void decode_422_bitstream(HYuvContext *s, int count){
count/=2;
if(count >= (get_bits_left(&s->gb))/(31*4)){
for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
}
@ -770,7 +770,7 @@ static void decode_gray_bitstream(HYuvContext *s, int count){
count/=2;
if(count >= (get_bits_left(&s->gb))/(31*2)){
for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
}
}else{


@ -854,8 +854,8 @@ end:
{
int v= show_bits(&s->gb, 16);
if(get_bits_count(&s->gb) + 16 > s->gb.size_in_bits){
v>>= get_bits_count(&s->gb) + 16 - s->gb.size_in_bits;
if (get_bits_left(&s->gb) < 16) {
v >>= 16 - get_bits_left(&s->gb);
}
if(v==0)


@ -33,7 +33,6 @@ static void amr_decode_fix_avctx(AVCodecContext *avctx)
if (!avctx->channels)
avctx->channels = 1;
avctx->frame_size = 160 * is_amr_wb;
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
}


@ -54,9 +54,6 @@ static av_cold int libspeex_decode_init(AVCodecContext *avctx)
if (s->header) {
avctx->sample_rate = s->header->rate;
avctx->channels = s->header->nb_channels;
avctx->frame_size = s->frame_size = s->header->frame_size;
if (s->header->frames_per_packet)
avctx->frame_size *= s->header->frames_per_packet;
mode = speex_lib_get_mode(s->header->mode);
if (!mode) {


@ -985,9 +985,9 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
if (s->restart_interval && !s->restart_count)
s->restart_count = s->restart_interval;
if (get_bits_count(&s->gb)>s->gb.size_in_bits) {
if (get_bits_left(&s->gb) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
get_bits_count(&s->gb) - s->gb.size_in_bits);
-get_bits_left(&s->gb));
return -1;
}
for (i = 0; i < nb_components; i++) {
@ -1270,7 +1270,7 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
len = get_bits(&s->gb, 16);
if (len < 5)
return -1;
if (8 * len + get_bits_count(&s->gb) > s->gb.size_in_bits)
if (8 * len > get_bits_left(&s->gb))
return -1;
id = get_bits_long(&s->gb, 32);
@ -1408,8 +1408,7 @@ out:
static int mjpeg_decode_com(MJpegDecodeContext *s)
{
int len = get_bits(&s->gb, 16);
if (len >= 2 &&
8 * len - 16 + get_bits_count(&s->gb) <= s->gb.size_in_bits) {
if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
char *cbuf = av_malloc(len - 1);
if (cbuf) {
int i;


@ -1838,21 +1838,15 @@ void avcodec_default_free_buffers(AVCodecContext *avctx)
}
}
int av_get_bits_per_sample(enum CodecID codec_id){
int av_get_exact_bits_per_sample(enum CodecID codec_id)
{
switch(codec_id){
case CODEC_ID_ADPCM_SBPRO_2:
return 2;
case CODEC_ID_ADPCM_SBPRO_3:
return 3;
case CODEC_ID_ADPCM_SBPRO_4:
case CODEC_ID_ADPCM_CT:
case CODEC_ID_ADPCM_IMA_APC:
case CODEC_ID_ADPCM_IMA_WAV:
case CODEC_ID_ADPCM_IMA_QT:
case CODEC_ID_ADPCM_SWF:
case CODEC_ID_ADPCM_MS:
case CODEC_ID_ADPCM_YAMAHA:
case CODEC_ID_ADPCM_IMA_EA_SEAD:
case CODEC_ID_ADPCM_IMA_WS:
case CODEC_ID_ADPCM_G722:
case CODEC_ID_ADPCM_YAMAHA:
return 4;
case CODEC_ID_PCM_ALAW:
case CODEC_ID_PCM_MULAW:
@ -1908,6 +1902,166 @@ enum CodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be)
return map[fmt][be];
}
int av_get_bits_per_sample(enum CodecID codec_id)
{
switch (codec_id) {
case CODEC_ID_ADPCM_SBPRO_2:
return 2;
case CODEC_ID_ADPCM_SBPRO_3:
return 3;
case CODEC_ID_ADPCM_SBPRO_4:
case CODEC_ID_ADPCM_IMA_WAV:
case CODEC_ID_ADPCM_IMA_QT:
case CODEC_ID_ADPCM_SWF:
case CODEC_ID_ADPCM_MS:
return 4;
default:
return av_get_exact_bits_per_sample(codec_id);
}
}
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
{
int id, sr, ch, ba, tag, bps;
id = avctx->codec_id;
sr = avctx->sample_rate;
ch = avctx->channels;
ba = avctx->block_align;
tag = avctx->codec_tag;
bps = av_get_exact_bits_per_sample(avctx->codec_id);
/* codecs with an exact constant bits per sample */
if (bps > 0 && ch > 0 && frame_bytes > 0)
return (frame_bytes * 8) / (bps * ch);
bps = avctx->bits_per_coded_sample;
/* codecs with a fixed packet duration */
switch (id) {
case CODEC_ID_ADPCM_ADX: return 32;
case CODEC_ID_ADPCM_IMA_QT: return 64;
case CODEC_ID_ADPCM_EA_XAS: return 128;
case CODEC_ID_AMR_NB:
case CODEC_ID_GSM:
case CODEC_ID_QCELP:
case CODEC_ID_RA_144:
case CODEC_ID_RA_288: return 160;
case CODEC_ID_IMC: return 256;
case CODEC_ID_AMR_WB:
case CODEC_ID_GSM_MS: return 320;
case CODEC_ID_MP1: return 384;
case CODEC_ID_ATRAC1: return 512;
case CODEC_ID_ATRAC3: return 1024;
case CODEC_ID_MP2:
case CODEC_ID_MUSEPACK7: return 1152;
case CODEC_ID_AC3: return 1536;
}
if (sr > 0) {
/* calc from sample rate */
if (id == CODEC_ID_TTA)
return 256 * sr / 245;
if (ch > 0) {
/* calc from sample rate and channels */
if (id == CODEC_ID_BINKAUDIO_DCT)
return (480 << (sr / 22050)) / ch;
}
}
if (ba > 0) {
/* calc from block_align */
if (id == CODEC_ID_SIPR) {
switch (ba) {
case 20: return 160;
case 19: return 144;
case 29: return 288;
case 37: return 480;
}
}
}
if (frame_bytes > 0) {
/* calc from frame_bytes only */
if (id == CODEC_ID_TRUESPEECH)
return 240 * (frame_bytes / 32);
if (id == CODEC_ID_NELLYMOSER)
return 256 * (frame_bytes / 64);
if (bps > 0) {
/* calc from frame_bytes and bits_per_coded_sample */
if (id == CODEC_ID_ADPCM_G726)
return frame_bytes * 8 / bps;
}
if (ch > 0) {
/* calc from frame_bytes and channels */
switch (id) {
case CODEC_ID_ADPCM_4XM:
case CODEC_ID_ADPCM_IMA_ISS:
return (frame_bytes - 4 * ch) * 2 / ch;
case CODEC_ID_ADPCM_IMA_SMJPEG:
return (frame_bytes - 4) * 2 / ch;
case CODEC_ID_ADPCM_IMA_AMV:
return (frame_bytes - 8) * 2 / ch;
case CODEC_ID_ADPCM_XA:
return (frame_bytes / 128) * 224 / ch;
case CODEC_ID_INTERPLAY_DPCM:
return (frame_bytes - 6 - ch) / ch;
case CODEC_ID_ROQ_DPCM:
return (frame_bytes - 8) / ch;
case CODEC_ID_XAN_DPCM:
return (frame_bytes - 2 * ch) / ch;
case CODEC_ID_MACE3:
return 3 * frame_bytes / ch;
case CODEC_ID_MACE6:
return 6 * frame_bytes / ch;
case CODEC_ID_PCM_LXF:
return 2 * (frame_bytes / (5 * ch));
}
if (tag) {
/* calc from frame_bytes, channels, and codec_tag */
if (id == CODEC_ID_SOL_DPCM) {
if (tag == 3)
return frame_bytes / ch;
else
return frame_bytes * 2 / ch;
}
}
if (ba > 0) {
/* calc from frame_bytes, channels, and block_align */
int blocks = frame_bytes / ba;
switch (avctx->codec_id) {
case CODEC_ID_ADPCM_IMA_WAV:
return blocks * (1 + (ba - 4 * ch) / (4 * ch) * 8);
case CODEC_ID_ADPCM_IMA_DK3:
return blocks * (((ba - 16) * 8 / 3) / ch);
case CODEC_ID_ADPCM_IMA_DK4:
return blocks * (1 + (ba - 4 * ch) * 2 / ch);
case CODEC_ID_ADPCM_MS:
return blocks * (2 + (ba - 7 * ch) * 2 / ch);
}
}
if (bps > 0) {
/* calc from frame_bytes, channels, and bits_per_coded_sample */
switch (avctx->codec_id) {
case CODEC_ID_PCM_DVD:
return 2 * (frame_bytes / ((bps * 2 / 8) * ch));
case CODEC_ID_PCM_BLURAY:
return frame_bytes / ((FFALIGN(ch, 2) * bps) / 8);
case CODEC_ID_S302M:
return 2 * (frame_bytes / ((bps + 4) / 4)) / ch;
}
}
}
}
return 0;
}
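/* Worked example (illustrative, hypothetical values): stereo IMA ADPCM in WAV
 * with block_align = 2048 yields 1 + (2048 - 4*2) / (4*2) * 8 = 2041 samples
 * per block, multiplied by the frame_bytes / 2048 blocks in the packet. */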
#if !HAVE_THREADS
int ff_thread_init(AVCodecContext *s){
return -1;


@ -21,7 +21,7 @@
#define AVCODEC_VERSION_H
#define LIBAVCODEC_VERSION_MAJOR 54
#define LIBAVCODEC_VERSION_MINOR 9
#define LIBAVCODEC_VERSION_MINOR 10
#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \


@ -387,7 +387,7 @@ static void vp6_parse_coeff_huffman(VP56Context *s)
if (coeff_idx)
break;
} else {
if (get_bits_count(&s->gb) >= s->gb.size_in_bits)
if (get_bits_left(&s->gb) <= 0)
return;
coeff = get_vlc2(&s->gb, vlc_coeff->table, 9, 3);
if (coeff == 0) {


@ -2415,11 +2415,469 @@ extern void ff_butterflies_float_interleave_sse(float *dst, const float *src0,
extern void ff_butterflies_float_interleave_avx(float *dst, const float *src0,
const float *src1, int len);
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \
c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU
#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
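/* Expansion example (illustrative): SET_HPEL_FUNCS(put, 0, 16, mmx) assigns
 *   c->put_pixels_tab[0][0] = put_pixels16_mmx;
 *   c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
 *   c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
 *   c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;
 */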
#define H264_QPEL_FUNCS(x, y, CPU) \
c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU; \
c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU; \
c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU; \
c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU
#define H264_QPEL_FUNCS_10(x, y, CPU) \
c->put_h264_qpel_pixels_tab[0][x+y*4] = ff_put_h264_qpel16_mc##x##y##_10_##CPU; \
c->put_h264_qpel_pixels_tab[1][x+y*4] = ff_put_h264_qpel8_mc##x##y##_10_##CPU; \
c->avg_h264_qpel_pixels_tab[0][x+y*4] = ff_avg_h264_qpel16_mc##x##y##_10_##CPU; \
c->avg_h264_qpel_pixels_tab[1][x+y*4] = ff_avg_h264_qpel8_mc##x##y##_10_##CPU;
static void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
{
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
if (!high_bit_depth) {
c->clear_block = clear_block_mmx;
c->clear_blocks = clear_blocks_mmx;
c->draw_edges = draw_edges_mmx;
SET_HPEL_FUNCS(put, 0, 16, mmx);
SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
SET_HPEL_FUNCS(avg, 0, 16, mmx);
SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
SET_HPEL_FUNCS(put, 1, 8, mmx);
SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
SET_HPEL_FUNCS(avg, 1, 8, mmx);
SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
}
#if ARCH_X86_32 || !HAVE_YASM
c->gmc= gmc_mmx;
#endif
#if ARCH_X86_32 && HAVE_YASM
if (!high_bit_depth)
c->emulated_edge_mc = emulated_edge_mc_mmx;
#endif
c->add_bytes = add_bytes_mmx;
c->put_no_rnd_pixels_l2[0]= put_vp_no_rnd_pixels16_l2_mmx;
c->put_no_rnd_pixels_l2[1]= put_vp_no_rnd_pixels8_l2_mmx;
if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
c->h263_v_loop_filter = h263_v_loop_filter_mmx;
c->h263_h_loop_filter = h263_h_loop_filter_mmx;
}
#if HAVE_YASM
if (!high_bit_depth && CONFIG_H264CHROMA) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_mmx_rnd;
c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_mmx;
}
c->vector_clip_int32 = ff_vector_clip_int32_mmx;
#endif
}
static void dsputil_init_mmx2(DSPContext *c, AVCodecContext *avctx,
int mm_flags)
{
const int bit_depth = avctx->bits_per_raw_sample;
const int high_bit_depth = bit_depth > 8;
c->prefetch = prefetch_mmx2;
if (!high_bit_depth) {
c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
}
if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
if (!high_bit_depth) {
c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
}
if (CONFIG_VP3_DECODER && HAVE_YASM) {
c->vp3_v_loop_filter = ff_vp3_v_loop_filter_mmx2;
c->vp3_h_loop_filter = ff_vp3_h_loop_filter_mmx2;
}
}
if (CONFIG_VP3_DECODER && HAVE_YASM) {
c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
}
if (CONFIG_VP3_DECODER
&& (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2;
}
SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2, );
SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2, );
SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2, );
SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2, );
SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2, );
SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2, );
if (!high_bit_depth) {
SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2, );
SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2, );
SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2, );
SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2, );
SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2, );
SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2, );
} else if (bit_depth == 10) {
#if HAVE_YASM
#if !ARCH_X86_64
SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_mmxext, ff_);
SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_mmxext, ff_);
SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_mmxext, ff_);
SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_mmxext, ff_);
#endif
SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 10_mmxext, ff_);
SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 10_mmxext, ff_);
#endif
}
SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2, );
SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2, );
SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2, );
SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2, );
#if HAVE_YASM
if (!high_bit_depth && CONFIG_H264CHROMA) {
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_mmx2_rnd;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_mmx2;
c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_mmx2;
c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_mmx2;
}
if (bit_depth == 10 && CONFIG_H264CHROMA) {
c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_10_mmxext;
c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_10_mmxext;
c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_10_mmxext;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_10_mmxext;
}
c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
if (avctx->flags & CODEC_FLAG_BITEXACT) {
c->apply_window_int16 = ff_apply_window_int16_mmxext_ba;
} else {
c->apply_window_int16 = ff_apply_window_int16_mmxext;
}
#endif
}
static void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
int mm_flags)
{
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
c->prefetch = prefetch_3dnow;
if (!high_bit_depth) {
c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
if (!(avctx->flags & CODEC_FLAG_BITEXACT)){
c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
}
}
if (CONFIG_VP3_DECODER
&& (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
}
SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow, );
SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow, );
SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow, );
SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow, );
SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow, );
SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow, );
if (!high_bit_depth) {
SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow, );
SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow, );
SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow, );
SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow, );
SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow, );
SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow, );
}
SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow, );
SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow, );
SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow, );
SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow, );
#if HAVE_YASM
if (!high_bit_depth && CONFIG_H264CHROMA) {
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_3dnow_rnd;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_3dnow;
}
#endif
c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
c->vector_fmul = vector_fmul_3dnow;
c->vector_fmul_add = vector_fmul_add_3dnow;
#if HAVE_7REGS
c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif
}
static void dsputil_init_3dnow2(DSPContext *c, AVCodecContext *avctx,
int mm_flags)
{
c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
#if HAVE_6REGS
c->vector_fmul_window = vector_fmul_window_3dnow2;
#endif
}
static void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx, int mm_flags)
{
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
if (!high_bit_depth) {
if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
/* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
c->clear_block = clear_block_sse;
c->clear_blocks = clear_blocks_sse;
}
}
c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
c->ac3_downmix = ac3_downmix_sse;
c->vector_fmul = vector_fmul_sse;
c->vector_fmul_reverse = vector_fmul_reverse_sse;
if (!(mm_flags & AV_CPU_FLAG_3DNOW))
c->vector_fmul_add = vector_fmul_add_sse;
#if HAVE_6REGS
c->vector_fmul_window = vector_fmul_window_sse;
#endif
c->vector_clipf = vector_clipf_sse;
#if HAVE_YASM
c->scalarproduct_float = ff_scalarproduct_float_sse;
c->butterflies_float_interleave = ff_butterflies_float_interleave_sse;
if (!high_bit_depth)
c->emulated_edge_mc = emulated_edge_mc_sse;
c->gmc = gmc_sse;
#endif
}
static void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
int mm_flags)
{
const int bit_depth = avctx->bits_per_raw_sample;
const int high_bit_depth = bit_depth > 8;
if (mm_flags & AV_CPU_FLAG_3DNOW) {
// these functions are slower than mmx on AMD, but faster on Intel
if (!high_bit_depth) {
c->put_pixels_tab[0][0] = put_pixels16_sse2;
c->put_no_rnd_pixels_tab[0][0] = put_pixels16_sse2;
c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
H264_QPEL_FUNCS(0, 0, sse2);
}
}
if (!high_bit_depth) {
H264_QPEL_FUNCS(0, 1, sse2);
H264_QPEL_FUNCS(0, 2, sse2);
H264_QPEL_FUNCS(0, 3, sse2);
H264_QPEL_FUNCS(1, 1, sse2);
H264_QPEL_FUNCS(1, 2, sse2);
H264_QPEL_FUNCS(1, 3, sse2);
H264_QPEL_FUNCS(2, 1, sse2);
H264_QPEL_FUNCS(2, 2, sse2);
H264_QPEL_FUNCS(2, 3, sse2);
H264_QPEL_FUNCS(3, 1, sse2);
H264_QPEL_FUNCS(3, 2, sse2);
H264_QPEL_FUNCS(3, 3, sse2);
}
#if HAVE_YASM
if (bit_depth == 10) {
SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_sse2, ff_);
SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_sse2, ff_);
SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_sse2, ff_);
SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_sse2, ff_);
H264_QPEL_FUNCS_10(1, 0, sse2_cache64);
H264_QPEL_FUNCS_10(2, 0, sse2_cache64);
H264_QPEL_FUNCS_10(3, 0, sse2_cache64);
if (CONFIG_H264CHROMA) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_sse2;
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_sse2;
}
}
c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
if (mm_flags & AV_CPU_FLAG_ATOM) {
c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
} else {
c->vector_clip_int32 = ff_vector_clip_int32_sse2;
}
if (avctx->flags & CODEC_FLAG_BITEXACT) {
c->apply_window_int16 = ff_apply_window_int16_sse2_ba;
} else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
c->apply_window_int16 = ff_apply_window_int16_sse2;
}
c->bswap_buf = ff_bswap32_buf_sse2;
#endif
}
static void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
int mm_flags)
{
#if HAVE_SSSE3
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
const int bit_depth = avctx->bits_per_raw_sample;
if (!high_bit_depth) {
H264_QPEL_FUNCS(1, 0, ssse3);
H264_QPEL_FUNCS(1, 1, ssse3);
H264_QPEL_FUNCS(1, 2, ssse3);
H264_QPEL_FUNCS(1, 3, ssse3);
H264_QPEL_FUNCS(2, 0, ssse3);
H264_QPEL_FUNCS(2, 1, ssse3);
H264_QPEL_FUNCS(2, 2, ssse3);
H264_QPEL_FUNCS(2, 3, ssse3);
H264_QPEL_FUNCS(3, 0, ssse3);
H264_QPEL_FUNCS(3, 1, ssse3);
H264_QPEL_FUNCS(3, 2, ssse3);
H264_QPEL_FUNCS(3, 3, ssse3);
}
#if HAVE_YASM
else if (bit_depth == 10) {
H264_QPEL_FUNCS_10(1, 0, ssse3_cache64);
H264_QPEL_FUNCS_10(2, 0, ssse3_cache64);
H264_QPEL_FUNCS_10(3, 0, ssse3_cache64);
}
if (!high_bit_depth && CONFIG_H264CHROMA) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_ssse3_rnd;
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_ssse3_rnd;
c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_ssse3;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_ssse3;
}
c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
if (mm_flags & AV_CPU_FLAG_ATOM) {
c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
} else {
c->apply_window_int16 = ff_apply_window_int16_ssse3;
}
if (!(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW))) { // cachesplit
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
}
c->bswap_buf = ff_bswap32_buf_ssse3;
#endif
#endif
}
static void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
int mm_flags)
{
#if HAVE_YASM
c->vector_clip_int32 = ff_vector_clip_int32_sse4;
#endif
}
static void dsputil_init_avx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
{
#if HAVE_AVX && HAVE_YASM
const int bit_depth = avctx->bits_per_raw_sample;
if (bit_depth == 10) {
// AVX implies !cache64.
// TODO: Port cache(32|64) detection from x264.
H264_QPEL_FUNCS_10(1, 0, sse2);
H264_QPEL_FUNCS_10(2, 0, sse2);
H264_QPEL_FUNCS_10(3, 0, sse2);
if (CONFIG_H264CHROMA) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_avx;
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_avx;
}
}
c->butterflies_float_interleave = ff_butterflies_float_interleave_avx;
#endif
}
void ff_dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
int mm_flags = av_get_cpu_flags();
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
const int bit_depth = avctx->bits_per_raw_sample;
if (avctx->dsp_mask) {
if (avctx->dsp_mask & AV_CPU_FLAG_FORCE)
@ -2498,433 +2956,33 @@ void ff_dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
}
}
c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
if (!high_bit_depth) {
c->clear_block = clear_block_mmx;
c->clear_blocks = clear_blocks_mmx;
if ((mm_flags & AV_CPU_FLAG_SSE) &&
!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
/* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
c->clear_block = clear_block_sse;
c->clear_blocks = clear_blocks_sse;
}
}
#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
if (!high_bit_depth) {
SET_HPEL_FUNCS(put, 0, 16, mmx);
SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
SET_HPEL_FUNCS(avg, 0, 16, mmx);
SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
SET_HPEL_FUNCS(put, 1, 8, mmx);
SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
SET_HPEL_FUNCS(avg, 1, 8, mmx);
SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
}
#if ARCH_X86_32 || !HAVE_YASM
c->gmc= gmc_mmx;
#endif
#if ARCH_X86_32 && HAVE_YASM
if (!high_bit_depth)
c->emulated_edge_mc = emulated_edge_mc_mmx;
#endif
c->add_bytes= add_bytes_mmx;
if (!high_bit_depth)
c->draw_edges = draw_edges_mmx;
c->put_no_rnd_pixels_l2[0]= put_vp_no_rnd_pixels16_l2_mmx;
c->put_no_rnd_pixels_l2[1]= put_vp_no_rnd_pixels8_l2_mmx;
if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
c->h263_v_loop_filter= h263_v_loop_filter_mmx;
c->h263_h_loop_filter= h263_h_loop_filter_mmx;
}
#if HAVE_YASM
if (!high_bit_depth && CONFIG_H264CHROMA) {
c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_mmx_rnd;
c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_mmx;
}
c->vector_clip_int32 = ff_vector_clip_int32_mmx;
#endif
if (mm_flags & AV_CPU_FLAG_MMX2) {
c->prefetch = prefetch_mmx2;
if (!high_bit_depth) {
c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
}
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
if (!high_bit_depth) {
c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
}
if (CONFIG_VP3_DECODER && HAVE_YASM) {
c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
}
}
if (CONFIG_VP3_DECODER && HAVE_YASM) {
c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
}
if (CONFIG_VP3_DECODER
&& (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2;
}
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \
c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU
SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2, );
SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2, );
SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2, );
SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2, );
SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2, );
SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2, );
if (!high_bit_depth) {
SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2, );
SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2, );
SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2, );
SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2, );
SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2, );
SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2, );
}
else if (bit_depth == 10) {
#if HAVE_YASM
#if !ARCH_X86_64
SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_mmxext, ff_);
SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_mmxext, ff_);
SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_mmxext, ff_);
SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_mmxext, ff_);
#endif
SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 10_mmxext, ff_);
SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 10_mmxext, ff_);
#endif
}
SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2, );
SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2, );
SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2, );
SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2, );
#if HAVE_YASM
if (!high_bit_depth && CONFIG_H264CHROMA) {
c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_mmx2_rnd;
c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_mmx2;
c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_mmx2;
c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_mmx2;
}
if (bit_depth == 10 && CONFIG_H264CHROMA) {
c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_10_mmxext;
c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_10_mmxext;
c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_10_mmxext;
c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_10_mmxext;
}
c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
#endif
#if HAVE_7REGS
if (HAVE_AMD3DNOW && (mm_flags & AV_CPU_FLAG_3DNOW))
c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif
} else if (HAVE_AMD3DNOW && (mm_flags & AV_CPU_FLAG_3DNOW)) {
c->prefetch = prefetch_3dnow;
if (!high_bit_depth) {
c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
}
}
if (CONFIG_VP3_DECODER
&& (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
}
SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow, );
SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow, );
SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow, );
SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow, );
SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow, );
SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow, );
if (!high_bit_depth) {
SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow, );
SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow, );
SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow, );
SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow, );
SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow, );
SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow, );
}
SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow, );
SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow, );
SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow, );
SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow, );
#if HAVE_YASM
if (!high_bit_depth && CONFIG_H264CHROMA) {
c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_3dnow_rnd;
c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_3dnow;
}
#endif
}
#define H264_QPEL_FUNCS(x, y, CPU)\
c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
if((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW)){
// these functions are slower than mmx on AMD, but faster on Intel
if (!high_bit_depth) {
c->put_pixels_tab[0][0] = put_pixels16_sse2;
c->put_no_rnd_pixels_tab[0][0] = put_pixels16_sse2;
c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
H264_QPEL_FUNCS(0, 0, sse2);
}
}
if(mm_flags & AV_CPU_FLAG_SSE2){
if (!high_bit_depth) {
H264_QPEL_FUNCS(0, 1, sse2);
H264_QPEL_FUNCS(0, 2, sse2);
H264_QPEL_FUNCS(0, 3, sse2);
H264_QPEL_FUNCS(1, 1, sse2);
H264_QPEL_FUNCS(1, 2, sse2);
H264_QPEL_FUNCS(1, 3, sse2);
H264_QPEL_FUNCS(2, 1, sse2);
H264_QPEL_FUNCS(2, 2, sse2);
H264_QPEL_FUNCS(2, 3, sse2);
H264_QPEL_FUNCS(3, 1, sse2);
H264_QPEL_FUNCS(3, 2, sse2);
H264_QPEL_FUNCS(3, 3, sse2);
}
#if HAVE_YASM
#define H264_QPEL_FUNCS_10(x, y, CPU)\
c->put_h264_qpel_pixels_tab[0][x+y*4] = ff_put_h264_qpel16_mc##x##y##_10_##CPU;\
c->put_h264_qpel_pixels_tab[1][x+y*4] = ff_put_h264_qpel8_mc##x##y##_10_##CPU;\
c->avg_h264_qpel_pixels_tab[0][x+y*4] = ff_avg_h264_qpel16_mc##x##y##_10_##CPU;\
c->avg_h264_qpel_pixels_tab[1][x+y*4] = ff_avg_h264_qpel8_mc##x##y##_10_##CPU;
if (bit_depth == 10) {
SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_sse2, ff_);
SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_sse2, ff_);
SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_sse2, ff_);
SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_sse2, ff_);
H264_QPEL_FUNCS_10(1, 0, sse2_cache64)
H264_QPEL_FUNCS_10(2, 0, sse2_cache64)
H264_QPEL_FUNCS_10(3, 0, sse2_cache64)
if (CONFIG_H264CHROMA) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_sse2;
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_sse2;
}
}
#endif
}
#if HAVE_SSSE3
if(mm_flags & AV_CPU_FLAG_SSSE3){
if (!high_bit_depth) {
H264_QPEL_FUNCS(1, 0, ssse3);
H264_QPEL_FUNCS(1, 1, ssse3);
H264_QPEL_FUNCS(1, 2, ssse3);
H264_QPEL_FUNCS(1, 3, ssse3);
H264_QPEL_FUNCS(2, 0, ssse3);
H264_QPEL_FUNCS(2, 1, ssse3);
H264_QPEL_FUNCS(2, 2, ssse3);
H264_QPEL_FUNCS(2, 3, ssse3);
H264_QPEL_FUNCS(3, 0, ssse3);
H264_QPEL_FUNCS(3, 1, ssse3);
H264_QPEL_FUNCS(3, 2, ssse3);
H264_QPEL_FUNCS(3, 3, ssse3);
}
#if HAVE_YASM
else if (bit_depth == 10) {
H264_QPEL_FUNCS_10(1, 0, ssse3_cache64)
H264_QPEL_FUNCS_10(2, 0, ssse3_cache64)
H264_QPEL_FUNCS_10(3, 0, ssse3_cache64)
}
if (!high_bit_depth && CONFIG_H264CHROMA) {
c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_ssse3_rnd;
c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_ssse3_rnd;
c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_ssse3;
c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_ssse3;
}
c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
#endif
}
#endif
if (HAVE_AMD3DNOW && (mm_flags & AV_CPU_FLAG_3DNOW)) {
c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
c->vector_fmul = vector_fmul_3dnow;
}
if (HAVE_AMD3DNOWEXT && (mm_flags & AV_CPU_FLAG_3DNOWEXT)) {
c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
#if HAVE_6REGS
c->vector_fmul_window = vector_fmul_window_3dnow2;
#endif
}
if(mm_flags & AV_CPU_FLAG_MMX2){
#if HAVE_YASM
c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
if (avctx->flags & CODEC_FLAG_BITEXACT) {
c->apply_window_int16 = ff_apply_window_int16_mmxext_ba;
} else {
c->apply_window_int16 = ff_apply_window_int16_mmxext;
}
#endif
}
if(mm_flags & AV_CPU_FLAG_SSE){
c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
c->ac3_downmix = ac3_downmix_sse;
c->vector_fmul = vector_fmul_sse;
c->vector_fmul_reverse = vector_fmul_reverse_sse;
c->vector_fmul_add = vector_fmul_add_sse;
#if HAVE_6REGS
c->vector_fmul_window = vector_fmul_window_sse;
#endif
c->vector_clipf = vector_clipf_sse;
#if HAVE_YASM
c->scalarproduct_float = ff_scalarproduct_float_sse;
c->butterflies_float_interleave = ff_butterflies_float_interleave_sse;
if (!high_bit_depth)
c->emulated_edge_mc = emulated_edge_mc_sse;
c->gmc = gmc_sse;
#endif
}
if (HAVE_AMD3DNOW && (mm_flags & AV_CPU_FLAG_3DNOW))
c->vector_fmul_add = vector_fmul_add_3dnow; // faster than sse
if(mm_flags & AV_CPU_FLAG_SSE2){
#if HAVE_YASM
c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
if (mm_flags & AV_CPU_FLAG_ATOM) {
c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
} else {
c->vector_clip_int32 = ff_vector_clip_int32_sse2;
}
if (avctx->flags & CODEC_FLAG_BITEXACT) {
c->apply_window_int16 = ff_apply_window_int16_sse2_ba;
} else {
if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
c->apply_window_int16 = ff_apply_window_int16_sse2;
}
}
c->bswap_buf = ff_bswap32_buf_sse2;
#endif
}
if (mm_flags & AV_CPU_FLAG_SSSE3) {
#if HAVE_YASM
if (mm_flags & AV_CPU_FLAG_ATOM) {
c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
} else {
c->apply_window_int16 = ff_apply_window_int16_ssse3;
}
if (!(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW))) { // cachesplit
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
}
c->bswap_buf = ff_bswap32_buf_ssse3;
#endif
}
if (mm_flags & AV_CPU_FLAG_SSE4 && HAVE_SSE) {
#if HAVE_YASM
c->vector_clip_int32 = ff_vector_clip_int32_sse4;
#endif
}
#if HAVE_AVX && HAVE_YASM
if (mm_flags & AV_CPU_FLAG_AVX) {
if (bit_depth == 10) {
//AVX implies !cache64.
//TODO: Port cache(32|64) detection from x264.
H264_QPEL_FUNCS_10(1, 0, sse2)
H264_QPEL_FUNCS_10(2, 0, sse2)
H264_QPEL_FUNCS_10(3, 0, sse2)
if (CONFIG_H264CHROMA) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_avx;
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_avx;
}
}
c->butterflies_float_interleave = ff_butterflies_float_interleave_avx;
}
#endif
dsputil_init_mmx(c, avctx, mm_flags);
}
if (mm_flags & AV_CPU_FLAG_MMX2)
dsputil_init_mmx2(c, avctx, mm_flags);
if (HAVE_AMD3DNOW && (mm_flags & AV_CPU_FLAG_3DNOW))
dsputil_init_3dnow(c, avctx, mm_flags);
if (HAVE_AMD3DNOWEXT && (mm_flags & AV_CPU_FLAG_3DNOWEXT))
dsputil_init_3dnow2(c, avctx, mm_flags);
if (HAVE_SSE && (mm_flags & AV_CPU_FLAG_SSE))
dsputil_init_sse(c, avctx, mm_flags);
if (mm_flags & AV_CPU_FLAG_SSE2)
dsputil_init_sse2(c, avctx, mm_flags);
if (mm_flags & AV_CPU_FLAG_SSSE3)
dsputil_init_ssse3(c, avctx, mm_flags);
if (mm_flags & AV_CPU_FLAG_SSE4 && HAVE_SSE)
dsputil_init_sse4(c, avctx, mm_flags);
if (mm_flags & AV_CPU_FLAG_AVX)
dsputil_init_avx(c, avctx, mm_flags);
if (CONFIG_ENCODERS)
ff_dsputilenc_init_mmx(c, avctx);
}


@ -32,6 +32,7 @@
typedef struct {
int64_t data_end;
int block_duration;
} AIFFInputContext;
static enum CodecID aiff_codec_get_id(int bps)
@ -87,9 +88,12 @@ static void get_meta(AVFormatContext *s, const char *key, int size)
}
/* Returns the number of sound data frames or negative on error */
static unsigned int get_aiff_header(AVIOContext *pb, AVCodecContext *codec,
int size, unsigned version)
static unsigned int get_aiff_header(AVFormatContext *s, int size,
unsigned version)
{
AVIOContext *pb = s->pb;
AVCodecContext *codec = s->streams[0]->codec;
AIFFInputContext *aiff = s->priv_data;
int exp;
uint64_t val;
double sample_rate;
@ -117,26 +121,27 @@ static unsigned int get_aiff_header(AVIOContext *pb, AVCodecContext *codec,
case CODEC_ID_PCM_S16BE:
codec->codec_id = aiff_codec_get_id(codec->bits_per_coded_sample);
codec->bits_per_coded_sample = av_get_bits_per_sample(codec->codec_id);
aiff->block_duration = 1;
break;
case CODEC_ID_ADPCM_IMA_QT:
codec->block_align = 34*codec->channels;
codec->frame_size = 64;
aiff->block_duration = 64;
break;
case CODEC_ID_MACE3:
codec->block_align = 2*codec->channels;
codec->frame_size = 6;
aiff->block_duration = 6;
break;
case CODEC_ID_MACE6:
codec->block_align = 1*codec->channels;
codec->frame_size = 6;
aiff->block_duration = 6;
break;
case CODEC_ID_GSM:
codec->block_align = 33;
codec->frame_size = 160;
aiff->block_duration = 160;
break;
case CODEC_ID_QCELP:
codec->block_align = 35;
codec->frame_size= 160;
aiff->block_duration = 160;
break;
default:
break;
@ -146,6 +151,7 @@ static unsigned int get_aiff_header(AVIOContext *pb, AVCodecContext *codec,
/* Need the codec type */
codec->codec_id = aiff_codec_get_id(codec->bits_per_coded_sample);
codec->bits_per_coded_sample = av_get_bits_per_sample(codec->codec_id);
aiff->block_duration = 1;
}
/* Block align needs to be computed in all cases, as the definition
@ -153,8 +159,8 @@ static unsigned int get_aiff_header(AVIOContext *pb, AVCodecContext *codec,
if (!codec->block_align)
codec->block_align = (codec->bits_per_coded_sample * codec->channels) >> 3;
codec->bit_rate = (codec->frame_size ? codec->sample_rate/codec->frame_size :
codec->sample_rate) * (codec->block_align << 3);
codec->bit_rate = codec->sample_rate * (codec->block_align << 3) /
aiff->block_duration;
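/* e.g. (illustrative, assuming an 8000 Hz GSM stream): 8000 * (33 << 3) / 160
 * = 13200 bit/s. */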
/* Chunk is over */
if (size)
@ -215,7 +221,7 @@ static int aiff_read_header(AVFormatContext *s)
switch (tag) {
case MKTAG('C', 'O', 'M', 'M'): /* Common chunk */
/* Then for the complete header info */
st->nb_frames = get_aiff_header(pb, st->codec, size, version);
st->nb_frames = get_aiff_header(s, size, version);
if (st->nb_frames < 0)
return st->nb_frames;
if (offset > 0) // COMM is after SSND
@ -279,8 +285,7 @@ got_sound:
/* Now positioned, get the sound data start and end */
avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
st->start_time = 0;
st->duration = st->codec->frame_size ?
st->nb_frames * st->codec->frame_size : st->nb_frames;
st->duration = st->nb_frames * aiff->block_duration;
/* Position the stream at the first block */
avio_seek(pb, offset, SEEK_SET);
@ -315,6 +320,7 @@ static int aiff_read_packet(AVFormatContext *s,
/* Only one stream in an AIFF file */
pkt->stream_index = 0;
pkt->duration = (res / st->codec->block_align) * aiff->block_duration;
return 0;
}


@ -100,14 +100,12 @@ static int amr_read_header(AVFormatContext *s)
st->codec->codec_tag = MKTAG('s', 'a', 'w', 'b');
st->codec->codec_id = CODEC_ID_AMR_WB;
st->codec->sample_rate = 16000;
st->codec->frame_size = 320;
}
else
{
st->codec->codec_tag = MKTAG('s', 'a', 'm', 'r');
st->codec->codec_id = CODEC_ID_AMR_NB;
st->codec->sample_rate = 8000;
st->codec->frame_size = 160;
}
st->codec->channels = 1;
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

View File

@ -335,7 +335,6 @@ static int ape_read_header(AVFormatContext * s)
st->codec->channels = ape->channels;
st->codec->sample_rate = ape->samplerate;
st->codec->bits_per_coded_sample = ape->bps;
st->codec->frame_size = MAC_SUBFRAME_SIZE;
st->nb_frames = ape->totalframes;
st->start_time = 0;


@ -650,12 +650,9 @@ typedef struct AVStream {
double duration_error[2][2][MAX_STD_TIMEBASES];
int64_t codec_info_duration;
int nb_decoded_frames;
int found_decoder;
} *info;
AVPacket cur_pkt;
const uint8_t *cur_ptr;
int cur_len;
int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */
// Timestamp generation support:
@ -990,9 +987,6 @@ typedef struct AVFormatContext {
struct AVPacketList *packet_buffer;
struct AVPacketList *packet_buffer_end;
/* av_read_frame() support */
AVStream *cur_st;
/* av_seek_frame() support */
int64_t data_offset; /**< offset of the first packet */
@ -1004,6 +998,11 @@ typedef struct AVFormatContext {
*/
struct AVPacketList *raw_packet_buffer;
struct AVPacketList *raw_packet_buffer_end;
/**
* Packets split by the parser get queued here.
*/
struct AVPacketList *parse_queue;
struct AVPacketList *parse_queue_end;
/**
* Remaining size available for raw_packet_buffer, in bytes.
*/
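parse_queue/parse_queue_end describe the same data structure as packet_buffer and raw_packet_buffer: a singly linked list of AVPacketList nodes appended at the tail. The demuxer uses the internal add_to_pktbuf() helper for this; a stand-alone sketch of such a tail append, for illustration only:

#include "libavformat/avformat.h"   /* AVPacket, AVPacketList */
#include "libavutil/mem.h"          /* av_mallocz() */

static AVPacketList *pktlist_append(AVPacketList **head, AVPacketList **tail,
                                    AVPacket *pkt)
{
    AVPacketList *node = av_mallocz(sizeof(*node));
    if (!node)
        return NULL;
    node->pkt = *pkt;               /* the node takes ownership of the packet */
    if (*tail)
        (*tail)->next = node;
    else
        *head = node;
    *tail = node;
    return node;
}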


@ -323,13 +323,7 @@ DVDemuxContext* avpriv_dv_init_demux(AVFormatContext *s)
return NULL;
}
c->sys = NULL;
c->fctx = s;
memset(c->ast, 0, sizeof(c->ast));
c->ach = 0;
c->frames = 0;
c->abytes = 0;
c->fctx = s;
c->vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
c->vst->codec->codec_id = CODEC_ID_DVVIDEO;
c->vst->codec->bit_rate = 25000000;


@ -1464,20 +1464,16 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
// force sample rate for qcelp when not stored in mov
if (st->codec->codec_tag != MKTAG('Q','c','l','p'))
st->codec->sample_rate = 8000;
st->codec->frame_size= 160;
st->codec->channels= 1; /* really needed */
break;
case CODEC_ID_AMR_NB:
st->codec->channels= 1; /* really needed */
/* force sample rate for amr, stsd in 3gp does not store sample rate */
st->codec->sample_rate = 8000;
/* force frame_size, too, samples_per_frame isn't always set properly */
st->codec->frame_size = 160;
break;
case CODEC_ID_AMR_WB:
st->codec->channels = 1;
st->codec->sample_rate = 16000;
st->codec->frame_size = 320;
break;
case CODEC_ID_MP2:
case CODEC_ID_MP3:
@ -1487,12 +1483,10 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
case CODEC_ID_GSM:
case CODEC_ID_ADPCM_MS:
case CODEC_ID_ADPCM_IMA_WAV:
st->codec->frame_size = sc->samples_per_frame;
st->codec->block_align = sc->bytes_per_frame;
break;
case CODEC_ID_ALAC:
if (st->codec->extradata_size == 36) {
st->codec->frame_size = AV_RB32(st->codec->extradata+12);
st->codec->channels = AV_RB8 (st->codec->extradata+21);
st->codec->sample_rate = AV_RB32(st->codec->extradata+32);
}
@ -2058,13 +2052,6 @@ static int mov_read_trak(MOVContext *c, AVIOContext *pb, MOVAtom atom)
avpriv_set_pts_info(st, 64, 1, sc->time_scale);
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
!st->codec->frame_size && sc->stts_count == 1) {
st->codec->frame_size = av_rescale(sc->stts_data[0].duration,
st->codec->sample_rate, sc->time_scale);
av_dlog(c->fc, "frame size %d\n", st->codec->frame_size);
}
mov_build_index(c, st);
if (sc->dref_id-1 < sc->drefs_count && sc->drefs[sc->dref_id-1].path) {


@ -432,9 +432,6 @@ rdt_parse_sdp_line (AVFormatContext *s, int st_index,
}
rdt->rmst[s->streams[n]->index] = ff_rm_alloc_rmstream();
rdt_load_mdpr(rdt, s->streams[n], (n - first) * 2);
if (s->streams[n]->codec->codec_id == CODEC_ID_AAC)
s->streams[n]->codec->frame_size = 1; // FIXME
}
}


@ -414,7 +414,7 @@ void ff_end_tag(AVIOContext *pb, int64_t start)
/* returns the size or -1 on error */
int ff_put_wav_header(AVIOContext *pb, AVCodecContext *enc)
{
int bps, blkalign, bytespersec;
int bps, blkalign, bytespersec, frame_size;
int hdrsize = 18;
int waveformatextensible;
uint8_t temp[256];
@ -423,6 +423,14 @@ int ff_put_wav_header(AVIOContext *pb, AVCodecContext *enc)
if(!enc->codec_tag || enc->codec_tag > 0xffff)
return -1;
/* We use the known constant frame size for the codec if known, otherwise
fallback to using AVCodecContext.frame_size, which is not as reliable
for indicating packet duration */
frame_size = av_get_audio_frame_duration(enc, 0);
if (!frame_size)
frame_size = enc->frame_size;
waveformatextensible = (enc->channels > 2 && enc->channel_layout)
|| enc->sample_rate > 48000
|| av_get_bits_per_sample(enc->codec_id) > 16;
@ -449,7 +457,9 @@ int ff_put_wav_header(AVIOContext *pb, AVCodecContext *enc)
}
if (enc->codec_id == CODEC_ID_MP2 || enc->codec_id == CODEC_ID_MP3) {
blkalign = enc->frame_size; //this is wrong, but it seems many demuxers do not work if this is set correctly
/* this is wrong, but it seems many demuxers do not work if this is set
correctly */
blkalign = frame_size;
//blkalign = 144 * enc->bit_rate/enc->sample_rate;
} else if (enc->codec_id == CODEC_ID_AC3) {
blkalign = 3840; //maximum bytes per frame
@ -489,7 +499,7 @@ int ff_put_wav_header(AVIOContext *pb, AVCodecContext *enc)
bytestream_put_le32(&riff_extradata, 0); /* dwPTSHigh */
} else if (enc->codec_id == CODEC_ID_GSM_MS || enc->codec_id == CODEC_ID_ADPCM_IMA_WAV) {
hdrsize += 2;
bytestream_put_le16(&riff_extradata, enc->frame_size); /* wSamplesPerBlock */
bytestream_put_le16(&riff_extradata, frame_size); /* wSamplesPerBlock */
} else if(enc->extradata_size){
riff_extradata_start= enc->extradata;
riff_extradata= enc->extradata + enc->extradata_size;
@ -657,10 +667,18 @@ int ff_get_bmp_header(AVIOContext *pb, AVStream *st)
void ff_parse_specific_params(AVCodecContext *stream, int *au_rate, int *au_ssize, int *au_scale)
{
int gcd;
int audio_frame_size;
/* We use the known constant frame size for the codec if known, otherwise
fallback to using AVCodecContext.frame_size, which is not as reliable
for indicating packet duration */
audio_frame_size = av_get_audio_frame_duration(stream, 0);
if (!audio_frame_size)
audio_frame_size = stream->frame_size;
*au_ssize= stream->block_align;
if(stream->frame_size && stream->sample_rate){
*au_scale=stream->frame_size;
if (audio_frame_size && stream->sample_rate) {
*au_scale = audio_frame_size;
*au_rate= stream->sample_rate;
}else if(stream->codec_type == AVMEDIA_TYPE_VIDEO ||
stream->codec_type == AVMEDIA_TYPE_DATA ||
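Both ff_put_wav_header() and ff_parse_specific_params() now follow the same rule: prefer the codec's known constant frame duration, and only fall back to AVCodecContext.frame_size, which is less reliable as a packet-duration indicator. Boiled down to a sketch (the helper name is hypothetical):

#include "libavcodec/avcodec.h"

static int reliable_frame_size(AVCodecContext *enc)
{
    int frame_size = av_get_audio_frame_duration(enc, 0);
    if (!frame_size)
        frame_size = enc->frame_size;   /* fallback, less reliable */
    return frame_size;
}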


@ -129,10 +129,17 @@ static int rtp_write_header(AVFormatContext *s1)
s->max_frames_per_packet = 0;
if (s1->max_delay) {
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
if (st->codec->frame_size == 0) {
int frame_size = av_get_audio_frame_duration(st->codec, 0);
if (!frame_size)
frame_size = st->codec->frame_size;
if (frame_size == 0) {
av_log(s1, AV_LOG_ERROR, "Cannot respect max delay: frame size = 0\n");
} else {
s->max_frames_per_packet = av_rescale_rnd(s1->max_delay, st->codec->sample_rate, AV_TIME_BASE * (int64_t)st->codec->frame_size, AV_ROUND_DOWN);
s->max_frames_per_packet =
av_rescale_q_rnd(s1->max_delay,
AV_TIME_BASE_Q,
(AVRational){ frame_size / st->codec->sample_rate },
AV_ROUND_DOWN);
}
}
if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
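What the audio branch above computes is simply how many frames fit into the allowed delay: max_delay is in AV_TIME_BASE units and one frame lasts frame_size / sample_rate seconds. A stand-alone sketch of that arithmetic (assuming non-zero frame_size and sample_rate; this is not the patch code itself):

#include "libavutil/avutil.h"        /* AV_TIME_BASE */
#include "libavutil/mathematics.h"   /* av_rescale_rnd(), AV_ROUND_DOWN */

static int frames_per_max_delay(int64_t max_delay, int frame_size,
                                int sample_rate)
{
    /* (max_delay / AV_TIME_BASE) seconds divided by
     * (frame_size / sample_rate) seconds per frame, rounded down */
    return (int)av_rescale_rnd(max_delay, sample_rate,
                               (int64_t)AV_TIME_BASE * frame_size,
                               AV_ROUND_DOWN);
}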


@ -409,13 +409,13 @@ AVParserState *ff_store_parser_state(AVFormatContext *s)
state->fpos = avio_tell(s->pb);
// copy context structures
state->cur_st = s->cur_st;
state->packet_buffer = s->packet_buffer;
state->parse_queue = s->parse_queue;
state->raw_packet_buffer = s->raw_packet_buffer;
state->raw_packet_buffer_remaining_size = s->raw_packet_buffer_remaining_size;
s->cur_st = NULL;
s->packet_buffer = NULL;
s->parse_queue = NULL;
s->raw_packet_buffer = NULL;
s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
@ -429,19 +429,13 @@ AVParserState *ff_store_parser_state(AVFormatContext *s)
ss->last_IP_pts = st->last_IP_pts;
ss->cur_dts = st->cur_dts;
ss->reference_dts = st->reference_dts;
ss->cur_ptr = st->cur_ptr;
ss->cur_len = st->cur_len;
ss->probe_packets = st->probe_packets;
ss->cur_pkt = st->cur_pkt;
st->parser = NULL;
st->last_IP_pts = AV_NOPTS_VALUE;
st->cur_dts = AV_NOPTS_VALUE;
st->reference_dts = AV_NOPTS_VALUE;
st->cur_ptr = NULL;
st->cur_len = 0;
st->probe_packets = MAX_PROBE_PACKETS;
av_init_packet(&st->cur_pkt);
}
return state;
@ -460,8 +454,8 @@ void ff_restore_parser_state(AVFormatContext *s, AVParserState *state)
avio_seek(s->pb, state->fpos, SEEK_SET);
// copy context structures
s->cur_st = state->cur_st;
s->packet_buffer = state->packet_buffer;
s->parse_queue = state->parse_queue;
s->raw_packet_buffer = state->raw_packet_buffer;
s->raw_packet_buffer_remaining_size = state->raw_packet_buffer_remaining_size;
@ -474,10 +468,7 @@ void ff_restore_parser_state(AVFormatContext *s, AVParserState *state)
st->last_IP_pts = ss->last_IP_pts;
st->cur_dts = ss->cur_dts;
st->reference_dts = ss->reference_dts;
st->cur_ptr = ss->cur_ptr;
st->cur_len = ss->cur_len;
st->probe_packets = ss->probe_packets;
st->cur_pkt = ss->cur_pkt;
}
av_free(state->stream_states);
@ -507,10 +498,10 @@ void ff_free_parser_state(AVFormatContext *s, AVParserState *state)
ss = &state->stream_states[i];
if (ss->parser)
av_parser_close(ss->parser);
av_free_packet(&ss->cur_pkt);
}
free_packet_list(state->packet_buffer);
free_packet_list(state->parse_queue);
free_packet_list(state->raw_packet_buffer);
av_free(state->stream_states);


@ -31,12 +31,9 @@
typedef struct AVParserStreamState {
// saved members of AVStream
AVCodecParserContext *parser;
AVPacket cur_pkt;
int64_t last_IP_pts;
int64_t cur_dts;
int64_t reference_dts;
const uint8_t *cur_ptr;
int cur_len;
int probe_packets;
} AVParserStreamState;
@ -47,8 +44,8 @@ typedef struct AVParserState {
int64_t fpos; ///< file position at the time of call
// saved members of AVFormatContext
AVStream *cur_st; ///< current stream.
AVPacketList *packet_buffer; ///< packet buffer of original state
AVPacketList *parse_queue; ///< parse queue of original state
AVPacketList *raw_packet_buffer; ///< raw packet buffer of original state
int raw_packet_buffer_remaining_size; ///< remaining space in raw_packet_buffer


@ -79,10 +79,10 @@ static int create_audio_stream(AVFormatContext *s, SIFFContext *c)
ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
ast->codec->codec_id = CODEC_ID_PCM_U8;
ast->codec->channels = 1;
ast->codec->bits_per_coded_sample = c->bits;
ast->codec->bits_per_coded_sample = 8;
ast->codec->sample_rate = c->rate;
ast->codec->frame_size = c->block_align;
avpriv_set_pts_info(ast, 16, 1, c->rate);
ast->start_time = 0;
return 0;
}
@ -215,9 +215,10 @@ static int siff_read_packet(AVFormatContext *s, AVPacket *pkt)
pkt->stream_index = 0;
c->curstrm = -1;
}else{
if (av_get_packet(s->pb, pkt, c->sndsize - 4) < 0)
if ((size = av_get_packet(s->pb, pkt, c->sndsize - 4)) < 0)
return AVERROR(EIO);
pkt->stream_index = 1;
pkt->duration = size;
c->curstrm = 0;
}
if(!c->cur_frame || c->curstrm)
@ -228,6 +229,7 @@ static int siff_read_packet(AVFormatContext *s, AVPacket *pkt)
size = av_get_packet(s->pb, pkt, c->block_align);
if(size <= 0)
return AVERROR(EIO);
pkt->duration = size;
}
return pkt->size;
}
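The SIFF audio stream is declared above as unsigned 8-bit mono PCM with a 1/rate time base, so one byte is exactly one sample and setting pkt->duration to the byte count is correct. For raw PCM in general the relation is (illustrative helper, not in the patch):

static int pcm_samples_per_packet(int size_bytes, int bits_per_sample,
                                  int channels)
{
    /* for 8-bit mono this reduces to size_bytes */
    return size_bytes * 8 / (bits_per_sample * channels);
}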


@ -187,10 +187,6 @@ static int swf_write_header(AVFormatContext *s)
AVCodecContext *enc = s->streams[i]->codec;
if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
if (enc->codec_id == CODEC_ID_MP3) {
if (!enc->frame_size) {
av_log(s, AV_LOG_ERROR, "audio frame size not set\n");
return -1;
}
swf->audio_enc = enc;
swf->audio_fifo= av_fifo_alloc(AUDIO_FIFO_SIZE);
if (!swf->audio_fifo)
@ -452,7 +448,7 @@ static int swf_write_audio(AVFormatContext *s,
}
av_fifo_generic_write(swf->audio_fifo, buf, size, NULL);
swf->sound_samples += enc->frame_size;
swf->sound_samples += av_get_audio_frame_duration(enc, size);
/* if audio only stream make sure we add swf frames */
if (!swf->video_enc)


@ -754,11 +754,11 @@ int av_read_packet(AVFormatContext *s, AVPacket *pkt)
static int determinable_frame_size(AVCodecContext *avctx)
{
if (avctx->codec_id == CODEC_ID_AAC ||
if (/*avctx->codec_id == CODEC_ID_AAC ||
avctx->codec_id == CODEC_ID_MP1 ||
avctx->codec_id == CODEC_ID_MP2 ||
avctx->codec_id == CODEC_ID_MP3 ||
avctx->codec_id == CODEC_ID_CELT)
avctx->codec_id == CODEC_ID_MP2 ||*/
avctx->codec_id == CODEC_ID_MP3/* ||
avctx->codec_id == CODEC_ID_CELT*/)
return 1;
return 0;
}
@ -766,27 +766,22 @@ static int determinable_frame_size(AVCodecContext *avctx)
/**
* Get the number of samples of an audio frame. Return -1 on error.
*/
static int get_audio_frame_size(AVCodecContext *enc, int size)
static int get_audio_frame_size(AVCodecContext *enc, int size, int mux)
{
int frame_size;
if (enc->frame_size <= 1) {
int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
/* give frame_size priority if demuxing */
if (!mux && enc->frame_size > 1)
return enc->frame_size;
if (bits_per_sample) {
if (enc->channels == 0)
return -1;
frame_size = (size << 3) / (bits_per_sample * enc->channels);
} else {
/* used for example by ADPCM codecs */
if (enc->bit_rate == 0 || determinable_frame_size(enc))
return -1;
frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
}
} else {
frame_size = enc->frame_size;
}
return frame_size;
if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
return frame_size;
/* fallback to using frame_size if muxing */
if (enc->frame_size > 1)
return enc->frame_size;
return -1;
}
@ -822,7 +817,7 @@ static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
}
break;
case AVMEDIA_TYPE_AUDIO:
frame_size = get_audio_frame_size(st->codec, pkt->size);
frame_size = get_audio_frame_size(st->codec, pkt->size, 0);
if (frame_size <= 0 || st->codec->sample_rate <= 0)
break;
*pnum = frame_size;
@ -860,11 +855,20 @@ static int is_intra_only(AVCodecContext *enc){
return 0;
}
static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
{
if (pktl->next)
return pktl->next;
if (pktl == s->parse_queue_end)
return s->packet_buffer;
return NULL;
}
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
int64_t dts, int64_t pts)
{
AVStream *st= s->streams[stream_index];
AVPacketList *pktl= s->packet_buffer;
AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
return;
@ -872,7 +876,7 @@ static void update_initial_timestamps(AVFormatContext *s, int stream_index,
st->first_dts= dts - st->cur_dts;
st->cur_dts= dts;
for(; pktl; pktl= pktl->next){
for(; pktl; pktl= get_next_pkt(s, st, pktl)){
if(pktl->pkt.stream_index != stream_index)
continue;
//FIXME think more about this check
@ -889,34 +893,36 @@ static void update_initial_timestamps(AVFormatContext *s, int stream_index,
st->start_time = pts;
}
static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
static void update_initial_durations(AVFormatContext *s, AVStream *st,
int stream_index, int duration)
{
AVPacketList *pktl= s->packet_buffer;
AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
int64_t cur_dts= 0;
if(st->first_dts != AV_NOPTS_VALUE){
cur_dts= st->first_dts;
for(; pktl; pktl= pktl->next){
if(pktl->pkt.stream_index == pkt->stream_index){
for(; pktl; pktl= get_next_pkt(s, st, pktl)){
if(pktl->pkt.stream_index == stream_index){
if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
break;
cur_dts -= pkt->duration;
cur_dts -= duration;
}
}
pktl= s->packet_buffer;
pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
st->first_dts = cur_dts;
}else if(st->cur_dts)
return;
for(; pktl; pktl= pktl->next){
if(pktl->pkt.stream_index != pkt->stream_index)
for(; pktl; pktl= get_next_pkt(s, st, pktl)){
if(pktl->pkt.stream_index != stream_index)
continue;
if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
&& !pktl->pkt.duration){
pktl->pkt.dts= cur_dts;
if(!st->codec->has_b_frames)
pktl->pkt.pts= cur_dts;
pktl->pkt.duration= pkt->duration;
// if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
pktl->pkt.duration = duration;
}else
break;
cur_dts = pktl->pkt.dts + pktl->pkt.duration;
@ -969,8 +975,8 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
}
}
if(pkt->duration != 0 && s->packet_buffer)
update_initial_durations(s, st, pkt);
if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
update_initial_durations(s, st, pkt->stream_index, pkt->duration);
/* correct timestamps with byte offset if demuxers only have timestamps
on packet boundaries */
@ -1029,12 +1035,16 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
st->last_IP_pts= pkt->pts;
/* cannot compute PTS if not present (we can compute it only
by knowing the future */
} else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
} else if (pkt->pts != AV_NOPTS_VALUE ||
pkt->dts != AV_NOPTS_VALUE ||
pkt->duration ) {
int duration = pkt->duration;
if(pkt->pts != AV_NOPTS_VALUE && duration){
int64_t old_diff= FFABS(st->cur_dts - duration - pkt->pts);
int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
pkt->pts += pkt->duration;
if(old_diff < new_diff && old_diff < (duration>>3)){
pkt->pts += duration;
// av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
}
}
@ -1047,7 +1057,7 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
pkt->pts = st->cur_dts;
pkt->dts = pkt->pts;
if(pkt->pts != AV_NOPTS_VALUE)
st->cur_dts = pkt->pts + pkt->duration;
st->cur_dts = pkt->pts + duration;
}
}
@ -1073,161 +1083,215 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
pkt->convergence_duration = pc->convergence_duration;
}
static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
{
while (*pkt_buf) {
AVPacketList *pktl = *pkt_buf;
*pkt_buf = pktl->next;
av_free_packet(&pktl->pkt);
av_freep(&pktl);
}
*pkt_buf_end = NULL;
}
/**
* Parse a packet, add all split parts to parse_queue
*
* @param pkt packet to parse, NULL when flushing the parser at end of stream
*/
static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
{
AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
AVStream *st = s->streams[stream_index];
uint8_t *data = pkt ? pkt->data : NULL;
int size = pkt ? pkt->size : 0;
int ret = 0, got_output = 0;
if (!pkt) {
av_init_packet(&flush_pkt);
pkt = &flush_pkt;
got_output = 1;
}
while (size > 0 || (pkt == &flush_pkt && got_output)) {
int len;
av_init_packet(&out_pkt);
len = av_parser_parse2(st->parser, st->codec,
&out_pkt.data, &out_pkt.size, data, size,
pkt->pts, pkt->dts, pkt->pos);
pkt->pts = pkt->dts = AV_NOPTS_VALUE;
/* increment read pointer */
data += len;
size -= len;
got_output = !!out_pkt.size;
if (!out_pkt.size)
continue;
/* set the duration */
out_pkt.duration = 0;
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
if (st->codec->sample_rate > 0) {
out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
(AVRational){ 1, st->codec->sample_rate },
st->time_base,
AV_ROUND_DOWN);
}
} else if (st->codec->time_base.num != 0 &&
st->codec->time_base.den != 0) {
out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
st->codec->time_base,
st->time_base,
AV_ROUND_DOWN);
}
out_pkt.stream_index = st->index;
out_pkt.pts = st->parser->pts;
out_pkt.dts = st->parser->dts;
out_pkt.pos = st->parser->pos;
if (st->parser->key_frame == 1 ||
(st->parser->key_frame == -1 &&
st->parser->pict_type == AV_PICTURE_TYPE_I))
out_pkt.flags |= AV_PKT_FLAG_KEY;
compute_pkt_fields(s, st, st->parser, &out_pkt);
if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
out_pkt.flags & AV_PKT_FLAG_KEY) {
int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? out_pkt.pos : st->parser->frame_offset;
ff_reduce_index(s, st->index);
av_add_index_entry(st, pos, out_pkt.dts,
0, 0, AVINDEX_KEYFRAME);
}
if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
out_pkt.destruct = pkt->destruct;
pkt->destruct = NULL;
}
if ((ret = av_dup_packet(&out_pkt)) < 0)
goto fail;
if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
av_free_packet(&out_pkt);
ret = AVERROR(ENOMEM);
goto fail;
}
}
/* end of the stream => close and free the parser */
if (pkt == &flush_pkt) {
av_parser_close(st->parser);
st->parser = NULL;
}
fail:
av_free_packet(pkt);
return ret;
}
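Stripped of the demuxer bookkeeping, parse_packet() is the standard av_parser_parse2() consumption loop: feed the buffer repeatedly, advance by the bytes consumed, and pass the timestamps only with the first chunk. A minimal illustrative version (handle_complete_frame() is a hypothetical consumer, not FFmpeg API):

#include "libavcodec/avcodec.h"

void handle_complete_frame(uint8_t *data, int size);   /* hypothetical */

static void parse_all(AVCodecParserContext *parser, AVCodecContext *avctx,
                      const uint8_t *data, int size,
                      int64_t pts, int64_t dts, int64_t pos)
{
    while (size > 0) {
        uint8_t *out_data = NULL;
        int      out_size = 0;
        int len = av_parser_parse2(parser, avctx, &out_data, &out_size,
                                   data, size, pts, dts, pos);
        data += len;
        size -= len;
        pts = dts = AV_NOPTS_VALUE;   /* timestamps belong to the first call */
        if (out_size)
            handle_complete_frame(out_data, out_size);
    }
}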
static int read_from_packet_buffer(AVPacketList **pkt_buffer,
AVPacketList **pkt_buffer_end,
AVPacket *pkt)
{
AVPacketList *pktl;
av_assert0(*pkt_buffer);
pktl = *pkt_buffer;
*pkt = pktl->pkt;
*pkt_buffer = pktl->next;
if (!pktl->next)
*pkt_buffer_end = NULL;
av_freep(&pktl);
return 0;
}
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
AVStream *st;
int len, ret, i;
int ret = 0, i, got_packet = 0;
av_init_packet(pkt);
for(;;) {
/* select current input stream component */
st = s->cur_st;
if (st) {
if (!st->need_parsing || !st->parser) {
/* no parsing needed: we just output the packet as is */
/* raw data support */
*pkt = st->cur_pkt;
st->cur_pkt.data= NULL;
st->cur_pkt.side_data_elems = 0;
st->cur_pkt.side_data = NULL;
compute_pkt_fields(s, st, NULL, pkt);
s->cur_st = NULL;
if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
(pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
ff_reduce_index(s, st->index);
av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
}
break;
} else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
st->cur_ptr, st->cur_len,
st->cur_pkt.pts, st->cur_pkt.dts,
st->cur_pkt.pos);
st->cur_pkt.pts = AV_NOPTS_VALUE;
st->cur_pkt.dts = AV_NOPTS_VALUE;
/* increment read pointer */
st->cur_ptr += len;
st->cur_len -= len;
while (!got_packet && !s->parse_queue) {
AVStream *st;
AVPacket cur_pkt;
/* return packet if any */
if (pkt->size) {
got_packet:
pkt->duration = 0;
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
if (st->codec->sample_rate > 0) {
pkt->duration = av_rescale_q_rnd(st->parser->duration,
(AVRational){ 1, st->codec->sample_rate },
st->time_base,
AV_ROUND_DOWN);
}
} else if (st->codec->time_base.num != 0 &&
st->codec->time_base.den != 0) {
pkt->duration = av_rescale_q_rnd(st->parser->duration,
st->codec->time_base,
st->time_base,
AV_ROUND_DOWN);
}
pkt->stream_index = st->index;
pkt->pts = st->parser->pts;
pkt->dts = st->parser->dts;
pkt->pos = st->parser->pos;
if (st->parser->key_frame == 1 ||
(st->parser->key_frame == -1 &&
st->parser->pict_type == AV_PICTURE_TYPE_I))
pkt->flags |= AV_PKT_FLAG_KEY;
if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
s->cur_st = NULL;
pkt->destruct= st->cur_pkt.destruct;
st->cur_pkt.destruct= NULL;
st->cur_pkt.data = NULL;
assert(st->cur_len == 0);
}else{
pkt->destruct = NULL;
}
compute_pkt_fields(s, st, st->parser, pkt);
if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? pkt->pos : st->parser->frame_offset;
ff_reduce_index(s, st->index);
av_add_index_entry(st, pos, pkt->dts,
0, 0, AVINDEX_KEYFRAME);
}
break;
}
} else {
/* free packet */
av_free_packet(&st->cur_pkt);
s->cur_st = NULL;
}
} else {
AVPacket cur_pkt;
/* read next packet */
ret = av_read_packet(s, &cur_pkt);
if (ret < 0) {
if (ret == AVERROR(EAGAIN))
return ret;
/* return the last frames, if any */
for(i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
if (st->parser && st->need_parsing) {
av_parser_parse2(st->parser, st->codec,
&pkt->data, &pkt->size,
NULL, 0,
AV_NOPTS_VALUE, AV_NOPTS_VALUE,
AV_NOPTS_VALUE);
if (pkt->size)
goto got_packet;
}
}
/* no more packets: really terminate parsing */
/* read next packet */
ret = av_read_packet(s, &cur_pkt);
if (ret < 0) {
if (ret == AVERROR(EAGAIN))
return ret;
/* flush the parsers */
for(i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
if (st->parser && st->need_parsing)
parse_packet(s, NULL, st->index);
}
st = s->streams[cur_pkt.stream_index];
st->cur_pkt= cur_pkt;
/* all remaining packets are now in parse_queue =>
* really terminate parsing */
break;
}
ret = 0;
st = s->streams[cur_pkt.stream_index];
if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
st->cur_pkt.dts != AV_NOPTS_VALUE &&
st->cur_pkt.pts < st->cur_pkt.dts){
av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
st->cur_pkt.stream_index,
st->cur_pkt.pts,
st->cur_pkt.dts,
st->cur_pkt.size);
// av_free_packet(&st->cur_pkt);
// return -1;
}
if (cur_pkt.pts != AV_NOPTS_VALUE &&
cur_pkt.dts != AV_NOPTS_VALUE &&
cur_pkt.pts < cur_pkt.dts) {
av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
cur_pkt.stream_index,
cur_pkt.pts,
cur_pkt.dts,
cur_pkt.size);
}
if (s->debug & FF_FDEBUG_TS)
av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
cur_pkt.stream_index,
cur_pkt.pts,
cur_pkt.dts,
cur_pkt.size,
cur_pkt.duration,
cur_pkt.flags);
if(s->debug & FF_FDEBUG_TS)
av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
st->cur_pkt.stream_index,
st->cur_pkt.pts,
st->cur_pkt.dts,
st->cur_pkt.size,
st->cur_pkt.duration,
st->cur_pkt.flags);
s->cur_st = st;
st->cur_ptr = st->cur_pkt.data;
st->cur_len = st->cur_pkt.size;
if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
st->parser = av_parser_init(st->codec->codec_id);
if (!st->parser) {
av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
"%s, packets or times may be invalid.\n",
avcodec_get_name(st->codec->codec_id));
/* no parser available: just output the raw packets */
st->need_parsing = AVSTREAM_PARSE_NONE;
}else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
}else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
st->parser->flags |= PARSER_FLAG_ONCE;
}
if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
st->parser = av_parser_init(st->codec->codec_id);
if (!st->parser) {
av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
"%s, packets or times may be invalid.\n",
avcodec_get_name(st->codec->codec_id));
/* no parser available: just output the raw packets */
st->need_parsing = AVSTREAM_PARSE_NONE;
} else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
} else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
st->parser->flags |= PARSER_FLAG_ONCE;
}
}
if (!st->need_parsing || !st->parser) {
/* no parsing needed: we just output the packet as is */
*pkt = cur_pkt;
compute_pkt_fields(s, st, NULL, pkt);
if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
(pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
ff_reduce_index(s, st->index);
av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
}
got_packet = 1;
} else if (st->discard < AVDISCARD_ALL) {
if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
return ret;
} else {
/* free packet */
av_free_packet(&cur_pkt);
}
}
if (!got_packet && s->parse_queue)
ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
if(s->debug & FF_FDEBUG_TS)
av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
pkt->stream_index,
@ -1237,17 +1301,7 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
pkt->duration,
pkt->flags);
return 0;
}
static int read_from_packet_buffer(AVFormatContext *s, AVPacket *pkt)
{
AVPacketList *pktl = s->packet_buffer;
av_assert0(pktl);
*pkt = pktl->pkt;
s->packet_buffer = pktl->next;
av_freep(&pktl);
return 0;
return ret;
}
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
@ -1256,7 +1310,9 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
int eof = 0;
if (!genpts)
return s->packet_buffer ? read_from_packet_buffer(s, pkt) :
return s->packet_buffer ? read_from_packet_buffer(&s->packet_buffer,
&s->packet_buffer_end,
pkt) :
read_frame_internal(s, pkt);
for (;;) {
@ -1282,7 +1338,8 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
/* read packet from packet buffer, if there is data */
if (!(next_pkt->pts == AV_NOPTS_VALUE &&
next_pkt->dts != AV_NOPTS_VALUE && !eof))
return read_from_packet_buffer(s, pkt);
return read_from_packet_buffer(&s->packet_buffer,
&s->packet_buffer_end, pkt);
}
ret = read_frame_internal(s, pkt);
@ -1303,24 +1360,10 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
/* XXX: suppress the packet queue */
static void flush_packet_queue(AVFormatContext *s)
{
AVPacketList *pktl;
free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
for(;;) {
pktl = s->packet_buffer;
if (!pktl)
break;
s->packet_buffer = pktl->next;
av_free_packet(&pktl->pkt);
av_free(pktl);
}
while(s->raw_packet_buffer){
pktl = s->raw_packet_buffer;
s->raw_packet_buffer = pktl->next;
av_free_packet(&pktl->pkt);
av_free(pktl);
}
s->packet_buffer_end=
s->raw_packet_buffer_end= NULL;
s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
}
@ -1356,8 +1399,6 @@ void ff_read_frame_flush(AVFormatContext *s)
flush_packet_queue(s);
s->cur_st = NULL;
/* for each stream, reset read state */
for(i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
@ -1365,15 +1406,11 @@ void ff_read_frame_flush(AVFormatContext *s)
if (st->parser) {
av_parser_close(st->parser);
st->parser = NULL;
av_free_packet(&st->cur_pkt);
}
st->last_IP_pts = AV_NOPTS_VALUE;
if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = 0;
else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
st->reference_dts = AV_NOPTS_VALUE;
/* fail safe */
st->cur_ptr = NULL;
st->cur_len = 0;
st->probe_packets = MAX_PROBE_PACKETS;
@ -1992,8 +2029,6 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
int64_t filesize, offset, duration;
int retry=0;
ic->cur_st = NULL;
/* flush packet queue */
flush_packet_queue(ic);
@ -2005,7 +2040,6 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
if (st->parser) {
av_parser_close(st->parser);
st->parser= NULL;
av_free_packet(&st->cur_pkt);
}
}
@ -2107,17 +2141,22 @@ static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
}
}
static int has_codec_parameters(AVCodecContext *avctx)
static int has_codec_parameters(AVStream *st)
{
AVCodecContext *avctx = st->codec;
int val;
switch (avctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
val = avctx->sample_rate && avctx->channels && avctx->sample_fmt != AV_SAMPLE_FMT_NONE;
val = avctx->sample_rate && avctx->channels;
if (!avctx->frame_size && determinable_frame_size(avctx))
return 0;
if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
return 0;
break;
case AVMEDIA_TYPE_VIDEO:
val = avctx->width && avctx->pix_fmt != PIX_FMT_NONE;
val = avctx->width;
if (st->info->found_decoder >= 0 && avctx->pix_fmt == PIX_FMT_NONE)
return 0;
break;
case AVMEDIA_TYPE_DATA:
if(avctx->codec_id == CODEC_ID_NONE) return 1;
@ -2142,14 +2181,16 @@ static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **option
AVFrame picture;
AVPacket pkt = *avpkt;
if (!avcodec_is_open(st->codec)) {
if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
AVDictionary *thread_opt = NULL;
codec = st->codec->codec ? st->codec->codec :
avcodec_find_decoder(st->codec->codec_id);
if (!codec)
if (!codec) {
st->info->found_decoder = -1;
return -1;
}
/* force thread count to 1 since the h264 decoder will not extract SPS
* and PPS to extradata during multi-threaded decoding */
@ -2157,13 +2198,20 @@ static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **option
ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
if (!options)
av_dict_free(&thread_opt);
if (ret < 0)
if (ret < 0) {
st->info->found_decoder = -1;
return ret;
}
}
st->info->found_decoder = 1;
} else if (!st->info->found_decoder)
st->info->found_decoder = 1;
if (st->info->found_decoder < 0)
return -1;
while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
ret >= 0 &&
(!has_codec_parameters(st->codec) ||
(!has_codec_parameters(st) ||
!has_decode_delay_been_guessed(st) ||
(!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
got_picture = 0;
@ -2331,7 +2379,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
: &thread_opt);
//try to just open decoders, in case this is enough to get parameters
if(!has_codec_parameters(st->codec)){
if (!has_codec_parameters(st)) {
if (codec && !st->codec->codec)
avcodec_open2(st->codec, codec, options ? &options[i]
: &thread_opt);
@ -2358,7 +2406,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
int fps_analyze_framecount = 20;
st = ic->streams[i];
if (!has_codec_parameters(st->codec))
if (!has_codec_parameters(st))
break;
/* if the timebase is coarse (like the usual millisecond precision
of mkv), we need to analyze more frames to reliably arrive at
@ -2483,7 +2531,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
if (flush_codecs) {
AVPacket empty_pkt = { 0 };
int err;
int err = 0;
av_init_packet(&empty_pkt);
ret = -1; /* we could not have all the codec parameters before EOF */
@ -2491,17 +2539,20 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
st = ic->streams[i];
/* flush the decoders */
do {
err = try_decode_frame(st, &empty_pkt,
(options && i < orig_nb_streams) ?
&options[i] : NULL);
} while (err > 0 && !has_codec_parameters(st->codec));
if (st->info->found_decoder == 1) {
do {
err = try_decode_frame(st, &empty_pkt,
(options && i < orig_nb_streams) ?
&options[i] : NULL);
} while (err > 0 && !has_codec_parameters(st));
if (err < 0) {
av_log(ic, AV_LOG_INFO,
"decoding for stream %d failed\n", st->index);
if (err < 0) {
av_log(ic, AV_LOG_INFO,
"decoding for stream %d failed\n", st->index);
}
}
if (!has_codec_parameters(st->codec)){
if (!has_codec_parameters(st)){
char buf[256];
avcodec_string(buf, sizeof(buf), st->codec, 0);
av_log(ic, AV_LOG_WARNING,
@ -2738,7 +2789,6 @@ void avformat_free_context(AVFormatContext *s)
st = s->streams[i];
if (st->parser) {
av_parser_close(st->parser);
av_free_packet(&st->cur_pkt);
}
if (st->attached_pic.data)
av_free_packet(&st->attached_pic);
@ -3193,7 +3243,7 @@ static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
/* update pts */
switch (st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
frame_size = get_audio_frame_size(st->codec, pkt->size);
frame_size = get_audio_frame_size(st->codec, pkt->size, 1);
/* HACK/FIXME, we skip the initial 0 size packets as they are most
likely equal to the encoder delay, but it would be better if we


@ -64,3 +64,4 @@
0, 19, 19, 1, 921600, 0x5639e670
1, 21000, 21000, 500, 1000, 0xa491f3ef
1, 21500, 21500, 500, 1000, 0x2c036e18
1, 22000, 22000, 500, 1000, 0x52d65e2a


@ -1,7 +1,7 @@
#tb 0: 1/25
0, 0, 0, 1, 38016, 0xa6f15db5
0, 1, 1, 1, 38016, 0xa6f15db5
0, 2, 2, 1, 38016, 0xa6f15db5
0, 3, 3, 1, 38016, 0xa6f15db5
0, 4, 4, 1, 38016, 0x5c4ef0e7
0, 5, 5, 1, 38016, 0x53a42d1d
0, 6, 6, 1, 38016, 0x68f7d89e


@ -1,7 +1,7 @@
#tb 0: 1/25
0, 0, 0, 1, 115200, 0xb8830eef
0, 1, 1, 1, 115200, 0xb8830eef
0, 2, 2, 1, 115200, 0xb8830eef
0, 3, 3, 1, 115200, 0xb8830eef
0, 4, 4, 1, 115200, 0x952ff5e1
0, 5, 5, 1, 115200, 0xa4362b14
0, 6, 6, 1, 115200, 0x32bacbe7


@ -1,14 +1,14 @@
#tb 0: 1/1000
#tb 1: 1/1000
0, 0, 0, 0, 282, 0x000d949a
1, 0, 0, 435, 1088, 0x5cd379bb
1, 435, 435, 435, 1088, 0x8dfa1368
1, 740, 740, 435, 1088, 0xc0d211be
1, 1023, 1023, 435, 1088, 0x8238113a
1, 0, 0, 0, 1088, 0x5cd379bb
1, 435, 435, 0, 1088, 0x8dfa1368
1, 740, 740, 0, 1088, 0xc0d211be
1, 1023, 1023, 0, 1088, 0x8238113a
0, 1208, 1208, 0, 137, 0x903c415e
0, 1250, 1250, 0, 942, 0xd5b7d2aa
0, 1291, 1291, 0, 841, 0xaffd8ce6
1, 1306, 1306, 435, 1088, 0x9f8924b7
1, 1306, 1306, 0, 1088, 0x9f8924b7
0, 1333, 1333, 0, 1164, 0x4ed84836
0, 1375, 1375, 0, 1492, 0x37f3e8aa
0, 1416, 1416, 0, 1663, 0xc091392d
@ -16,14 +16,14 @@
0, 1500, 1500, 0, 1721, 0x7bdb3dd0
0, 1541, 1541, 0, 1410, 0xde689881
0, 1583, 1583, 0, 1258, 0xb5b86920
1, 1589, 1589, 435, 1088, 0x767f317a
1, 1589, 1589, 0, 1088, 0x767f317a
0, 1625, 1625, 0, 2050, 0x99b6d7c7
0, 1666, 1666, 0, 1242, 0x9ba35009
0, 1708, 1708, 0, 1630, 0x17f10192
0, 1750, 1750, 0, 1747, 0xbbee59d7
0, 1791, 1791, 0, 1565, 0xb09b00d9
0, 1833, 1833, 0, 1573, 0xd2e62201
1, 1872, 1872, 435, 1088, 0x57000d38
1, 1872, 1872, 0, 1088, 0x57000d38
0, 1875, 1875, 0, 1353, 0x2305a24d
0, 1916, 1916, 0, 1425, 0xf41bbb46
0, 1958, 1958, 0, 1355, 0xfc08a762
@ -32,7 +32,7 @@
0, 2083, 2083, 0, 1967, 0x43d61723
0, 2125, 2125, 0, 1378, 0xde22c753
0, 2166, 2166, 0, 961, 0x2418a4da
1, 2198, 2198, 435, 1088, 0xad977261
1, 2198, 2198, 0, 1088, 0xad977261
0, 2208, 2208, 0, 968, 0x0d04ba51
0, 2250, 2250, 0, 1140, 0x737f3543
0, 2291, 2291, 0, 1119, 0x3c050388
@ -64,7 +64,7 @@
0, 3375, 3375, 41, 1408, 0x5585c25c
0, 3416, 3416, 41, 1551, 0x42002c8d
0, 3458, 3458, 41, 1524, 0xbcb609e3
1, 3482, 3482, 435, 1088, 0xdce57471
1, 3482, 3482, 0, 1088, 0xdce57471
0, 3500, 3500, 41, 1554, 0x3d740564
0, 3541, 3541, 41, 1467, 0xc349f2d7
0, 3583, 3583, 41, 1066, 0xb7401462
@ -76,7 +76,7 @@
0, 3833, 3833, 41, 1175, 0x12ad3c1e
0, 3875, 3875, 41, 1179, 0x7e034570
0, 3916, 3916, 41, 1136, 0x5c633c51
1, 3918, 3918, 435, 1088, 0xf3887977
1, 3918, 3918, 0, 1088, 0xf3887977
0, 3958, 3958, 41, 1064, 0x5eb51d89
0, 4000, 4000, 41, 953, 0xe148bbdd
0, 4041, 4041, 41, 989, 0x901ec306
@ -87,7 +87,7 @@
0, 4250, 4250, 41, 1348, 0x27cfa91b
0, 4291, 4291, 41, 1417, 0x2312d70e
0, 4333, 4333, 41, 1285, 0x46ca4cca
1, 4353, 4353, 435, 1088, 0x1d6c8ed2
1, 4353, 4353, 0, 1088, 0x1d6c8ed2
0, 4375, 4375, 41, 1037, 0xcf09dd3d
0, 4416, 4416, 41, 1005, 0xe780cf1f
0, 4458, 4458, 41, 890, 0x8b1d8c1b
@ -95,7 +95,7 @@
0, 4541, 4541, 41, 803, 0x935e775e
0, 4583, 4583, 41, 1035, 0x6a220483
0, 4625, 4625, 41, 466, 0xd88bb237
1, 4789, 4789, 435, 1088, 0x09115bae
1, 4789, 4789, 0, 1088, 0x09115bae
0, 4916, 4916, 41, 945, 0x8f2eb1ec
0, 4958, 4958, 41, 1190, 0x4c451c1b
0, 5000, 5000, 41, 1811, 0x727c52cb
@ -104,7 +104,7 @@
0, 5125, 5125, 41, 1707, 0x3d1a6464
0, 5166, 5166, 41, 1103, 0x06b22710
0, 5208, 5208, 41, 1122, 0x656725b8
1, 5224, 5224, 435, 1088, 0x0c8b9372
1, 5224, 5224, 0, 1088, 0x0c8b9372
0, 5250, 5250, 41, 1150, 0xf9674678
0, 5291, 5291, 41, 1438, 0x03fac426
0, 5333, 5333, 41, 1623, 0x7adb1321
@ -115,7 +115,7 @@
0, 5541, 5541, 41, 1262, 0xb994692f
0, 5583, 5583, 41, 2097, 0xf4eb663f
0, 5625, 5625, 41, 1251, 0xfd4f633a
1, 5659, 5659, 435, 1088, 0x75a82540
1, 5659, 5659, 0, 1088, 0x75a82540
0, 5666, 5666, 41, 1633, 0xb7e1290e
0, 5708, 5708, 41, 1739, 0xecd18c38
0, 5750, 5750, 41, 1132, 0xc83e1828
@ -125,7 +125,7 @@
0, 5916, 5916, 41, 1340, 0xeaa2a231
0, 5958, 5958, 41, 1102, 0x82de2889
0, 6000, 6000, 41, 1834, 0x59b99b92
1, 6008, 6008, 435, 1088, 0x690312b0
1, 6008, 6008, 0, 1088, 0x690312b0
0, 6041, 6041, 41, 1332, 0x0610813a
0, 6083, 6083, 41, 1275, 0x5b0d7be7
0, 6125, 6125, 41, 1376, 0xd915b0fe
@ -133,7 +133,7 @@
0, 6208, 6208, 41, 1360, 0x3bcd93d3
0, 6250, 6250, 41, 1330, 0xd0439c93
0, 6291, 6291, 41, 1562, 0xb2560a09
1, 6312, 6312, 435, 1088, 0x76d50ff3
1, 6312, 6312, 0, 1088, 0x76d50ff3
0, 6333, 6333, 41, 1376, 0x4f9eb447
0, 6375, 6375, 41, 1405, 0x85d3b084
0, 6416, 6416, 41, 1344, 0xcdbda2ae
@ -141,12 +141,12 @@
0, 6500, 6500, 41, 1459, 0xf9d2c56f
0, 6541, 6541, 41, 1275, 0xf5536d81
0, 6583, 6583, 41, 1209, 0x3b5b4ea5
1, 6595, 6595, 435, 1088, 0x8766276f
1, 6595, 6595, 0, 1088, 0x8766276f
0, 6625, 6625, 41, 1352, 0x7b199d28
0, 6666, 6666, 41, 1349, 0x02adaaf3
0, 6708, 6708, 41, 1464, 0x20d7cfd2
0, 6750, 6750, 41, 1377, 0x78e0b1f4
0, 6791, 6791, 41, 289, 0x1f2e9246
1, 6878, 6878, 435, 1088, 0x678f20fd
1, 7161, 7161, 435, 1088, 0x718afa20
1, 7444, 7444, 435, 1088, 0x758f0939
1, 6878, 6878, 0, 1088, 0x678f20fd
1, 7161, 7161, 0, 1088, 0x718afa20
1, 7444, 7444, 0, 1088, 0x758f0939


@ -2,52 +2,52 @@ ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 72 size: 68
ret: 0 st:-1 flags:0 ts:-1.000000
ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 72 size: 68
ret: 0 st:-1 flags:1 ts: 1.894167
ret: 0 st: 0 flags:1 dts: 1.894059 pts: 1.894059 pos: 88812 size: 68
ret: 0 st: 0 flags:1 dts: 1.893878 pts: 1.893878 pos: 88812 size: 68
ret: 0 st: 0 flags:0 ts: 0.788345
ret: 0 st: 0 flags:1 dts: 0.789546 pts: 0.789546 pos: 37064 size: 68
ret: 0 st: 0 flags:1 dts: 0.789478 pts: 0.789478 pos: 37064 size: 68
ret: 0 st: 0 flags:1 ts:-0.317506
ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 72 size: 68
ret: 0 st:-1 flags:0 ts: 2.576668
ret: 0 st: 0 flags:1 dts: 2.577642 pts: 2.577642 pos: 120840 size: 68
ret: 0 st: 0 flags:1 dts: 2.577438 pts: 2.577438 pos: 120840 size: 68
ret: 0 st:-1 flags:1 ts: 1.470835
ret: 0 st: 0 flags:1 dts: 1.470249 pts: 1.470249 pos: 68956 size: 68
ret: 0 st: 0 flags:1 dts: 1.470113 pts: 1.470113 pos: 68956 size: 68
ret: 0 st: 0 flags:0 ts: 0.365011
ret: 0 st: 0 flags:1 dts: 0.365737 pts: 0.365737 pos: 17208 size: 68
ret: 0 st: 0 flags:1 dts: 0.365714 pts: 0.365714 pos: 17208 size: 68
ret: 0 st: 0 flags:1 ts:-0.740839
ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 72 size: 68
ret: 0 st:-1 flags:0 ts: 2.153336
ret: 0 st: 0 flags:1 dts: 2.153855 pts: 2.153855 pos: 100984 size: 68
ret: 0 st: 0 flags:1 dts: 2.153673 pts: 2.153673 pos: 100984 size: 68
ret: 0 st:-1 flags:1 ts: 1.047503
ret: 0 st: 0 flags:1 dts: 1.046440 pts: 1.046440 pos: 49100 size: 68
ret: 0 st: 0 flags:1 dts: 1.046349 pts: 1.046349 pos: 49100 size: 68
ret: 0 st: 0 flags:0 ts:-0.058322
ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 72 size: 68
ret: 0 st: 0 flags:1 ts: 2.835828
ret: 0 st: 0 flags:1 dts: 2.834535 pts: 2.834535 pos: 132876 size: 68
ret: 0 st: 0 flags:1 dts: 2.835760 pts: 2.835760 pos: 132944 size: 68
ret: 0 st:-1 flags:0 ts: 1.730004
ret: 0 st: 0 flags:1 dts: 1.730045 pts: 1.730045 pos: 81128 size: 68
ret: 0 st: 0 flags:1 dts: 1.731338 pts: 1.731338 pos: 81196 size: 68
ret: 0 st:-1 flags:1 ts: 0.624171
ret: 0 st: 0 flags:1 dts: 0.624082 pts: 0.624082 pos: 29312 size: 68
ret: 0 st: 0 flags:1 dts: 0.624036 pts: 0.624036 pos: 29312 size: 68
ret: 0 st: 0 flags:0 ts:-0.481655
ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 72 size: 68
ret: 0 st: 0 flags:1 ts: 2.412494
ret: 0 st: 0 flags:1 dts: 2.412200 pts: 2.412200 pos: 113088 size: 68
ret: 0 st: 0 flags:1 dts: 2.411995 pts: 2.411995 pos: 113088 size: 68
ret: 0 st:-1 flags:0 ts: 1.306672
ret: 0 st: 0 flags:1 dts: 1.307687 pts: 1.307687 pos: 61340 size: 68
ret: 0 st: 0 flags:1 dts: 1.307574 pts: 1.307574 pos: 61340 size: 68
ret: 0 st:-1 flags:1 ts: 0.200839
ret: 0 st: 0 flags:1 dts: 0.200295 pts: 0.200295 pos: 9456 size: 68
ret: 0 st: 0 flags:1 dts: 0.200272 pts: 0.200272 pos: 9456 size: 68
ret: 0 st: 0 flags:0 ts:-0.904989
ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 72 size: 68
ret: 0 st: 0 flags:1 ts: 1.989184
ret: 0 st: 0 flags:1 dts: 1.988390 pts: 1.988390 pos: 93232 size: 68
ret: 0 st: 0 flags:1 dts: 1.988209 pts: 1.988209 pos: 93232 size: 68
ret: 0 st:-1 flags:0 ts: 0.883340
ret: 0 st: 0 flags:1 dts: 0.883900 pts: 0.883900 pos: 41484 size: 68
ret: 0 st: 0 flags:1 dts: 0.883810 pts: 0.883810 pos: 41484 size: 68
ret: 0 st:-1 flags:1 ts:-0.222493
ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 72 size: 68
ret: 0 st: 0 flags:0 ts: 2.671678
ret: 0 st: 0 flags:1 dts: 2.671995 pts: 2.671995 pos: 125260 size: 68
ret: 0 st: 0 flags:1 dts: 2.671769 pts: 2.671769 pos: 125260 size: 68
ret: 0 st: 0 flags:1 ts: 1.565850
ret: 0 st: 0 flags:1 dts: 1.564580 pts: 1.564580 pos: 73376 size: 68
ret: 0 st: 0 flags:1 dts: 1.564444 pts: 1.564444 pos: 73376 size: 68
ret: 0 st:-1 flags:0 ts: 0.460008
ret: 0 st: 0 flags:1 dts: 0.460091 pts: 0.460091 pos: 21628 size: 68
ret: 0 st: 0 flags:1 dts: 0.460045 pts: 0.460045 pos: 21628 size: 68
ret: 0 st:-1 flags:1 ts:-0.645825
ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 72 size: 68