
Merge remote-tracking branch 'qatar/master'

* qatar/master: (22 commits)
  prores: add FATE tests
  id3v2: reduce the scope of some non-globally-used symbols/structures
  id3v2: cosmetics: move some declarations before the places they are used
  shorten: remove the flush function.
  shn: do not allow seeking in the raw shn demuxer.
  avformat: add AVInputFormat flag AVFMT_NO_BYTE_SEEK.
  avformat: update AVInputFormat allowed flags
  avformat: don't unconditionally call ff_read_frame_flush() when trying to seek.
  truespeech: use sizeof() instead of hardcoded sizes
  truespeech: remove unneeded variable, 'consumed'
  truespeech: simplify truespeech_read_frame() by using get_bits()
  truespeech: decode directly to output buffer instead of a temp buffer
  truespeech: check to make sure channels == 1
  truespeech: check for large enough output buffer rather than truncating output
  truespeech: remove unneeded zero-size packet check.
  mlpdec: return meaningful error codes instead of -1
  mlpdec: remove unnecessary wrapper function
  mlpdec: only calculate output size once
  mlpdec: validate that the reported channel count matches the actual output channel count
  pcm: reduce pointer type casting
  ...

Conflicts:
	libavformat/avformat.h
	libavformat/id3v2.c
	libavformat/id3v2.h
	libavformat/utils.c
	libavformat/version.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer  2011-10-14 03:43:24 +02:00
commit 91eb1b1525
21 changed files with 322 additions and 322 deletions


@@ -13,6 +13,9 @@ libavutil: 2011-04-18

 API changes, most recent first:

+2011-xx-xx - xxxxxxx - lavf 53.9.0
+  Add AVFMT_NO_BYTE_SEEK AVInputFormat flag.
+
 2011-10-12 - lavu 51.12.0
   AVOptions API rewrite.


@@ -116,7 +116,9 @@ static inline av_const int mid_pred(int a, int b, int c)
 #ifndef sign_extend
 static inline av_const int sign_extend(int val, unsigned bits)
 {
-    return (val << ((8 * sizeof(int)) - bits)) >> ((8 * sizeof(int)) - bits);
+    unsigned shift = 8 * sizeof(int) - bits;
+    union { unsigned u; int s; } v = { (unsigned) val << shift };
+    return v.s >> shift;
 }
 #endif
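Note on the sign_extend() change above: the old expression left-shifted a possibly negative int, which is undefined behaviour in C; the new version does the left shift in unsigned arithmetic and type-puns the result back to int through a union, so only the final right shift relies on (implementation-defined, in practice arithmetic) signed shifting. A minimal self-contained sketch of the same trick, with a small test driver that is not part of the commit:

    #include <stdio.h>

    /* Do the left shift in unsigned arithmetic (well defined even when the
     * sign bit is involved), then reinterpret as int and shift back down. */
    static int sign_extend(int val, unsigned bits)
    {
        unsigned shift = 8 * sizeof(int) - bits;
        union { unsigned u; int s; } v = { (unsigned) val << shift };
        return v.s >> shift;
    }

    int main(void)
    {
        printf("%d\n", sign_extend(0x1F, 5)); /* 0x1F is -1 in 5-bit two's complement */
        printf("%d\n", sign_extend(0x0F, 5)); /* prints 15 */
        return 0;
    }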


@@ -138,11 +138,11 @@ int ff_mlp_read_major_sync(void *log, MLPHeaderInfo *mh, GetBitContext *gb)
     checksum = ff_mlp_checksum16(gb->buffer, 26);

     if (checksum != AV_RL16(gb->buffer+26)) {
         av_log(log, AV_LOG_ERROR, "major sync info header checksum error\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     if (get_bits_long(gb, 24) != 0xf8726f) /* Sync words */
-        return -1;
+        return AVERROR_INVALIDDATA;

     mh->stream_type = get_bits(gb, 8);

@@ -173,7 +173,7 @@ int ff_mlp_read_major_sync(void *log, MLPHeaderInfo *mh, GetBitContext *gb)
         mh->channels_thd_stream2 = get_bits(gb, 13);
     } else
-        return -1;
+        return AVERROR_INVALIDDATA;

     mh->access_unit_size      = 40 << (ratebits & 7);
     mh->access_unit_size_pow2 = 64 << (ratebits & 7);
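Note: replacing bare -1 returns with AVERROR_INVALIDDATA lets callers distinguish bitstream corruption from other failure modes. A hedged sketch of how a caller might report such a code with the public av_strerror() helper from libavutil (the report_error() wrapper is hypothetical, not part of this commit):

    #include <stdio.h>
    #include <libavutil/error.h>

    /* Hypothetical caller-side helper: turn an AVERROR code into text. */
    static void report_error(int err)
    {
        char msg[128];

        if (av_strerror(err, msg, sizeof(msg)) < 0)
            snprintf(msg, sizeof(msg), "unknown error %d", err);
        fprintf(stderr, "major sync parse failed: %s\n", msg);
    }

    int main(void)
    {
        report_error(AVERROR_INVALIDDATA);
        return 0;
    }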


@@ -217,7 +217,7 @@ static inline int read_huff_channels(MLPDecodeContext *m, GetBitContext *gbp,
                                 VLC_BITS, (9 + VLC_BITS - 1) / VLC_BITS);

         if (result < 0)
-            return -1;
+            return AVERROR_INVALIDDATA;

         if (lsb_bits > 0)
             result = (result << lsb_bits) + get_bits(gbp, lsb_bits);

@@ -253,61 +253,61 @@ static av_cold int mlp_decode_init(AVCodecContext *avctx)
 static int read_major_sync(MLPDecodeContext *m, GetBitContext *gb)
 {
     MLPHeaderInfo mh;
-    int substr;
+    int substr, ret;

-    if (ff_mlp_read_major_sync(m->avctx, &mh, gb) != 0)
-        return -1;
+    if ((ret = ff_mlp_read_major_sync(m->avctx, &mh, gb)) != 0)
+        return ret;

     if (mh.group1_bits == 0) {
         av_log(m->avctx, AV_LOG_ERROR, "invalid/unknown bits per sample\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     if (mh.group2_bits > mh.group1_bits) {
         av_log(m->avctx, AV_LOG_ERROR,
                "Channel group 2 cannot have more bits per sample than group 1.\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     if (mh.group2_samplerate && mh.group2_samplerate != mh.group1_samplerate) {
         av_log(m->avctx, AV_LOG_ERROR,
                "Channel groups with differing sample rates are not currently supported.\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     if (mh.group1_samplerate == 0) {
         av_log(m->avctx, AV_LOG_ERROR, "invalid/unknown sampling rate\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     if (mh.group1_samplerate > MAX_SAMPLERATE) {
         av_log(m->avctx, AV_LOG_ERROR,
                "Sampling rate %d is greater than the supported maximum (%d).\n",
                mh.group1_samplerate, MAX_SAMPLERATE);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     if (mh.access_unit_size > MAX_BLOCKSIZE) {
         av_log(m->avctx, AV_LOG_ERROR,
                "Block size %d is greater than the supported maximum (%d).\n",
                mh.access_unit_size, MAX_BLOCKSIZE);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     if (mh.access_unit_size_pow2 > MAX_BLOCKSIZE_POW2) {
         av_log(m->avctx, AV_LOG_ERROR,
                "Block size pow2 %d is greater than the supported maximum (%d).\n",
                mh.access_unit_size_pow2, MAX_BLOCKSIZE_POW2);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     if (mh.num_substreams == 0)
-        return -1;
+        return AVERROR_INVALIDDATA;
     if (m->avctx->codec_id == CODEC_ID_MLP && mh.num_substreams > 2) {
         av_log(m->avctx, AV_LOG_ERROR, "MLP only supports up to 2 substreams.\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     if (mh.num_substreams > MAX_SUBSTREAMS) {
         av_log(m->avctx, AV_LOG_ERROR,
                "Number of substreams %d is larger than the maximum supported "
                "by the decoder. %s\n", mh.num_substreams, sample_message);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     m->access_unit_size = mh.access_unit_size;
@@ -374,14 +374,14 @@ static int read_restart_header(MLPDecodeContext *m, GetBitContext *gbp,
     if (sync_word != 0x31ea >> 1) {
         av_log(m->avctx, AV_LOG_ERROR,
                "restart header sync incorrect (got 0x%04x)\n", sync_word);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     s->noise_type = get_bits1(gbp);

     if (m->avctx->codec_id == CODEC_ID_MLP && s->noise_type) {
         av_log(m->avctx, AV_LOG_ERROR, "MLP must have 0x31ea sync word.\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     skip_bits(gbp, 16); /* Output timestamp */

@@ -394,13 +394,13 @@ static int read_restart_header(MLPDecodeContext *m, GetBitContext *gbp,
         av_log(m->avctx, AV_LOG_ERROR,
                "Max matrix channel cannot be greater than %d.\n",
                max_matrix_channel);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     if (s->max_channel != s->max_matrix_channel) {
         av_log(m->avctx, AV_LOG_ERROR,
                "Max channel must be equal max matrix channel.\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     /* This should happen for TrueHD streams with >6 channels and MLP's noise

@@ -409,13 +409,13 @@ static int read_restart_header(MLPDecodeContext *m, GetBitContext *gbp,
         av_log(m->avctx, AV_LOG_ERROR,
                "Number of channels %d is larger than the maximum supported "
                "by the decoder. %s\n", s->max_channel+2, sample_message);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     if (s->min_channel > s->max_channel) {
         av_log(m->avctx, AV_LOG_ERROR,
                "Substream min channel cannot be greater than max channel.\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     if (m->avctx->request_channels > 0

@@ -454,7 +454,7 @@ static int read_restart_header(MLPDecodeContext *m, GetBitContext *gbp,
             av_log(m->avctx, AV_LOG_ERROR,
                    "Assignment of matrix channel %d to invalid output channel %d. %s\n",
                    ch, ch_assign, sample_message);
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
         s->ch_assign[ch_assign] = ch;
     }
@@ -528,7 +528,7 @@ static int read_filter_params(MLPDecodeContext *m, GetBitContext *gbp,
     if (m->filter_changed[channel][filter]++ > 1) {
         av_log(m->avctx, AV_LOG_ERROR, "Filters may change only once per access unit.\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     order = get_bits(gbp, 4);

@@ -536,7 +536,7 @@ static int read_filter_params(MLPDecodeContext *m, GetBitContext *gbp,
         av_log(m->avctx, AV_LOG_ERROR,
                "%cIR filter order %d is greater than maximum %d.\n",
                fchar, order, max_order);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     fp->order = order;

@@ -552,13 +552,13 @@ static int read_filter_params(MLPDecodeContext *m, GetBitContext *gbp,
             av_log(m->avctx, AV_LOG_ERROR,
                    "%cIR filter coeff_bits must be between 1 and 16.\n",
                    fchar);
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
         if (coeff_bits + coeff_shift > 16) {
             av_log(m->avctx, AV_LOG_ERROR,
                    "Sum of coeff_bits and coeff_shift for %cIR filter must be 16 or less.\n",
                    fchar);
-            return -1;
+            return AVERROR_INVALIDDATA;
         }

         for (i = 0; i < order; i++)

@@ -570,7 +570,7 @@ static int read_filter_params(MLPDecodeContext *m, GetBitContext *gbp,
         if (filter == FIR) {
             av_log(m->avctx, AV_LOG_ERROR,
                    "FIR filter has state data specified.\n");
-            return -1;
+            return AVERROR_INVALIDDATA;
         }

         state_bits = get_bits(gbp, 4);

@@ -598,7 +598,7 @@ static int read_matrix_params(MLPDecodeContext *m, unsigned int substr, GetBitCo
     if (m->matrix_changed++ > 1) {
         av_log(m->avctx, AV_LOG_ERROR, "Matrices may change only once per access unit.\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     s->num_primitive_matrices = get_bits(gbp, 4);

@@ -607,7 +607,7 @@ static int read_matrix_params(MLPDecodeContext *m, unsigned int substr, GetBitCo
         av_log(m->avctx, AV_LOG_ERROR,
                "Number of primitive matrices cannot be greater than %d.\n",
                max_primitive_matrices);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     for (mat = 0; mat < s->num_primitive_matrices; mat++) {

@@ -620,12 +620,12 @@ static int read_matrix_params(MLPDecodeContext *m, unsigned int substr, GetBitCo
             av_log(m->avctx, AV_LOG_ERROR,
                    "Invalid channel %d specified as output from matrix.\n",
                    s->matrix_out_ch[mat]);
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
         if (frac_bits > 14) {
             av_log(m->avctx, AV_LOG_ERROR,
                    "Too many fractional bits specified.\n");
-            return -1;
+            return AVERROR_INVALIDDATA;
         }

         max_chan = s->max_matrix_channel;
@@ -658,27 +658,28 @@ static int read_channel_params(MLPDecodeContext *m, unsigned int substr,
     ChannelParams *cp = &s->channel_params[ch];
     FilterParams *fir = &cp->filter_params[FIR];
     FilterParams *iir = &cp->filter_params[IIR];
+    int ret;

     if (s->param_presence_flags & PARAM_FIR)
         if (get_bits1(gbp))
-            if (read_filter_params(m, gbp, substr, ch, FIR) < 0)
-                return -1;
+            if ((ret = read_filter_params(m, gbp, substr, ch, FIR)) < 0)
+                return ret;

     if (s->param_presence_flags & PARAM_IIR)
         if (get_bits1(gbp))
-            if (read_filter_params(m, gbp, substr, ch, IIR) < 0)
-                return -1;
+            if ((ret = read_filter_params(m, gbp, substr, ch, IIR)) < 0)
+                return ret;

     if (fir->order + iir->order > 8) {
         av_log(m->avctx, AV_LOG_ERROR, "Total filter orders too high.\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     if (fir->order && iir->order &&
         fir->shift != iir->shift) {
         av_log(m->avctx, AV_LOG_ERROR,
                "FIR and IIR filters must use the same precision.\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     /* The FIR and IIR filters must have the same precision.
      * To simplify the filtering code, only the precision of the

@@ -697,7 +698,7 @@ static int read_channel_params(MLPDecodeContext *m, unsigned int substr,
     if (cp->huff_lsbs > 24) {
         av_log(m->avctx, AV_LOG_ERROR, "Invalid huff_lsbs.\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     cp->sign_huff_offset = calculate_sign_huff(m, substr, ch);

@@ -713,6 +714,7 @@ static int read_decoding_params(MLPDecodeContext *m, GetBitContext *gbp,
 {
     SubStream *s = &m->substream[substr];
     unsigned int ch;
+    int ret;

     if (s->param_presence_flags & PARAM_PRESENCE)
         if (get_bits1(gbp))

@@ -724,14 +726,14 @@ static int read_decoding_params(MLPDecodeContext *m, GetBitContext *gbp,
         if (s->blocksize < 8 || s->blocksize > m->access_unit_size) {
             av_log(m->avctx, AV_LOG_ERROR, "Invalid blocksize.");
             s->blocksize = 0;
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
     }

     if (s->param_presence_flags & PARAM_MATRIX)
         if (get_bits1(gbp))
-            if (read_matrix_params(m, substr, gbp) < 0)
-                return -1;
+            if ((ret = read_matrix_params(m, substr, gbp)) < 0)
+                return ret;

     if (s->param_presence_flags & PARAM_OUTSHIFT)
         if (get_bits1(gbp))

@@ -750,8 +752,8 @@ static int read_decoding_params(MLPDecodeContext *m, GetBitContext *gbp,
     for (ch = s->min_channel; ch <= s->max_channel; ch++)
         if (get_bits1(gbp))
-            if (read_channel_params(m, substr, gbp, ch) < 0)
-                return -1;
+            if ((ret = read_channel_params(m, substr, gbp, ch)) < 0)
+                return ret;

     return 0;
 }
@@ -793,6 +795,7 @@ static int read_block_data(MLPDecodeContext *m, GetBitContext *gbp,
 {
     SubStream *s = &m->substream[substr];
     unsigned int i, ch, expected_stream_pos = 0;
+    int ret;

     if (s->data_check_present) {
         expected_stream_pos = get_bits_count(gbp);

@@ -803,15 +806,15 @@ static int read_block_data(MLPDecodeContext *m, GetBitContext *gbp,
     if (s->blockpos + s->blocksize > m->access_unit_size) {
         av_log(m->avctx, AV_LOG_ERROR, "too many audio samples in frame\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }

     memset(&m->bypassed_lsbs[s->blockpos][0], 0,
            s->blocksize * sizeof(m->bypassed_lsbs[0]));

     for (i = 0; i < s->blocksize; i++)
-        if (read_huff_channels(m, gbp, substr, i) < 0)
-            return -1;
+        if ((ret = read_huff_channels(m, gbp, substr, i)) < 0)
+            return ret;

     for (ch = s->min_channel; ch <= s->max_channel; ch++)
         filter_channel(m, substr, ch);

@@ -942,16 +945,26 @@ static void rematrix_channels(MLPDecodeContext *m, unsigned int substr)
 /** Write the audio data into the output buffer. */

-static int output_data_internal(MLPDecodeContext *m, unsigned int substr,
-                                uint8_t *data, unsigned int *data_size, int is32)
+static int output_data(MLPDecodeContext *m, unsigned int substr,
+                       uint8_t *data, unsigned int *data_size)
 {
     SubStream *s = &m->substream[substr];
     unsigned int i, out_ch = 0;
+    int out_size;
     int32_t *data_32 = (int32_t*) data;
     int16_t *data_16 = (int16_t*) data;
+    int is32 = (m->avctx->sample_fmt == AV_SAMPLE_FMT_S32);

-    if (*data_size < (s->max_channel + 1) * s->blockpos * (is32 ? 4 : 2))
-        return -1;
+    if (m->avctx->channels != s->max_matrix_channel + 1) {
+        av_log(m->avctx, AV_LOG_ERROR, "channel count mismatch\n");
+        return AVERROR_INVALIDDATA;
+    }
+
+    out_size = s->blockpos * m->avctx->channels *
+               av_get_bytes_per_sample(m->avctx->sample_fmt);
+
+    if (*data_size < out_size)
+        return AVERROR(EINVAL);

     for (i = 0; i < s->blockpos; i++) {
         for (out_ch = 0; out_ch <= s->max_matrix_channel; out_ch++) {

@@ -964,21 +977,11 @@ static int output_data_internal(MLPDecodeContext *m, unsigned int substr,
         }
     }

-    *data_size = i * out_ch * (is32 ? 4 : 2);
+    *data_size = out_size;

     return 0;
 }

-static int output_data(MLPDecodeContext *m, unsigned int substr,
-                       uint8_t *data, unsigned int *data_size)
-{
-    if (m->avctx->sample_fmt == AV_SAMPLE_FMT_S32)
-        return output_data_internal(m, substr, data, data_size, 1);
-    else
-        return output_data_internal(m, substr, data, data_size, 0);
-}
-
 /** Read an access unit from the stream.
  * @return negative on error, 0 if not enough data is present in the input stream,
  *         otherwise the number of bytes consumed. */
@@ -997,6 +1000,7 @@ static int read_access_unit(AVCodecContext *avctx, void* data, int *data_size,
     uint8_t substream_parity_present[MAX_SUBSTREAMS];
     uint16_t substream_data_len[MAX_SUBSTREAMS];
     uint8_t parity_bits;
+    int ret;

     if (buf_size < 4)
         return 0;

@@ -1004,7 +1008,7 @@ static int read_access_unit(AVCodecContext *avctx, void* data, int *data_size,
     length = (AV_RB16(buf) & 0xfff) * 2;

     if (length < 4 || length > buf_size)
-        return -1;
+        return AVERROR_INVALIDDATA;

     init_get_bits(&gb, (buf + 4), (length - 4) * 8);

@@ -1110,8 +1114,8 @@ static int read_access_unit(AVCodecContext *avctx, void* data, int *data_size,
             if (!s->restart_seen)
                 goto next_substr;

-            if (read_block_data(m, &gb, substr) < 0)
-                return -1;
+            if ((ret = read_block_data(m, &gb, substr)) < 0)
+                return ret;

             if (get_bits_count(&gb) >= substream_data_len[substr] * 8)
                 goto substream_length_mismatch;

@@ -1124,13 +1128,13 @@ static int read_access_unit(AVCodecContext *avctx, void* data, int *data_size,
             int shorten_by;

             if (get_bits(&gb, 16) != 0xD234)
-                return -1;
+                return AVERROR_INVALIDDATA;

             shorten_by = get_bits(&gb, 16);
             if (m->avctx->codec_id == CODEC_ID_TRUEHD && shorten_by & 0x2000)
                 s->blockpos -= FFMIN(shorten_by & 0x1FFF, s->blockpos);
             else if (m->avctx->codec_id == CODEC_ID_MLP && shorten_by != 0xD234)
-                return -1;
+                return AVERROR_INVALIDDATA;

             if (substr == m->max_decoded_substream)
                 av_log(m->avctx, AV_LOG_INFO, "End of stream indicated.\n");

@@ -1164,18 +1168,18 @@ next_substr:
     rematrix_channels(m, m->max_decoded_substream);

-    if (output_data(m, m->max_decoded_substream, data, data_size) < 0)
-        return -1;
+    if ((ret = output_data(m, m->max_decoded_substream, data, data_size)) < 0)
+        return ret;

     return length;

 substream_length_mismatch:
     av_log(m->avctx, AV_LOG_ERROR, "substream %d length mismatch\n", substr);
-    return -1;
+    return AVERROR_INVALIDDATA;

 error:
     m->params_valid = 0;
-    return -1;
+    return AVERROR_INVALIDDATA;
 }

 AVCodec ff_mlp_decoder = {
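Note on the reworked output_data() above: it now rejects a mismatch between avctx->channels and the substream's matrix channel count and computes the output byte count once from blockpos, the channel count and the sample size. A small hypothetical helper showing the same bookkeeping (check_output() is illustrative only, not FFmpeg API):

    #include <libavutil/samplefmt.h>
    #include <libavutil/error.h>

    /* Hypothetical helper mirroring the new output_data() checks: verify the
     * channel count and compute the output size in bytes exactly once. */
    static int check_output(int channels, int expected_channels, int nb_samples,
                            enum AVSampleFormat fmt, unsigned int avail_bytes,
                            int *out_size)
    {
        if (channels != expected_channels)
            return AVERROR_INVALIDDATA;      /* channel count mismatch */

        *out_size = nb_samples * channels * av_get_bytes_per_sample(fmt);
        if (avail_bytes < (unsigned)*out_size)
            return AVERROR(EINVAL);          /* output buffer too small */
        return 0;
    }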


@@ -236,7 +236,7 @@ static av_cold int pcm_decode_init(AVCodecContext * avctx)
 /**
  * Read PCM samples macro
- * @param type   Datatype of native machine format
+ * @param size   Data size of native machine format
  * @param endian bytestream_get_xxx() endian suffix
  * @param src    Source pointer (variable name)
  * @param dst    Destination pointer (variable name)

@@ -244,13 +244,12 @@ static av_cold int pcm_decode_init(AVCodecContext * avctx)
  * @param shift  Bitshift (bits)
  * @param offset Sample value offset
  */
-#define DECODE(type, endian, src, dst, n, shift, offset) \
-    dst_##type = (type*)dst; \
+#define DECODE(size, endian, src, dst, n, shift, offset) \
     for(;n>0;n--) { \
-        register type v = bytestream_get_##endian(&src); \
-        *dst_##type++ = (v - offset) << shift; \
-    } \
-    dst = (short*)dst_##type;
+        uint##size##_t v = bytestream_get_##endian(&src); \
+        AV_WN##size##A(dst, (v - offset) << shift); \
+        dst += size / 8; \
+    }

 static int pcm_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,

@@ -260,14 +259,9 @@ static int pcm_decode_frame(AVCodecContext *avctx,
     int buf_size = avpkt->size;
     PCMDecode *s = avctx->priv_data;
     int sample_size, c, n, i;
-    short *samples;
+    uint8_t *samples;
     const uint8_t *src, *src8, *src2[MAX_CHANNELS];
-    uint8_t *dstu8;
-    int16_t *dst_int16_t;
     int32_t *dst_int32_t;
-    int64_t *dst_int64_t;
-    uint16_t *dst_uint16_t;
-    uint32_t *dst_uint32_t;

     samples = data;
     src = buf;
@@ -314,29 +308,30 @@ static int pcm_decode_frame(AVCodecContext *avctx,
     switch(avctx->codec->id) {
     case CODEC_ID_PCM_U32LE:
-        DECODE(uint32_t, le32, src, samples, n, 0, 0x80000000)
+        DECODE(32, le32, src, samples, n, 0, 0x80000000)
         break;
     case CODEC_ID_PCM_U32BE:
-        DECODE(uint32_t, be32, src, samples, n, 0, 0x80000000)
+        DECODE(32, be32, src, samples, n, 0, 0x80000000)
         break;
     case CODEC_ID_PCM_S24LE:
-        DECODE(int32_t, le24, src, samples, n, 8, 0)
+        DECODE(32, le24, src, samples, n, 8, 0)
         break;
     case CODEC_ID_PCM_S24BE:
-        DECODE(int32_t, be24, src, samples, n, 8, 0)
+        DECODE(32, be24, src, samples, n, 8, 0)
         break;
     case CODEC_ID_PCM_U24LE:
-        DECODE(uint32_t, le24, src, samples, n, 8, 0x800000)
+        DECODE(32, le24, src, samples, n, 8, 0x800000)
         break;
     case CODEC_ID_PCM_U24BE:
-        DECODE(uint32_t, be24, src, samples, n, 8, 0x800000)
+        DECODE(32, be24, src, samples, n, 8, 0x800000)
         break;
     case CODEC_ID_PCM_S24DAUD:
         for(;n>0;n--) {
             uint32_t v = bytestream_get_be24(&src);
             v >>= 4; // sync flags are here
-            *samples++ = av_reverse[(v >> 8) & 0xff] +
-                        (av_reverse[ v       & 0xff] << 8);
+            AV_WN16A(samples, av_reverse[(v >> 8) & 0xff] +
+                             (av_reverse[ v       & 0xff] << 8));
+            samples += 2;
         }
         break;
     case CODEC_ID_PCM_S16LE_PLANAR:

@@ -344,33 +339,33 @@ static int pcm_decode_frame(AVCodecContext *avctx,
         for(c=0;c<avctx->channels;c++)
             src2[c] = &src[c*n*2];
         for(;n>0;n--)
-            for(c=0;c<avctx->channels;c++)
-                *samples++ = bytestream_get_le16(&src2[c]);
+            for(c=0;c<avctx->channels;c++) {
+                AV_WN16A(samples, bytestream_get_le16(&src2[c]));
+                samples += 2;
+            }
         src = src2[avctx->channels-1];
         break;
     case CODEC_ID_PCM_U16LE:
-        DECODE(uint16_t, le16, src, samples, n, 0, 0x8000)
+        DECODE(16, le16, src, samples, n, 0, 0x8000)
         break;
     case CODEC_ID_PCM_U16BE:
-        DECODE(uint16_t, be16, src, samples, n, 0, 0x8000)
+        DECODE(16, be16, src, samples, n, 0, 0x8000)
         break;
     case CODEC_ID_PCM_S8:
-        dstu8= (uint8_t*)samples;
         for(;n>0;n--) {
-            *dstu8++ = *src++ + 128;
+            *samples++ = *src++ + 128;
         }
-        samples= (short*)dstu8;
         break;
 #if HAVE_BIGENDIAN
     case CODEC_ID_PCM_F64LE:
-        DECODE(int64_t, le64, src, samples, n, 0, 0)
+        DECODE(64, le64, src, samples, n, 0, 0)
         break;
     case CODEC_ID_PCM_S32LE:
     case CODEC_ID_PCM_F32LE:
-        DECODE(int32_t, le32, src, samples, n, 0, 0)
+        DECODE(32, le32, src, samples, n, 0, 0)
         break;
     case CODEC_ID_PCM_S16LE:
-        DECODE(int16_t, le16, src, samples, n, 0, 0)
+        DECODE(16, le16, src, samples, n, 0, 0)
         break;
     case CODEC_ID_PCM_F64BE:
     case CODEC_ID_PCM_F32BE:
@@ -378,14 +373,14 @@ static int pcm_decode_frame(AVCodecContext *avctx,
     case CODEC_ID_PCM_S16BE:
 #else
     case CODEC_ID_PCM_F64BE:
-        DECODE(int64_t, be64, src, samples, n, 0, 0)
+        DECODE(64, be64, src, samples, n, 0, 0)
         break;
     case CODEC_ID_PCM_F32BE:
     case CODEC_ID_PCM_S32BE:
-        DECODE(int32_t, be32, src, samples, n, 0, 0)
+        DECODE(32, be32, src, samples, n, 0, 0)
         break;
     case CODEC_ID_PCM_S16BE:
-        DECODE(int16_t, be16, src, samples, n, 0, 0)
+        DECODE(16, be16, src, samples, n, 0, 0)
         break;
     case CODEC_ID_PCM_F64LE:
     case CODEC_ID_PCM_F32LE:

@@ -395,20 +390,22 @@ static int pcm_decode_frame(AVCodecContext *avctx,
     case CODEC_ID_PCM_U8:
         memcpy(samples, src, n*sample_size);
         src += n*sample_size;
-        samples = (short*)((uint8_t*)data + n*sample_size);
+        samples += n * sample_size;
         break;
     case CODEC_ID_PCM_ZORK:
         for(;n>0;n--) {
             int x= *src++;
             if(x&128) x-= 128;
             else x = -x;
-            *samples++ = x << 8;
+            AV_WN16A(samples, x << 8);
+            samples += 2;
         }
         break;
     case CODEC_ID_PCM_ALAW:
     case CODEC_ID_PCM_MULAW:
         for(;n>0;n--) {
-            *samples++ = s->table[*src++];
+            AV_WN16A(samples, s->table[*src++]);
+            samples += 2;
         }
         break;
     case CODEC_ID_PCM_DVD:

@@ -443,7 +440,7 @@ static int pcm_decode_frame(AVCodecContext *avctx,
                    avctx->bits_per_coded_sample);
             return -1;
         }
-        samples = (short *) dst_int32_t;
+        samples = (uint8_t *) dst_int32_t;
         break;
     case CODEC_ID_PCM_LXF:
         dst_int32_t = data;

@@ -463,12 +460,12 @@ static int pcm_decode_frame(AVCodecContext *avctx,
             }
         }
         src += n * avctx->channels * 5;
-        samples = (short *) dst_int32_t;
+        samples = (uint8_t *) dst_int32_t;
         break;
     default:
         return -1;
     }
-    *data_size = (uint8_t *)samples - (uint8_t *)data;
+    *data_size = samples - (uint8_t *)data;
     return src - buf;
 }


@@ -534,13 +534,6 @@ static av_cold int shorten_decode_close(AVCodecContext *avctx)
     return 0;
 }

-static void shorten_flush(AVCodecContext *avctx){
-    ShortenContext *s = avctx->priv_data;
-
-    s->bitstream_size=
-    s->bitstream_index= 0;
-}
-
 AVCodec ff_shorten_decoder = {
     .name           = "shorten",
     .type           = AVMEDIA_TYPE_AUDIO,

@@ -549,6 +542,5 @@ AVCodec ff_shorten_decoder = {
     .init           = shorten_decode_init,
     .close          = shorten_decode_close,
     .decode         = shorten_decode_frame,
-    .flush          = shorten_flush,
     .long_name      = NULL_IF_CONFIG_SMALL("Shorten"),
 };


@@ -21,6 +21,8 @@

 #include "libavutil/intreadwrite.h"
 #include "avcodec.h"
+#include "dsputil.h"
+#include "get_bits.h"
 #include "truespeech_data.h"

 /**

@@ -32,7 +34,9 @@
  * TrueSpeech decoder context
  */
 typedef struct {
+    DSPContext dsp;
     /* input data */
+    uint8_t buffer[32];
     int16_t vector[8];  ///< input vector: 5/5/4/4/4/3/3/3
     int offset1[2];     ///< 8-bit value, used in one copying offset
     int offset2[4];     ///< 7-bit value, encodes offsets for copying and for two-point filter
@@ -54,100 +58,66 @@ typedef struct {

 static av_cold int truespeech_decode_init(AVCodecContext * avctx)
 {
-//    TSContext *c = avctx->priv_data;
+    TSContext *c = avctx->priv_data;
+
+    if (avctx->channels != 1) {
+        av_log_ask_for_sample(avctx, "Unsupported channel count: %d\n", avctx->channels);
+        return AVERROR(EINVAL);
+    }

     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+    dsputil_init(&c->dsp, avctx);

     return 0;
 }

 static void truespeech_read_frame(TSContext *dec, const uint8_t *input)
 {
-    uint32_t t;
-
-    /* first dword */
-    t = AV_RL32(input);
-    input += 4;
-
-    dec->flag = t & 1;
-
-    dec->vector[0] = ts_codebook[0][(t >>  1) & 0x1F];
-    dec->vector[1] = ts_codebook[1][(t >>  6) & 0x1F];
-    dec->vector[2] = ts_codebook[2][(t >> 11) & 0xF];
-    dec->vector[3] = ts_codebook[3][(t >> 15) & 0xF];
-    dec->vector[4] = ts_codebook[4][(t >> 19) & 0xF];
-    dec->vector[5] = ts_codebook[5][(t >> 23) & 0x7];
-    dec->vector[6] = ts_codebook[6][(t >> 26) & 0x7];
-    dec->vector[7] = ts_codebook[7][(t >> 29) & 0x7];
-
-    /* second dword */
-    t = AV_RL32(input);
-    input += 4;
-
-    dec->offset2[0] = (t >>  0) & 0x7F;
-    dec->offset2[1] = (t >>  7) & 0x7F;
-    dec->offset2[2] = (t >> 14) & 0x7F;
-    dec->offset2[3] = (t >> 21) & 0x7F;
-
-    dec->offset1[0] = ((t >> 28) & 0xF) << 4;
-
-    /* third dword */
-    t = AV_RL32(input);
-    input += 4;
-
-    dec->pulseval[0] = (t >>  0) & 0x3FFF;
-    dec->pulseval[1] = (t >> 14) & 0x3FFF;
-
-    dec->offset1[1] = (t >> 28) & 0x0F;
-
-    /* fourth dword */
-    t = AV_RL32(input);
-    input += 4;
-
-    dec->pulseval[2] = (t >>  0) & 0x3FFF;
-    dec->pulseval[3] = (t >> 14) & 0x3FFF;
-
-    dec->offset1[1] |= ((t >> 28) & 0x0F) << 4;
-
-    /* fifth dword */
-    t = AV_RL32(input);
-    input += 4;
-
-    dec->pulsepos[0] = (t >> 4) & 0x7FFFFFF;
-    dec->pulseoff[0] = (t >> 0) & 0xF;
-    dec->offset1[0] |= (t >> 31) & 1;
-
-    /* sixth dword */
-    t = AV_RL32(input);
-    input += 4;
-
-    dec->pulsepos[1] = (t >> 4) & 0x7FFFFFF;
-    dec->pulseoff[1] = (t >> 0) & 0xF;
-    dec->offset1[0] |= ((t >> 31) & 1) << 1;
-
-    /* seventh dword */
-    t = AV_RL32(input);
-    input += 4;
-
-    dec->pulsepos[2] = (t >> 4) & 0x7FFFFFF;
-    dec->pulseoff[2] = (t >> 0) & 0xF;
-    dec->offset1[0] |= ((t >> 31) & 1) << 2;
-
-    /* eighth dword */
-    t = AV_RL32(input);
-    input += 4;
-
-    dec->pulsepos[3] = (t >> 4) & 0x7FFFFFF;
-    dec->pulseoff[3] = (t >> 0) & 0xF;
-    dec->offset1[0] |= ((t >> 31) & 1) << 3;
+    GetBitContext gb;
+
+    dec->dsp.bswap_buf((uint32_t *)dec->buffer, (const uint32_t *)input, 8);
+    init_get_bits(&gb, dec->buffer, 32 * 8);
+
+    dec->vector[7] = ts_codebook[7][get_bits(&gb, 3)];
+    dec->vector[6] = ts_codebook[6][get_bits(&gb, 3)];
+    dec->vector[5] = ts_codebook[5][get_bits(&gb, 3)];
+    dec->vector[4] = ts_codebook[4][get_bits(&gb, 4)];
+    dec->vector[3] = ts_codebook[3][get_bits(&gb, 4)];
+    dec->vector[2] = ts_codebook[2][get_bits(&gb, 4)];
+    dec->vector[1] = ts_codebook[1][get_bits(&gb, 5)];
+    dec->vector[0] = ts_codebook[0][get_bits(&gb, 5)];
+    dec->flag      = get_bits1(&gb);
+
+    dec->offset1[0] = get_bits(&gb, 4) << 4;
+    dec->offset2[3] = get_bits(&gb, 7);
+    dec->offset2[2] = get_bits(&gb, 7);
+    dec->offset2[1] = get_bits(&gb, 7);
+    dec->offset2[0] = get_bits(&gb, 7);
+
+    dec->offset1[1]  = get_bits(&gb, 4);
+    dec->pulseval[1] = get_bits(&gb, 14);
+    dec->pulseval[0] = get_bits(&gb, 14);
+
+    dec->offset1[1] |= get_bits(&gb, 4) << 4;
+    dec->pulseval[3] = get_bits(&gb, 14);
+    dec->pulseval[2] = get_bits(&gb, 14);
+
+    dec->offset1[0] |= get_bits1(&gb);
+    dec->pulsepos[0] = get_bits_long(&gb, 27);
+    dec->pulseoff[0] = get_bits(&gb, 4);
+
+    dec->offset1[0] |= get_bits1(&gb) << 1;
+    dec->pulsepos[1] = get_bits_long(&gb, 27);
+    dec->pulseoff[1] = get_bits(&gb, 4);
+
+    dec->offset1[0] |= get_bits1(&gb) << 2;
+    dec->pulsepos[2] = get_bits_long(&gb, 27);
+    dec->pulseoff[2] = get_bits(&gb, 4);
+
+    dec->offset1[0] |= get_bits1(&gb) << 3;
+    dec->pulsepos[3] = get_bits_long(&gb, 27);
+    dec->pulseoff[3] = get_bits(&gb, 4);
 }

 static void truespeech_correlate_filter(TSContext *dec)
@@ -157,7 +127,7 @@ static void truespeech_correlate_filter(TSContext *dec)
     for(i = 0; i < 8; i++){
         if(i > 0){
-            memcpy(tmp, dec->cvector, i * 2);
+            memcpy(tmp, dec->cvector, i * sizeof(*tmp));
             for(j = 0; j < i; j++)
                 dec->cvector[j] = ((tmp[i - j - 1] * dec->vector[i]) +
                                    (dec->cvector[j] << 15) + 0x4000) >> 15;

@@ -199,7 +169,7 @@ static void truespeech_apply_twopoint_filter(TSContext *dec, int quart)

     t = dec->offset2[quart];
     if(t == 127){
-        memset(dec->newvec, 0, 60 * 2);
+        memset(dec->newvec, 0, 60 * sizeof(*dec->newvec));
         return;
     }
     for(i = 0; i < 146; i++)

@@ -224,7 +194,7 @@ static void truespeech_place_pulses(TSContext *dec, int16_t *out, int quart)
     int16_t *ptr2;
     int coef;

-    memset(out, 0, 60 * 2);
+    memset(out, 0, 60 * sizeof(*out));
     for(i = 0; i < 7; i++) {
         t = dec->pulseval[quart] & 3;
         dec->pulseval[quart] >>= 2;
@@ -340,45 +310,45 @@ static int truespeech_decode_frame(AVCodecContext *avctx,
     int i, j;
     short *samples = data;
-    int consumed = 0;
-    int16_t out_buf[240];
-    int iterations;
+    int iterations, out_size;

-    if (!buf_size)
-        return 0;
+    iterations = buf_size / 32;

-    if (buf_size < 32) {
+    if (!iterations) {
         av_log(avctx, AV_LOG_ERROR,
                "Too small input buffer (%d bytes), need at least 32 bytes\n", buf_size);
         return -1;
     }

+    iterations = FFMIN(buf_size / 32, *data_size / 480);
+    out_size   = iterations * 240 * av_get_bytes_per_sample(avctx->sample_fmt);
+    if (*data_size < out_size) {
+        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
+        return AVERROR(EINVAL);
+    }
+    memset(samples, 0, out_size);
+
     for(j = 0; j < iterations; j++) {
-        truespeech_read_frame(c, buf + consumed);
-        consumed += 32;
+        truespeech_read_frame(c, buf);
+        buf += 32;

         truespeech_correlate_filter(c);
         truespeech_filters_merge(c);

-        memset(out_buf, 0, 240 * 2);
         for(i = 0; i < 4; i++) {
             truespeech_apply_twopoint_filter(c, i);
-            truespeech_place_pulses(c, out_buf + i * 60, i);
-            truespeech_update_filters(c, out_buf + i * 60, i);
-            truespeech_synth(c, out_buf + i * 60, i);
+            truespeech_place_pulses  (c, samples, i);
+            truespeech_update_filters(c, samples, i);
+            truespeech_synth         (c, samples, i);
+            samples += 60;
         }

         truespeech_save_prevvec(c);
-
-        /* finally output decoded frame */
-        for(i = 0; i < 240; i++)
-            *samples++ = out_buf[i];
     }

-    *data_size = consumed * 15;
+    *data_size = out_size;

-    return consumed;
+    return buf_size;
 }

 AVCodec ff_truespeech_decoder = {
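Note on the truespeech_read_frame() rewrite above: instead of assembling little-endian dwords and masking fields out by hand, the decoder byte-swaps the 32-byte frame once and then pulls fields out with get_bits(). For readers without the FFmpeg headers, here is a self-contained sketch of the idea, using a toy MSB-first bit reader and hypothetical 5/4/7-bit fields rather than the real TrueSpeech layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy MSB-first bit reader (stand-in for GetBitContext/get_bits()). */
    typedef struct { const uint8_t *buf; unsigned pos; } BitReader;

    static unsigned read_bits(BitReader *br, unsigned n)
    {
        unsigned v = 0;
        while (n--) {
            v = (v << 1) | ((br->buf[br->pos >> 3] >> (7 - (br->pos & 7))) & 1);
            br->pos++;
        }
        return v;
    }

    int main(void)
    {
        /* Hypothetical 16-bit layout: 5 + 4 + 7 bit fields, MSB first. */
        const uint8_t data[2] = { 0xAB, 0xCD };   /* 1010101111001101 */
        BitReader br = { data, 0 };

        unsigned a = read_bits(&br, 5);
        unsigned b = read_bits(&br, 4);
        unsigned c = read_bits(&br, 7);

        /* Same fields recovered by shift/mask on the 16-bit value. */
        unsigned t = (data[0] << 8) | data[1];
        printf("%u %u %u\n", a, b, c);                               /* 21 7 77 */
        printf("%u %u %u\n", (t >> 11) & 0x1F, (t >> 7) & 0xF, t & 0x7F);
        return 0;
    }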


@@ -837,17 +837,18 @@ int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
         goto parse_common_info;
     }

+    v->field_mode = 0;
     if (v->interlace) {
         v->fcm = decode012(gb);
         if (v->fcm) {
             if (v->fcm == 2)
                 v->field_mode = 1;
-            else
-                v->field_mode = 0;
             if (!v->warn_interlaced++)
                 av_log(v->s.avctx, AV_LOG_ERROR,
                        "Interlaced frames/fields support is incomplete\n");
         }
-    }
+    } else {
+        v->fcm = 0;
+    }

     if (v->field_mode) {


@@ -274,9 +274,10 @@ typedef struct AVFormatParameters {
 #define AVFMT_NOSTREAMS     0x1000 /**< Format does not require any streams */
 #define AVFMT_NOBINSEARCH   0x2000 /**< Format does not allow to fallback to binary search via read_timestamp */
 #define AVFMT_NOGENSEARCH   0x4000 /**< Format does not allow to fallback to generic search */
-#define AVFMT_TS_NONSTRICT  0x8000 /**< Format does not require strictly
-                                        increasing timestamps, but they must
-                                        still be monotonic */
+#define AVFMT_NO_BYTE_SEEK  0x8000 /**< Format does not allow seeking by bytes */
+#define AVFMT_TS_NONSTRICT  0x8000000 /**< Format does not require strictly
+                                           increasing timestamps, but they must
+                                           still be monotonic */

 typedef struct AVOutputFormat {
     const char *name;

@@ -411,7 +412,9 @@ typedef struct AVInputFormat {
                               int64_t *pos, int64_t pos_limit);

     /**
-     * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER.
+     * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS,
+     * AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH,
+     * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK.
      */
     int flags;


@@ -33,6 +33,66 @@
 #include "libavutil/dict.h"
 #include "avio_internal.h"

+const AVMetadataConv ff_id3v2_34_metadata_conv[] = {
+    { "TALB", "album"},
+    { "TCOM", "composer"},
+    { "TCON", "genre"},
+    { "TCOP", "copyright"},
+    { "TENC", "encoded_by"},
+    { "TIT2", "title"},
+    { "TLAN", "language"},
+    { "TPE1", "artist"},
+    { "TPE2", "album_artist"},
+    { "TPE3", "performer"},
+    { "TPOS", "disc"},
+    { "TPUB", "publisher"},
+    { "TRCK", "track"},
+    { "TSSE", "encoder"},
+    { 0 }
+};
+
+const AVMetadataConv ff_id3v2_4_metadata_conv[] = {
+    { "TDRL", "date"},
+    { "TDRC", "date"},
+    { "TDEN", "creation_time"},
+    { "TSOA", "album-sort"},
+    { "TSOP", "artist-sort"},
+    { "TSOT", "title-sort"},
+    { 0 }
+};
+
+static const AVMetadataConv id3v2_2_metadata_conv[] = {
+    { "TAL", "album"},
+    { "TCO", "genre"},
+    { "TT2", "title"},
+    { "TEN", "encoded_by"},
+    { "TP1", "artist"},
+    { "TP2", "album_artist"},
+    { "TP3", "performer"},
+    { "TRK", "track"},
+    { 0 }
+};
+
+const char ff_id3v2_tags[][4] = {
+    "TALB", "TBPM", "TCOM", "TCON", "TCOP", "TDLY", "TENC", "TEXT",
+    "TFLT", "TIT1", "TIT2", "TIT3", "TKEY", "TLAN", "TLEN", "TMED",
+    "TOAL", "TOFN", "TOLY", "TOPE", "TOWN", "TPE1", "TPE2", "TPE3",
+    "TPE4", "TPOS", "TPUB", "TRCK", "TRSN", "TRSO", "TSRC", "TSSE",
+    { 0 },
+};
+
+const char ff_id3v2_4_tags[][4] = {
+    "TDEN", "TDOR", "TDRC", "TDRL", "TDTG", "TIPL", "TMCL", "TMOO",
+    "TPRO", "TSOA", "TSOP", "TSOT", "TSST",
+    { 0 },
+};
+
+const char ff_id3v2_3_tags[][4] = {
+    "TDAT", "TIME", "TORY", "TRDA", "TSIZ", "TYER",
+    { 0 },
+};
+
 int ff_id3v2_match(const uint8_t *buf, const char * magic)
 {
     return buf[0] == magic[0] &&
@@ -328,6 +388,18 @@ finish:
         av_dict_set(m, "date", date, 0);
 }

+typedef struct ID3v2EMFunc {
+    const char *tag3;
+    const char *tag4;
+    void (*read)(AVFormatContext*, AVIOContext*, int, char*, ID3v2ExtraMeta **);
+    void (*free)(void *);
+} ID3v2EMFunc;
+
+static const ID3v2EMFunc id3v2_extra_meta_funcs[] = {
+    { "GEO", "GEOB", read_geobtag, free_geobtag },
+    { NULL }
+};
+
 /**
  * Get the corresponding ID3v2EMFunc struct for a tag.
  * @param isv34 Determines if v2.2 or v2.3/4 strings are used

@@ -336,16 +408,15 @@ finish:
 static const ID3v2EMFunc *get_extra_meta_func(const char *tag, int isv34)
 {
     int i = 0;
-    while (ff_id3v2_extra_meta_funcs[i].tag3) {
+    while (id3v2_extra_meta_funcs[i].tag3) {
         if (!memcmp(tag,
-                    (isv34 ?
-                     ff_id3v2_extra_meta_funcs[i].tag4 :
-                     ff_id3v2_extra_meta_funcs[i].tag3),
+                    (isv34 ? id3v2_extra_meta_funcs[i].tag4 :
+                             id3v2_extra_meta_funcs[i].tag3),
                     (isv34 ? 4 : 3)))
-            return &ff_id3v2_extra_meta_funcs[i];
+            return &id3v2_extra_meta_funcs[i];
         i++;
     }
-    return &ff_id3v2_extra_meta_funcs[i];
+    return &id3v2_extra_meta_funcs[i];
 }

 static void ff_id3v2_parse(AVFormatContext *s, int len, uint8_t version, uint8_t flags, ID3v2ExtraMeta **extra_meta)
@@ -508,7 +579,7 @@ void ff_id3v2_read_all(AVFormatContext *s, const char *magic, ID3v2ExtraMeta **e
         }
     } while (found_header);
     ff_metadata_conv(&s->metadata, NULL, ff_id3v2_34_metadata_conv);
-    ff_metadata_conv(&s->metadata, NULL, ff_id3v2_2_metadata_conv);
+    ff_metadata_conv(&s->metadata, NULL, id3v2_2_metadata_conv);
     ff_metadata_conv(&s->metadata, NULL, ff_id3v2_4_metadata_conv);
     merge_date(&s->metadata);
 }
@@ -531,68 +602,3 @@ void ff_id3v2_free_extra_meta(ID3v2ExtraMeta **extra_meta)
         current = next;
     }
 }
-
-const ID3v2EMFunc ff_id3v2_extra_meta_funcs[] = {
-    { "GEO", "GEOB", read_geobtag, free_geobtag },
-    { NULL, NULL, NULL, NULL }
-};
-
-const AVMetadataConv ff_id3v2_34_metadata_conv[] = {
-    { "TALB", "album"},
-    { "TCOM", "composer"},
-    { "TCON", "genre"},
-    { "TCOP", "copyright"},
-    { "TENC", "encoded_by"},
-    { "TIT2", "title"},
-    { "TLAN", "language"},
-    { "TPE1", "artist"},
-    { "TPE2", "album_artist"},
-    { "TPE3", "performer"},
-    { "TPOS", "disc"},
-    { "TPUB", "publisher"},
-    { "TRCK", "track"},
-    { "TSSE", "encoder"},
-    { 0 }
-};
-
-const AVMetadataConv ff_id3v2_4_metadata_conv[] = {
-    { "TDRL", "date"},
-    { "TDRC", "date"},
-    { "TDEN", "creation_time"},
-    { "TSOA", "album-sort"},
-    { "TSOP", "artist-sort"},
-    { "TSOT", "title-sort"},
-    { 0 }
-};
-
-const AVMetadataConv ff_id3v2_2_metadata_conv[] = {
-    { "TAL", "album"},
-    { "TCO", "genre"},
-    { "TT2", "title"},
-    { "TEN", "encoded_by"},
-    { "TP1", "artist"},
-    { "TP2", "album_artist"},
-    { "TP3", "performer"},
-    { "TRK", "track"},
-    { 0 }
-};
-
-const char ff_id3v2_tags[][4] = {
-    "TALB", "TBPM", "TCOM", "TCON", "TCOP", "TDLY", "TENC", "TEXT",
-    "TFLT", "TIT1", "TIT2", "TIT3", "TKEY", "TLAN", "TLEN", "TMED",
-    "TOAL", "TOFN", "TOLY", "TOPE", "TOWN", "TPE1", "TPE2", "TPE3",
-    "TPE4", "TPOS", "TPUB", "TRCK", "TRSN", "TRSO", "TSRC", "TSSE",
-    { 0 },
-};
-
-const char ff_id3v2_4_tags[][4] = {
-    "TDEN", "TDOR", "TDRC", "TDRL", "TDTG", "TIPL", "TMCL", "TMOO",
-    "TPRO", "TSOA", "TSOP", "TSOT", "TSST",
-    { 0 },
-};
-
-const char ff_id3v2_3_tags[][4] = {
-    "TDAT", "TIME", "TORY", "TRDA", "TSIZ", "TYER",
-    { 0 },
-};


@@ -59,13 +59,6 @@ typedef struct ID3v2ExtraMetaGEOB {
     uint8_t *data;
 } ID3v2ExtraMetaGEOB;

-typedef struct ID3v2EMFunc {
-    const char *tag3;
-    const char *tag4;
-    void (*read)(AVFormatContext*, AVIOContext*, int, char*, ID3v2ExtraMeta **);
-    void (*free)(void *);
-} ID3v2EMFunc;
-
 /**
  * Detect ID3v2 Header.
  * @param buf must be ID3v2_HEADER_SIZE byte long

@@ -99,11 +92,8 @@ void ff_id3v2_read_all(AVFormatContext *s, const char *magic, ID3v2ExtraMeta **e
  */
 void ff_id3v2_free_extra_meta(ID3v2ExtraMeta **extra_meta);

-extern const ID3v2EMFunc ff_id3v2_extra_meta_funcs[];
 extern const AVMetadataConv ff_id3v2_34_metadata_conv[];
 extern const AVMetadataConv ff_id3v2_4_metadata_conv[];
-extern const AVMetadataConv ff_id3v2_2_metadata_conv[];

 /**
  * A list of text information frames allowed in both ID3 v2.3 and v2.4


@@ -245,7 +245,7 @@ AVInputFormat ff_shorten_demuxer = {
     .long_name      = NULL_IF_CONFIG_SMALL("raw Shorten"),
     .read_header    = ff_raw_audio_read_header,
     .read_packet    = ff_raw_read_partial_packet,
-    .flags          = AVFMT_GENERIC_INDEX,
+    .flags          = AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK,
     .extensions     = "shn",
     .value          = CODEC_ID_SHORTEN,
 };


@@ -1808,10 +1808,12 @@ int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int f
     int ret;
     AVStream *st;

-    ff_read_frame_flush(s);
-
-    if(flags & AVSEEK_FLAG_BYTE)
+    if (flags & AVSEEK_FLAG_BYTE) {
+        if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
+            return -1;
+        ff_read_frame_flush(s);
         return seek_frame_byte(s, stream_index, timestamp, flags);
+    }

     if(stream_index < 0){
         stream_index= av_find_default_stream_index(s);

@@ -1825,19 +1827,23 @@ int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int f
     /* first, we try the format specific seek */
     AV_NOWARN_DEPRECATED(
-    if (s->iformat->read_seek)
+    if (s->iformat->read_seek) {
+        ff_read_frame_flush(s);
         ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
-    else
+    } else
         ret = -1;
     )
     if (ret >= 0) {
         return 0;
     }

-    if(s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH))
+    if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
+        ff_read_frame_flush(s);
         return av_seek_frame_binary(s, stream_index, timestamp, flags);
-    else if (!(s->iformat->flags & AVFMT_NOGENSEARCH))
+    } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
+        ff_read_frame_flush(s);
         return seek_frame_generic(s, stream_index, timestamp, flags);
+    }
     else
         return -1;
 }

@@ -1847,10 +1853,10 @@ int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int
     if(min_ts > ts || max_ts < ts)
         return -1;

-    ff_read_frame_flush(s);
-
-    if (s->iformat->read_seek2)
+    if (s->iformat->read_seek2) {
+        ff_read_frame_flush(s);
         return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
+    }

     if(s->iformat->read_timestamp){
         //try to seek via read_timestamp()
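Note on the seeking changes above: av_seek_frame() now refuses AVSEEK_FLAG_BYTE seeks for demuxers that set AVFMT_NO_BYTE_SEEK (such as the raw shn demuxer earlier in this commit) and only flushes the packet buffer once a usable seek path has been chosen. A hedged sketch of an application-side call that copes with the failure (seek_to_byte() is a hypothetical wrapper, not FFmpeg API):

    #include <libavformat/avformat.h>
    #include <libavutil/log.h>

    /* Hypothetical application-side wrapper: attempt a byte-position seek
     * and report when the demuxer cannot support it. */
    static int seek_to_byte(AVFormatContext *s, int64_t byte_pos)
    {
        int ret = av_seek_frame(s, -1, byte_pos, AVSEEK_FLAG_BYTE);
        if (ret < 0)
            av_log(s, AV_LOG_WARNING, "byte seek not supported by this format\n");
        return ret;
    }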


@@ -24,7 +24,7 @@
 #include "libavutil/avutil.h"

 #define LIBAVFORMAT_VERSION_MAJOR 53
-#define LIBAVFORMAT_VERSION_MINOR 15
+#define LIBAVFORMAT_VERSION_MINOR 16
 #define LIBAVFORMAT_VERSION_MICRO  0

 #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \


@@ -40,6 +40,7 @@ include $(SRC_PATH)/tests/fate/fft.mak
 include $(SRC_PATH)/tests/fate/h264.mak
 include $(SRC_PATH)/tests/fate/libavutil.mak
 include $(SRC_PATH)/tests/fate/mp3.mak
+#include $(SRC_PATH)/tests/fate/prores.mak
 include $(SRC_PATH)/tests/fate/vorbis.mak
 include $(SRC_PATH)/tests/fate/vp8.mak

tests/fate/prores.mak Normal file

@@ -0,0 +1,15 @@
+FATE_PRORES = fate-prores-422          \
+              fate-prores-422_hq       \
+              fate-prores-422_lt       \
+              fate-prores-422_proxy    \
+              fate-prores-alpha        \
+
+FATE_TESTS += $(FATE_PRORES)
+fate-prores: $(FATE_PRORES)
+
+fate-prores-422:       CMD = framecrc -vsync 0 -i $(SAMPLES)/prores/Sequence_1-Apple_ProRes_422.mov
+fate-prores-422_hq:    CMD = framecrc -vsync 0 -i $(SAMPLES)/prores/Sequence_1-Apple_ProRes_422_HQ.mov
+fate-prores-422_lt:    CMD = framecrc -vsync 0 -i $(SAMPLES)/prores/Sequence_1-Apple_ProRes_422_LT.mov
+fate-prores-422_proxy: CMD = framecrc -vsync 0 -i $(SAMPLES)/prores/Sequence_1-Apple_ProRes_422_Proxy.mov
+fate-prores-alpha:     CMD = framecrc -vsync 0 -i $(SAMPLES)/prores/Sequence_1-Apple_ProRes_with_Alpha.mov
+


@@ -0,0 +1,2 @@
+0, 0, 8294400, 0xe8e9d448
+0, 3003, 8294400, 0xe8e9d448


@@ -0,0 +1,2 @@
+0, 0, 8294400, 0x817063b0
+0, 3003, 8294400, 0x817063b0


@@ -0,0 +1,2 @@
+0, 0, 8294400, 0xcd4ccde1
+0, 3003, 8294400, 0xcd4ccde1


@@ -0,0 +1,2 @@
+0, 0, 8294400, 0x51d29320
+0, 3003, 8294400, 0x51d29320


@@ -0,0 +1,2 @@
+0, 0, 8294400, 0xee48d74b
+0, 3003, 8294400, 0x2a0c7eb1