
Merge remote-tracking branch 'qatar/master'

* qatar/master: (34 commits)
  dpcm: return error if packet is too small
  dpcm: use smaller data types for static tables
  dpcm: use sol_table_16 directly instead of through the DPCMContext.
  dpcm: replace short with int16_t
  dpcm: check to make sure channels is 1 or 2.
  dpcm: misc pretty-printing
  dpcm: remove unnecessary variable by using bytestream functions.
  dpcm: move codec-specific variable declarations to their corresponding decoding blocks.
  dpcm: consistently use the variable name 'n' for the next input byte.
  dpcm: output AV_SAMPLE_FMT_U8 for Sol DPCM subcodecs 1 and 2.
  dpcm: calculate and check actual output data size prior to decoding.
  dpcm: factor out the stereo flag calculation
  dpcm: cosmetics: rename channel_number to ch
  avserver: Fix a bug where the socket is IPv4, but IPv6 is autoselected for the loopback address.
  lavf: Avoid using av_malloc(0) in av_dump_format
  dxva2_h264: pass the correct 8x8 scaling lists
  dca: NEON optimised high freq VQ decoding
  avcodec: reject audio packets with NULL data and non-zero size
  dxva: Add ability to enable workaround for older ATI cards
  latmenc: Set latmBufferFullness to largest value to indicate it is not used
  ...

Conflicts:
	libavcodec/dxva2_h264.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer committed on 2011-10-01 02:54:46 +02:00
commit ef74ab20c2
14 changed files with 365 additions and 301 deletions
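
Two of the dpcm commits in the list above ("use smaller data types for static tables" and "replace short with int16_t") are purely about shrinking read-only data. A minimal sketch of the effect, with a hypothetical stand-in table (the real tables are in the dpcm.c diff further down):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kind of table the dpcm commits shrink;
 * the values mirror sol_table_old from the dpcm.c diff below. */
static const int    wide_table[16]   = {   0,   1,   2,  3,  6, 10, 15, 21,
                                         -21, -15, -10, -6, -3, -2, -1,  0 };
static const int8_t narrow_table[16] = {   0,   1,   2,  3,  6, 10, 15, 21,
                                         -21, -15, -10, -6, -3, -2, -1,  0 };

int main(void)
{
    /* Same contents, but the exact-width table needs a quarter of the
     * read-only data on a typical platform with 4-byte int. */
    printf("int table: %zu bytes, int8_t table: %zu bytes\n",
           sizeof(wide_table), sizeof(narrow_table));
    return 0;
}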

View File

@ -522,6 +522,7 @@ static int socket_open_listen(struct sockaddr_in *my_addr)
tmp = 1;
setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR, &tmp, sizeof(tmp));
my_addr->sin_family = AF_INET;
if (bind (server_fd, (struct sockaddr *) my_addr, sizeof (*my_addr)) < 0) {
char bindmsg[32];
snprintf(bindmsg, sizeof(bindmsg), "bind(port %d)", ntohs(my_addr->sin_port));
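
The one-line addition above (my_addr->sin_family = AF_INET;) pins the address family of the sockaddr before bind(), which is the essence of the "socket is IPv4, but IPv6 is autoselected for the loopback address" fix listed in the commit message. A self-contained sketch of the same pattern using plain BSD sockets (illustrative only, not avserver's actual helper):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Minimal IPv4 loopback listener mirroring the hunk above. */
static int open_ipv4_listener(uint16_t port)
{
    struct sockaddr_in addr;
    int one = 1;
    int fd  = socket(AF_INET, SOCK_STREAM, 0);

    if (fd < 0)
        return -1;
    setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));

    memset(&addr, 0, sizeof(addr));
    addr.sin_family      = AF_INET;                 /* pin the family explicitly */
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);  /* 127.0.0.1, never ::1 */
    addr.sin_port        = htons(port);

    if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 || listen(fd, 5) < 0) {
        close(fd);
        return -1;
    }
    return fd;
}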

View File

@ -42,16 +42,17 @@
* Features and limitations:
*
* Reference documents:
* http://www.pcisys.net/~melanson/codecs/simpleaudio.html
* http://www.geocities.com/SiliconValley/8682/aud3.txt
* http://openquicktime.sourceforge.net/plugins.htm
* XAnim sources (xa_codec.c) http://www.rasnaimaging.com/people/lapus/download.html
* http://www.cs.ucla.edu/~leec/mediabench/applications.html
* SoX source code http://home.sprynet.com/~cbagwell/sox.html
* http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
* http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
* http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
* http://openquicktime.sourceforge.net/
* XAnim sources (xa_codec.c) http://xanim.polter.net/
* http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
* SoX source code http://sox.sourceforge.net/
*
* CD-ROM XA:
* http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html
* vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html
* http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
* vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
* readstr http://www.geocities.co.jp/Playtown/2004/
*/
@ -65,8 +66,11 @@ static const int xa_adpcm_table[5][2] = {
};
static const int ea_adpcm_table[] = {
0, 240, 460, 392, 0, 0, -208, -220, 0, 1,
3, 4, 7, 8, 10, 11, 0, -1, -3, -4
0, 240, 460, 392,
0, 0, -208, -220,
0, 1, 3, 4,
7, 8, 10, 11,
0, -1, -3, -4
};
// padded to zero where table size is less then 16
@ -336,27 +340,12 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
ADPCMDecodeContext *c = avctx->priv_data;
ADPCMChannelStatus *cs;
int n, m, channel, i;
int block_predictor[2];
short *samples;
short *samples_end;
const uint8_t *src;
int st; /* stereo */
/* DK3 ADPCM accounting variables */
unsigned char last_byte = 0;
unsigned char nibble;
int decode_top_nibble_next = 0;
int diff_channel;
/* EA ADPCM state variables */
uint32_t samples_in_chunk;
int32_t previous_left_sample, previous_right_sample;
int32_t current_left_sample, current_right_sample;
int32_t next_left_sample, next_right_sample;
int32_t coeff1l, coeff2l, coeff1r, coeff2r;
uint8_t shift_left, shift_right;
int count1, count2;
int coeff[2][2], shift[2];//used in EA MAXIS ADPCM
if (!buf_size)
return 0;
@ -376,7 +365,12 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
switch(avctx->codec->id) {
case CODEC_ID_ADPCM_IMA_QT:
n = buf_size - 2*avctx->channels;
/* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
Channel data is interleaved per-chunk. */
if (buf_size / 34 < avctx->channels) {
av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
return AVERROR(EINVAL);
}
for (channel = 0; channel < avctx->channels; channel++) {
int16_t predictor;
int step_index;
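
A quick check of the numbers in the comment above (illustrative arithmetic, not part of the patch): each per-channel QuickTime IMA chunk carries 2 bytes of predictor/step-index header plus 32 data bytes, and every data byte holds two 4-bit nibbles, i.e. two samples — hence 34 bytes per 64 samples and the buf_size / 34 < avctx->channels rejection.

/* Illustrative only: chunk layout as described in the comment above. */
enum {
    QT_IMA_CHUNK_BYTES  = 34,                                        /* per channel         */
    QT_IMA_HEADER_BYTES = 2,                                         /* predictor + index   */
    QT_IMA_DATA_BYTES   = QT_IMA_CHUNK_BYTES - QT_IMA_HEADER_BYTES,  /* 32                  */
    QT_IMA_SAMPLES      = 2 * QT_IMA_DATA_BYTES                      /* 2 nibbles/byte = 64 */
};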
@ -409,7 +403,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
samples = (short*)data + channel;
for(m=32; n>0 && m>0; n--, m--) { /* in QuickTime, IMA is encoded by chuncks of 34 bytes (=64 samples) */
for (m = 0; m < 32; m++) {
*samples = adpcm_ima_qt_expand_nibble(cs, src[0] & 0x0F, 3);
samples += avctx->channels;
*samples = adpcm_ima_qt_expand_nibble(cs, src[0] >> 4 , 3);
@ -439,60 +433,66 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
}
while(src < buf + buf_size){
for (i = 0; i < avctx->channels; i++) {
cs = &c->status[i];
for (m = 0; m < 4; m++) {
for(i=0; i<=st; i++)
*samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] & 0x0F, 3);
for(i=0; i<=st; i++)
*samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] >> 4 , 3);
src++;
uint8_t v = *src++;
*samples = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
samples += avctx->channels;
*samples = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
samples += avctx->channels;
}
src += 4*st;
samples -= 8 * avctx->channels - 1;
}
samples += 7 * avctx->channels;
}
break;
case CODEC_ID_ADPCM_4XM:
cs = &(c->status[0]);
c->status[0].predictor= (int16_t)bytestream_get_le16(&src);
if(st){
c->status[1].predictor= (int16_t)bytestream_get_le16(&src);
for (i = 0; i < avctx->channels; i++)
c->status[i].predictor= (int16_t)bytestream_get_le16(&src);
for (i = 0; i < avctx->channels; i++) {
c->status[i].step_index= (int16_t)bytestream_get_le16(&src);
c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
}
c->status[0].step_index= (int16_t)bytestream_get_le16(&src);
if(st){
c->status[1].step_index= (int16_t)bytestream_get_le16(&src);
}
if (cs->step_index < 0) cs->step_index = 0;
if (cs->step_index > 88) cs->step_index = 88;
m= (buf_size - (src - buf))>>st;
for(i=0; i<m; i++) {
*samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] & 0x0F, 4);
if (st)
*samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] & 0x0F, 4);
*samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] >> 4, 4);
if (st)
*samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] >> 4, 4);
for (i = 0; i < avctx->channels; i++) {
samples = (short*)data + i;
cs = &c->status[i];
for (n = 0; n < m; n++) {
uint8_t v = *src++;
*samples = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
samples += avctx->channels;
*samples = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
samples += avctx->channels;
}
src += m<<st;
}
samples -= (avctx->channels - 1);
break;
case CODEC_ID_ADPCM_MS:
{
int block_predictor;
if (avctx->block_align != 0 && buf_size > avctx->block_align)
buf_size = avctx->block_align;
n = buf_size - 7 * avctx->channels;
if (n < 0)
return -1;
block_predictor[0] = av_clip(*src++, 0, 6);
block_predictor[1] = 0;
if (st)
block_predictor[1] = av_clip(*src++, 0, 6);
block_predictor = av_clip(*src++, 0, 6);
c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
if (st) {
block_predictor = av_clip(*src++, 0, 6);
c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
}
c->status[0].idelta = (int16_t)bytestream_get_le16(&src);
if (st){
c->status[1].idelta = (int16_t)bytestream_get_le16(&src);
}
c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor[0]];
c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor[0]];
c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor[1]];
c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor[1]];
c->status[0].sample1 = bytestream_get_le16(&src);
if (st) c->status[1].sample1 = bytestream_get_le16(&src);
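
The loops above, and several of the cases further down, push single 4-bit nibbles through adpcm_ima_expand_nibble() and pick the destination channel with c->status[st], where st is 1 for stereo and 0 for mono, so one loop body serves both layouts. The helper itself is outside the hunks shown; here is a self-contained sketch of the textbook IMA ADPCM expansion it corresponds to (an illustration of the standard algorithm, not a verbatim copy of libavcodec's code; the names are hypothetical):

#include <stdint.h>

/* Standard IMA ADPCM step and index-adjustment tables. */
static const int16_t ima_step_table[89] = {
        7,     8,     9,    10,    11,    12,    13,    14,    16,    17,
       19,    21,    23,    25,    28,    31,    34,    37,    41,    45,
       50,    55,    60,    66,    73,    80,    88,    97,   107,   118,
      130,   143,   157,   173,   190,   209,   230,   253,   279,   307,
      337,   371,   408,   449,   494,   544,   598,   658,   724,   796,
      876,   963,  1060,  1166,  1282,  1411,  1552,  1707,  1878,  2066,
     2272,  2499,  2749,  3024,  3327,  3660,  4026,  4428,  4871,  5358,
     5894,  6484,  7132,  7845,  8630,  9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};
static const int8_t ima_index_table[16] = {
    -1, -1, -1, -1, 2, 4, 6, 8,
    -1, -1, -1, -1, 2, 4, 6, 8
};

typedef struct {
    int predictor;   /* previous output sample          */
    int step_index;  /* position in step table, 0..88   */
} ImaState;

static int16_t ima_expand_nibble(ImaState *s, uint8_t nibble)
{
    int step = ima_step_table[s->step_index];
    int diff = step >> 3;

    /* reconstruct the difference from the three magnitude bits */
    if (nibble & 4) diff += step;
    if (nibble & 2) diff += step >> 1;
    if (nibble & 1) diff += step >> 2;

    /* bit 3 is the sign */
    if (nibble & 8) s->predictor -= diff;
    else            s->predictor += diff;

    /* clamp to the 16-bit sample range */
    if (s->predictor >  32767) s->predictor =  32767;
    if (s->predictor < -32768) s->predictor = -32768;

    /* adapt the step size for the next nibble */
    s->step_index += ima_index_table[nibble & 0x0F];
    if (s->step_index < 0)  s->step_index = 0;
    if (s->step_index > 88) s->step_index = 88;

    return (int16_t)s->predictor;
}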
@ -509,39 +509,37 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
src ++;
}
break;
}
case CODEC_ID_ADPCM_IMA_DK4:
if (avctx->block_align != 0 && buf_size > avctx->block_align)
buf_size = avctx->block_align;
c->status[0].predictor = (int16_t)bytestream_get_le16(&src);
c->status[0].step_index = *src++;
src++;
*samples++ = c->status[0].predictor;
if (st) {
c->status[1].predictor = (int16_t)bytestream_get_le16(&src);
c->status[1].step_index = *src++;
src++;
*samples++ = c->status[1].predictor;
n = buf_size - 4 * avctx->channels;
if (n < 0) {
av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
return AVERROR(EINVAL);
}
while (src < buf + buf_size) {
/* take care of the top nibble (always left or mono channel) */
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
src[0] >> 4, 3);
/* take care of the bottom nibble, which is right sample for
* stereo, or another mono sample */
if (st)
*samples++ = adpcm_ima_expand_nibble(&c->status[1],
src[0] & 0x0F, 3);
else
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
src[0] & 0x0F, 3);
for (channel = 0; channel < avctx->channels; channel++) {
cs = &c->status[channel];
cs->predictor = (int16_t)bytestream_get_le16(&src);
cs->step_index = *src++;
src++;
*samples++ = cs->predictor;
}
while (n-- > 0) {
uint8_t v = *src++;
*samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
}
break;
case CODEC_ID_ADPCM_IMA_DK3:
{
unsigned char last_byte = 0;
unsigned char nibble;
int decode_top_nibble_next = 0;
int diff_channel;
if (avctx->block_align != 0 && buf_size > avctx->block_align)
buf_size = avctx->block_align;
@ -586,50 +584,41 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
*samples++ = c->status[0].predictor - c->status[1].predictor;
}
break;
}
case CODEC_ID_ADPCM_IMA_ISS:
c->status[0].predictor = (int16_t)AV_RL16(src + 0);
c->status[0].step_index = src[2];
src += 4;
if(st) {
c->status[1].predictor = (int16_t)AV_RL16(src + 0);
c->status[1].step_index = src[2];
src += 4;
}
while (src < buf + buf_size) {
if (st) {
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
src[0] >> 4 , 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[1],
src[0] & 0x0F, 3);
} else {
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
src[0] & 0x0F, 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
src[0] >> 4 , 3);
n = buf_size - 4 * avctx->channels;
if (n < 0) {
av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
return AVERROR(EINVAL);
}
for (channel = 0; channel < avctx->channels; channel++) {
cs = &c->status[channel];
cs->predictor = (int16_t)bytestream_get_le16(&src);
cs->step_index = *src++;
src++;
}
while (n-- > 0) {
uint8_t v1, v2;
uint8_t v = *src++;
/* nibbles are swapped for mono */
if (st) {
v1 = v >> 4;
v2 = v & 0x0F;
} else {
v2 = v >> 4;
v1 = v & 0x0F;
}
*samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
}
break;
case CODEC_ID_ADPCM_IMA_WS:
/* no per-block initialization; just start decoding the data */
while (src < buf + buf_size) {
if (st) {
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
src[0] >> 4 , 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[1],
src[0] & 0x0F, 3);
} else {
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
src[0] >> 4 , 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
src[0] & 0x0F, 3);
}
src++;
uint8_t v = *src++;
*samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
}
break;
case CODEC_ID_ADPCM_XA:
@ -668,6 +657,13 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
}
break;
case CODEC_ID_ADPCM_EA:
{
int32_t previous_left_sample, previous_right_sample;
int32_t current_left_sample, current_right_sample;
int32_t next_left_sample, next_right_sample;
int32_t coeff1l, coeff2l, coeff1r, coeff2r;
uint8_t shift_left, shift_right;
/* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
each coding 28 stereo samples. */
if (buf_size < 12) {
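
Spelling out the frame geometry from the comment above (illustrative arithmetic under that description, not the decoder's exact bookkeeping — the real code reads samples_in_chunk from the 12-byte header and validates it against the packet size): each 30-byte piece presumably spends 2 bytes on per-piece coefficient/shift selectors and 28 bytes on one left and one right nibble per sample, giving 28 stereo pairs.

/* Illustrative only: EA ADPCM frame geometry as described above. */
enum {
    EA_HEADER_BYTES    = 12,
    EA_PIECE_BYTES     = 30,
    EA_PAIRS_PER_PIECE = 28    /* stereo sample pairs per 30-byte piece */
};

/* Upper bound on decodable stereo pairs in a packet of buf_size bytes. */
static int ea_adpcm_max_pairs(int buf_size)
{
    if (buf_size < EA_HEADER_BYTES)
        return 0;
    return (buf_size - EA_HEADER_BYTES) / EA_PIECE_BYTES * EA_PAIRS_PER_PIECE;
}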
@ -721,7 +717,11 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
src += 2; // Skip terminating 0x0000
break;
}
case CODEC_ID_ADPCM_EA_MAXIS_XA:
{
int coeff[2][2], shift[2];
for(channel = 0; channel < avctx->channels; channel++) {
for (i=0; i<2; i++)
coeff[channel][i] = ea_adpcm_table[(*src >> 4) + 4*i];
@ -743,6 +743,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
src+=avctx->channels;
}
break;
}
case CODEC_ID_ADPCM_EA_R1:
case CODEC_ID_ADPCM_EA_R2:
case CODEC_ID_ADPCM_EA_R3: {
@ -885,18 +886,9 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
break;
case CODEC_ID_ADPCM_CT:
while (src < buf + buf_size) {
if (st) {
*samples++ = adpcm_ct_expand_nibble(&c->status[0],
src[0] >> 4);
*samples++ = adpcm_ct_expand_nibble(&c->status[1],
src[0] & 0x0F);
} else {
*samples++ = adpcm_ct_expand_nibble(&c->status[0],
src[0] >> 4);
*samples++ = adpcm_ct_expand_nibble(&c->status[0],
src[0] & 0x0F);
}
src++;
uint8_t v = *src++;
*samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
*samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
}
break;
case CODEC_ID_ADPCM_SBPRO_4:
@ -1004,18 +996,9 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
}
case CODEC_ID_ADPCM_YAMAHA:
while (src < buf + buf_size) {
if (st) {
*samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
src[0] & 0x0F);
*samples++ = adpcm_yamaha_expand_nibble(&c->status[1],
src[0] >> 4 );
} else {
*samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
src[0] & 0x0F);
*samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
src[0] >> 4 );
}
src++;
uint8_t v = *src++;
*samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
*samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
}
break;
case CODEC_ID_ADPCM_THP:

View File

@ -32,13 +32,7 @@
* Fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
* by Mike Melanson (melanson@pcisys.net)
*
* Reference documents:
* http://www.pcisys.net/~melanson/codecs/simpleaudio.html
* http://www.geocities.com/SiliconValley/8682/aud3.txt
* http://openquicktime.sourceforge.net/plugins.htm
* XAnim sources (xa_codec.c) http://www.rasnaimaging.com/people/lapus/download.html
* http://www.cs.ucla.edu/~leec/mediabench/applications.html
* SoX source code http://home.sprynet.com/~cbagwell/sox.html
* See ADPCM decoder reference documents for codec information.
*/
typedef struct TrellisPath {

libavcodec/arm/dca.h (new file, 49 lines)
View File

@ -0,0 +1,49 @@
/*
* Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_ARM_DCA_H
#define AVCODEC_ARM_DCA_H
#include <stdint.h>
#include "config.h"
#if HAVE_NEON && HAVE_INLINE_ASM
#define int8x8_fmul_int32 int8x8_fmul_int32
static inline void int8x8_fmul_int32(float *dst, const int8_t *src, int scale)
{
__asm__ ("vcvt.f32.s32 %2, %2, #4 \n"
"vld1.8 {d0}, [%1,:64] \n"
"vmovl.s8 q0, d0 \n"
"vmovl.s16 q1, d1 \n"
"vmovl.s16 q0, d0 \n"
"vcvt.f32.s32 q0, q0 \n"
"vcvt.f32.s32 q1, q1 \n"
"vmul.f32 q0, q0, %y2 \n"
"vmul.f32 q1, q1, %y2 \n"
"vst1.32 {q0-q1}, [%m0,:128] \n"
: "=Um"(*(float (*)[8])dst)
: "r"(src), "x"(scale)
: "d0", "d1", "d2", "d3");
}
#endif
#endif /* AVCODEC_ARM_DCA_H */
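
For readers not fluent in NEON: the first vcvt.f32.s32 with a fractional-bits argument of #4 converts the integer scale to float and divides it by 2^4 = 16 in one step; the rest widens eight int8 samples to int32, converts them to float and multiplies by that scale. A scalar sketch of the same computation, matching the generic C fallback that the dca.c diff below adds under #ifndef int8x8_fmul_int32:

#include <stdint.h>

/* Scalar equivalent of the NEON routine above: dst[i] = src[i] * (scale / 16). */
static void int8x8_fmul_int32_scalar(float *dst, const int8_t *src, int scale)
{
    float fscale = scale / 16.0f;
    int i;
    for (i = 0; i < 8; i++)
        dst[i] = src[i] * fscale;
}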

View File

@ -42,6 +42,10 @@
#include "dcadsp.h"
#include "fmtconvert.h"
#if ARCH_ARM
# include "arm/dca.h"
#endif
//#define TRACE
#define DCA_PRIM_CHANNELS_MAX (7)
@ -320,7 +324,7 @@ typedef struct {
int lfe_scale_factor;
/* Subband samples history (for ADPCM) */
float subband_samples_hist[DCA_PRIM_CHANNELS_MAX][DCA_SUBBANDS][4];
DECLARE_ALIGNED(16, float, subband_samples_hist)[DCA_PRIM_CHANNELS_MAX][DCA_SUBBANDS][4];
DECLARE_ALIGNED(32, float, subband_fir_hist)[DCA_PRIM_CHANNELS_MAX][512];
DECLARE_ALIGNED(32, float, subband_fir_noidea)[DCA_PRIM_CHANNELS_MAX][32];
int hist_index[DCA_PRIM_CHANNELS_MAX];
@ -1057,6 +1061,16 @@ static int decode_blockcode(int code, int levels, int *values)
static const uint8_t abits_sizes[7] = { 7, 10, 12, 13, 15, 17, 19 };
static const uint8_t abits_levels[7] = { 3, 5, 7, 9, 13, 17, 25 };
#ifndef int8x8_fmul_int32
static inline void int8x8_fmul_int32(float *dst, const int8_t *src, int scale)
{
float fscale = scale / 16.0;
int i;
for (i = 0; i < 8; i++)
dst[i] = src[i] * fscale;
}
#endif
static int dca_subsubframe(DCAContext * s, int base_channel, int block_index)
{
int k, l;
@ -1161,19 +1175,16 @@ static int dca_subsubframe(DCAContext * s, int base_channel, int block_index)
for (l = s->vq_start_subband[k]; l < s->subband_activity[k]; l++) {
/* 1 vector -> 32 samples but we only need the 8 samples
* for this subsubframe. */
int m;
int hfvq = s->high_freq_vq[k][l];
if (!s->debug_flag & 0x01) {
av_log(s->avctx, AV_LOG_DEBUG, "Stream with high frequencies VQ coding\n");
s->debug_flag |= 0x01;
}
for (m = 0; m < 8; m++) {
subband_samples[k][l][m] =
high_freq_vq[s->high_freq_vq[k][l]][subsubframe * 8 +
m]
* (float) s->scale_factor[k][l][0] / 16.0;
}
int8x8_fmul_int32(subband_samples[k][l],
&high_freq_vq[hfvq][subsubframe * 8],
s->scale_factor[k][l][0]);
}
}

View File

@ -4224,7 +4224,7 @@ static const float lossless_quant_d[32] = {
/* Vector quantization tables */
static const int8_t high_freq_vq[1024][32] =
DECLARE_ALIGNED(8, static const int8_t, high_freq_vq)[1024][32] =
{
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },

View File

@ -39,17 +39,16 @@
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
typedef struct DPCMContext {
int channels;
short roq_square_array[256];
long sample[2];//for SOL_DPCM
const int *sol_table;//for SOL_DPCM
int16_t roq_square_array[256];
int sample[2]; ///< previous sample (for SOL_DPCM)
const int8_t *sol_table; ///< delta table for SOL_DPCM
} DPCMContext;
#define SE_16BIT(x) if (x & 0x8000) x -= 0x10000;
static const int interplay_delta_table[] = {
static const int16_t interplay_delta_table[] = {
0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23,
@ -85,15 +84,17 @@ static const int interplay_delta_table[] = {
};
static const int sol_table_old[16] =
{ 0x0, 0x1, 0x2 , 0x3, 0x6, 0xA, 0xF, 0x15,
-0x15, -0xF, -0xA, -0x6, -0x3, -0x2, -0x1, 0x0};
static const int8_t sol_table_old[16] = {
0x0, 0x1, 0x2, 0x3, 0x6, 0xA, 0xF, 0x15,
-0x15, -0xF, -0xA, -0x6, -0x3, -0x2, -0x1, 0x0
};
static const int sol_table_new[16] =
{ 0x0, 0x1, 0x2, 0x3, 0x6, 0xA, 0xF, 0x15,
0x0, -0x1, -0x2, -0x3, -0x6, -0xA, -0xF, -0x15};
static const int8_t sol_table_new[16] = {
0x0, 0x1, 0x2, 0x3, 0x6, 0xA, 0xF, 0x15,
0x0, -0x1, -0x2, -0x3, -0x6, -0xA, -0xF, -0x15
};
static const int sol_table_16[128] = {
static const int16_t sol_table_16[128] = {
0x000, 0x008, 0x010, 0x020, 0x030, 0x040, 0x050, 0x060, 0x070, 0x080,
0x090, 0x0A0, 0x0B0, 0x0C0, 0x0D0, 0x0E0, 0x0F0, 0x100, 0x110, 0x120,
0x130, 0x140, 0x150, 0x160, 0x170, 0x180, 0x190, 0x1A0, 0x1B0, 0x1C0,
@ -110,12 +111,15 @@ static const int sol_table_16[128] = {
};
static av_cold int dpcm_decode_init(AVCodecContext *avctx)
{
DPCMContext *s = avctx->priv_data;
int i;
short square;
if (avctx->channels < 1 || avctx->channels > 2) {
av_log(avctx, AV_LOG_INFO, "invalid number of channels\n");
return AVERROR(EINVAL);
}
s->channels = avctx->channels;
s->sample[0] = s->sample[1] = 0;
@ -125,13 +129,12 @@ static av_cold int dpcm_decode_init(AVCodecContext *avctx)
case CODEC_ID_ROQ_DPCM:
/* initialize square table */
for (i = 0; i < 128; i++) {
square = i * i;
int16_t square = i * i;
s->roq_square_array[i ] = square;
s->roq_square_array[i + 128] = -square;
}
break;
case CODEC_ID_SOL_DPCM:
switch(avctx->codec_tag){
case 1:
@ -143,7 +146,6 @@ static av_cold int dpcm_decode_init(AVCodecContext *avctx)
s->sample[0] = s->sample[1] = 0x80;
break;
case 3:
s->sol_table=sol_table_16;
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unknown SOL subcodec\n");
@ -155,146 +157,160 @@ static av_cold int dpcm_decode_init(AVCodecContext *avctx)
break;
}
if (avctx->codec->id == CODEC_ID_SOL_DPCM && avctx->codec_tag != 3)
avctx->sample_fmt = AV_SAMPLE_FMT_U8;
else
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
return 0;
}
static int dpcm_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
static int dpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
const uint8_t *buf_end = buf + buf_size;
DPCMContext *s = avctx->priv_data;
int in, out = 0;
int out = 0;
int predictor[2];
int channel_number = 0;
short *output_samples = data;
int shift[2];
unsigned char byte;
short diff;
int ch = 0;
int stereo = s->channels - 1;
int16_t *output_samples = data;
if (!buf_size)
return 0;
// almost every DPCM variant expands one byte of data into two
if(*data_size/2 < buf_size)
return -1;
/* calculate output size */
switch(avctx->codec->id) {
case CODEC_ID_ROQ_DPCM:
out = buf_size - 8;
break;
case CODEC_ID_INTERPLAY_DPCM:
out = buf_size - 6 - s->channels;
break;
case CODEC_ID_XAN_DPCM:
out = buf_size - 2 * s->channels;
break;
case CODEC_ID_SOL_DPCM:
if (avctx->codec_tag != 3)
out = buf_size * 2;
else
out = buf_size;
break;
}
out *= av_get_bytes_per_sample(avctx->sample_fmt);
if (out < 0) {
av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
return AVERROR(EINVAL);
}
if (*data_size < out) {
av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
return AVERROR(EINVAL);
}
switch(avctx->codec->id) {
case CODEC_ID_ROQ_DPCM:
if (s->channels == 1)
predictor[0] = AV_RL16(&buf[6]);
else {
predictor[0] = buf[7] << 8;
predictor[1] = buf[6] << 8;
buf += 6;
if (stereo) {
predictor[1] = (int16_t)(bytestream_get_byte(&buf) << 8);
predictor[0] = (int16_t)(bytestream_get_byte(&buf) << 8);
} else {
predictor[0] = (int16_t)bytestream_get_le16(&buf);
}
SE_16BIT(predictor[0]);
SE_16BIT(predictor[1]);
/* decode the samples */
for (in = 8, out = 0; in < buf_size; in++, out++) {
predictor[channel_number] += s->roq_square_array[buf[in]];
predictor[channel_number] = av_clip_int16(predictor[channel_number]);
output_samples[out] = predictor[channel_number];
while (buf < buf_end) {
predictor[ch] += s->roq_square_array[*buf++];
predictor[ch] = av_clip_int16(predictor[ch]);
*output_samples++ = predictor[ch];
/* toggle channel */
channel_number ^= s->channels - 1;
ch ^= stereo;
}
break;
case CODEC_ID_INTERPLAY_DPCM:
in = 6; /* skip over the stream mask and stream length */
predictor[0] = AV_RL16(&buf[in]);
in += 2;
SE_16BIT(predictor[0])
output_samples[out++] = predictor[0];
if (s->channels == 2) {
predictor[1] = AV_RL16(&buf[in]);
in += 2;
SE_16BIT(predictor[1])
output_samples[out++] = predictor[1];
buf += 6; /* skip over the stream mask and stream length */
for (ch = 0; ch < s->channels; ch++) {
predictor[ch] = (int16_t)bytestream_get_le16(&buf);
*output_samples++ = predictor[ch];
}
while (in < buf_size) {
predictor[channel_number] += interplay_delta_table[buf[in++]];
predictor[channel_number] = av_clip_int16(predictor[channel_number]);
output_samples[out++] = predictor[channel_number];
ch = 0;
while (buf < buf_end) {
predictor[ch] += interplay_delta_table[*buf++];
predictor[ch] = av_clip_int16(predictor[ch]);
*output_samples++ = predictor[ch];
/* toggle channel */
channel_number ^= s->channels - 1;
ch ^= stereo;
}
break;
case CODEC_ID_XAN_DPCM:
in = 0;
shift[0] = shift[1] = 4;
predictor[0] = AV_RL16(&buf[in]);
in += 2;
SE_16BIT(predictor[0]);
if (s->channels == 2) {
predictor[1] = AV_RL16(&buf[in]);
in += 2;
SE_16BIT(predictor[1]);
}
{
int shift[2] = { 4, 4 };
while (in < buf_size) {
byte = buf[in++];
diff = (byte & 0xFC) << 8;
if ((byte & 0x03) == 3)
shift[channel_number]++;
for (ch = 0; ch < s->channels; ch++)
predictor[ch] = (int16_t)bytestream_get_le16(&buf);
ch = 0;
while (buf < buf_end) {
uint8_t n = *buf++;
int16_t diff = (n & 0xFC) << 8;
if ((n & 0x03) == 3)
shift[ch]++;
else
shift[channel_number] -= (2 * (byte & 3));
shift[ch] -= (2 * (n & 3));
/* saturate the shifter to a lower limit of 0 */
if (shift[channel_number] < 0)
shift[channel_number] = 0;
if (shift[ch] < 0)
shift[ch] = 0;
diff >>= shift[channel_number];
predictor[channel_number] += diff;
diff >>= shift[ch];
predictor[ch] += diff;
predictor[channel_number] = av_clip_int16(predictor[channel_number]);
output_samples[out++] = predictor[channel_number];
predictor[ch] = av_clip_int16(predictor[ch]);
*output_samples++ = predictor[ch];
/* toggle channel */
channel_number ^= s->channels - 1;
ch ^= stereo;
}
break;
}
case CODEC_ID_SOL_DPCM:
in = 0;
if (avctx->codec_tag != 3) {
if(*data_size/4 < buf_size)
return -1;
while (in < buf_size) {
int n1, n2;
n1 = (buf[in] >> 4) & 0xF;
n2 = buf[in++] & 0xF;
s->sample[0] += s->sol_table[n1];
if (s->sample[0] < 0) s->sample[0] = 0;
if (s->sample[0] > 255) s->sample[0] = 255;
output_samples[out++] = (s->sample[0] - 128) << 8;
s->sample[s->channels - 1] += s->sol_table[n2];
if (s->sample[s->channels - 1] < 0) s->sample[s->channels - 1] = 0;
if (s->sample[s->channels - 1] > 255) s->sample[s->channels - 1] = 255;
output_samples[out++] = (s->sample[s->channels - 1] - 128) << 8;
uint8_t *output_samples_u8 = data;
while (buf < buf_end) {
uint8_t n = *buf++;
s->sample[0] += s->sol_table[n >> 4];
s->sample[0] = av_clip_uint8(s->sample[0]);
*output_samples_u8++ = s->sample[0];
s->sample[stereo] += s->sol_table[n & 0x0F];
s->sample[stereo] = av_clip_uint8(s->sample[stereo]);
*output_samples_u8++ = s->sample[stereo];
}
} else {
while (in < buf_size) {
int n;
n = buf[in++];
if (n & 0x80) s->sample[channel_number] -= s->sol_table[n & 0x7F];
else s->sample[channel_number] += s->sol_table[n & 0x7F];
s->sample[channel_number] = av_clip_int16(s->sample[channel_number]);
output_samples[out++] = s->sample[channel_number];
while (buf < buf_end) {
uint8_t n = *buf++;
if (n & 0x80) s->sample[ch] -= sol_table_16[n & 0x7F];
else s->sample[ch] += sol_table_16[n & 0x7F];
s->sample[ch] = av_clip_int16(s->sample[ch]);
*output_samples++ = s->sample[ch];
/* toggle channel */
channel_number ^= s->channels - 1;
ch ^= stereo;
}
}
break;
}
*data_size = out * sizeof(short);
*data_size = out;
return buf_size;
}
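
The new size bookkeeping at the top of dpcm_decode_frame() replaces the old blanket "one byte in, two bytes out" assumption with per-codec arithmetic. A worked example of what those expressions evaluate to (illustrative numbers only):

#include <stdio.h>

int main(void)
{
    int buf_size = 1000;           /* hypothetical ROQ DPCM packet              */
    int samples  = buf_size - 8;   /* 8-byte header, then one byte per sample   */
    int out      = samples * 2;    /* 2 bytes per AV_SAMPLE_FMT_S16 sample      */

    printf("ROQ: %d samples -> %d output bytes\n", samples, out);

    /* SOL DPCM subcodecs 1/2 expand the other way: two U8 samples per byte. */
    printf("SOL 1/2: %d samples -> %d output bytes\n", buf_size * 2, buf_size * 2);
    return 0;
}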

View File

@ -162,18 +162,18 @@ static void fill_scaling_lists(struct dxva_context *ctx, const H264Context *h, D
for (j = 0; j < 16; j++)
qm->bScalingLists4x4[i][j] = h->pps.scaling_matrix4[i][j];
for (j = 0; j < 64; j++) {
qm->bScalingLists8x8[0][j] = h->pps.scaling_matrix8[0][j];
qm->bScalingLists8x8[1][j] = h->pps.scaling_matrix8[3][j];
for (i = 0; i < 64; i++) {
qm->bScalingLists8x8[0][i] = h->pps.scaling_matrix8[0][i];
qm->bScalingLists8x8[1][i] = h->pps.scaling_matrix8[3][i];
}
} else {
for (i = 0; i < 6; i++)
for (j = 0; j < 16; j++)
qm->bScalingLists4x4[i][j] = h->pps.scaling_matrix4[i][zigzag_scan[j]];
for (j = 0; j < 64; j++) {
qm->bScalingLists8x8[0][j] = h->pps.scaling_matrix8[0][ff_zigzag_direct[j]];
qm->bScalingLists8x8[1][j] = h->pps.scaling_matrix8[3][ff_zigzag_direct[j]];
for (i = 0; i < 64; i++) {
qm->bScalingLists8x8[0][i] = h->pps.scaling_matrix8[0][ff_zigzag_direct[i]];
qm->bScalingLists8x8[1][i] = h->pps.scaling_matrix8[3][ff_zigzag_direct[i]];
}
}
}

View File

@ -427,13 +427,13 @@ static inline void decode_ac_coeffs(GetBitContext *gb, DCTELEM *out,
lev_cb_index = lev_to_cb_index[FFMIN(level, 9)];
bits_left = get_bits_left(gb);
if (bits_left <= 8 && !show_bits(gb, bits_left))
if (bits_left <= 0 || (bits_left <= 8 && !show_bits(gb, bits_left)))
return;
run = decode_vlc_codeword(gb, ac_codebook[run_cb_index]);
bits_left = get_bits_left(gb);
if (bits_left <= 8 && !show_bits(gb, bits_left))
if (bits_left <= 0 || (bits_left <= 8 && !show_bits(gb, bits_left)))
return;
level = decode_vlc_codeword(gb, ac_codebook[lev_cb_index]) + 1;

View File

@ -823,6 +823,11 @@ int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *sa
avctx->pkt = avpkt;
if (!avpkt->data && avpkt->size) {
av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
return AVERROR(EINVAL);
}
if((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size){
//FIXME remove the check below _after_ ensuring that all audio check that the available space is enough
if(*frame_size_ptr < AVCODEC_MAX_AUDIO_FRAME_SIZE){

View File

@ -120,7 +120,7 @@ static int latm_write_frame_header(AVFormatContext *s, PutBitContext *bs)
}
put_bits(bs, 3, 0); /* frameLengthType */
put_bits(bs, 8, 0); /* latmBufferFullness */
put_bits(bs, 8, 0xff); /* latmBufferFullness */
put_bits(bs, 1, 0); /* otherDataPresent */
put_bits(bs, 1, 0); /* crcCheckPresent */

View File

@ -49,6 +49,10 @@ static int check_pes(uint8_t *p, uint8_t *end){
return pes1||pes2;
}
static int check_pack_header(const uint8_t *buf) {
return (buf[1] & 0xC0) == 0x40 || (buf[1] & 0xF0) == 0x20;
}
static int mpegps_probe(AVProbeData *p)
{
uint32_t code= -1;
@ -61,9 +65,10 @@ static int mpegps_probe(AVProbeData *p)
if ((code & 0xffffff00) == 0x100) {
int len= p->buf[i+1] << 8 | p->buf[i+2];
int pes= check_pes(p->buf+i, p->buf+p->buf_size);
int pack = check_pack_header(p->buf+i);
if(code == SYSTEM_HEADER_START_CODE) sys++;
else if(code == PACK_START_CODE) pspack++;
else if(code == PACK_START_CODE && pack) pspack++;
else if((code & 0xf0) == VIDEO_ID && pes) vid++;
// skip pes payload to avoid start code emulation for private
// and audio streams
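
check_pack_header() tells real pack headers apart from stray pack start codes by the byte that follows the start code: an MPEG-2 program-stream pack header continues with the bit pattern '01xxxxxx', hence (buf[1] & 0xC0) == 0x40, while an MPEG-1 system pack header continues with '0010xxxx', hence (buf[1] & 0xF0) == 0x20. A small self-contained illustration of the same predicate (hypothetical byte values):

#include <assert.h>
#include <stdint.h>

/* Same test as check_pack_header() above. */
static int looks_like_pack_header(const uint8_t *buf)
{
    return (buf[1] & 0xC0) == 0x40 ||   /* MPEG-2 PS: '01' marker bits   */
           (buf[1] & 0xF0) == 0x20;     /* MPEG-1 SS: '0010' marker bits */
}

int main(void)
{
    /* buf[0] is the last byte of the 0x000001BA start code; buf[1] follows it. */
    const uint8_t mpeg2_pack[2] = { 0xBA, 0x44 };  /* 0100 0100 -> MPEG-2  */
    const uint8_t mpeg1_pack[2] = { 0xBA, 0x21 };  /* 0010 0001 -> MPEG-1  */
    const uint8_t not_a_pack[2] = { 0xBA, 0x81 };  /* 1000 0001 -> neither */

    assert( looks_like_pack_header(mpeg2_pack));
    assert( looks_like_pack_header(mpeg1_pack));
    assert(!looks_like_pack_header(not_a_pack));
    return 0;
}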

View File

@ -3535,7 +3535,7 @@ void av_dump_format(AVFormatContext *ic,
int is_output)
{
int i;
uint8_t *printed = av_mallocz(ic->nb_streams);
uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
if (ic->nb_streams && !printed)
return;