
Merge remote-tracking branch 'qatar/master'

* qatar/master:
  h264: stricter reference limit enforcement.
  h264: increase reference poc list from 16 to 32.
  xa_adpcm: limit filter to prevent xa_adpcm_table[] array bounds overruns.
  snow: check reference frame indices.
  snow: reject unsupported chroma shifts.
  Add ffvhuff encoding and decoding regression test
  anm: convert to bytestream2 API
  bytestream: add more unchecked variants for bytestream2 API
  jvdec: unbreak video decoding
  jv demux: set video stream duration
  fate: add pam image regression test

Conflicts:
	libavcodec/adpcm.c
	libavcodec/anm.c
	libavcodec/h264.c
	libavcodec/mpegvideo.h
	libavcodec/snowdec.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 67235dfa1d
Michael Niedermayer <michaelni@gmx.at>, 2012-03-15 01:21:16 +01:00
12 changed files with 113 additions and 56 deletions

libavcodec/adpcm.c

@@ -265,7 +265,8 @@ static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned c
return c->predictor;
}
static void xa_decode(short *out, const unsigned char *in,
static int xa_decode(AVCodecContext *avctx,
short *out, const unsigned char *in,
ADPCMChannelStatus *left, ADPCMChannelStatus *right, int inc)
{
int i, j;
@@ -278,7 +279,7 @@ static void xa_decode(short *out, const unsigned char *in,
shift = 12 - (in[4+i*2] & 15);
filter = in[4+i*2] >> 4;
if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
av_log_ask_for_sample(NULL, "unknown filter %d\n", filter);
av_log_ask_for_sample(avctx, "unknown XA-ADPCM filter %d\n", filter);
filter=0;
}
f0 = xa_adpcm_table[filter][0];
@@ -309,7 +310,7 @@ static void xa_decode(short *out, const unsigned char *in,
shift = 12 - (in[5+i*2] & 15);
filter = in[5+i*2] >> 4;
if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
av_log_ask_for_sample(NULL, "unknown filter %d\n", filter);
av_log_ask_for_sample(avctx, "unknown XA-ADPCM filter %d\n", filter);
filter=0;
}
@@ -336,6 +337,8 @@ static void xa_decode(short *out, const unsigned char *in,
left->sample2 = s_2;
}
}
return 0;
}
/**
@@ -823,8 +826,9 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
break;
case CODEC_ID_ADPCM_XA:
while (buf_size >= 128) {
xa_decode(samples, src, &c->status[0], &c->status[1],
avctx->channels);
if ((ret = xa_decode(avctx, samples, src, &c->status[0],
&c->status[1], avctx->channels)) < 0)
return ret;
src += 128;
samples += 28 * 8;
buf_size -= 128;

libavcodec/anm.c

@@ -30,26 +30,26 @@
typedef struct AnmContext {
AVFrame frame;
int palette[AVPALETTE_COUNT];
GetByteContext gb;
int x; ///< x coordinate position
} AnmContext;
static av_cold int decode_init(AVCodecContext *avctx)
{
AnmContext *s = avctx->priv_data;
const uint8_t *buf;
int i;
avctx->pix_fmt = PIX_FMT_PAL8;
if (avctx->extradata_size != 16*8 + 4*256)
return -1;
avcodec_get_frame_defaults(&s->frame);
s->frame.reference = 3;
bytestream2_init(&s->gb, avctx->extradata, avctx->extradata_size);
if (bytestream2_get_bytes_left(&s->gb) < 16 * 8 + 4 * 256)
return -1;
buf = avctx->extradata + 16*8;
bytestream2_skipu(&s->gb, 16 * 8);
for (i = 0; i < 256; i++)
s->palette[i] = bytestream_get_le32(&buf);
s->palette[i] = bytestream2_get_le32u(&s->gb);
return 0;
}
@@ -57,7 +57,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
/**
* Perform decode operation
* @param dst, dst_end Destination image buffer
* @param buf, buf_end Source buffer (optional, see below)
* @param gb, GetByteContext (optional, see below)
* @param pixel Fill color (optional, see below)
* @param count Pixel count
* @param x Pointer to x-axis counter
@@ -65,24 +65,22 @@ static av_cold int decode_init(AVCodecContext *avctx)
* @param linesize Destination image buffer linesize
* @return non-zero if destination buffer is exhausted
*
* a copy operation is achieved when 'buf' is set
* a fill operation is acheived when 'buf' is null and pixel is >= 0
* a skip operation is acheived when 'buf' is null and pixel is < 0
* a copy operation is achieved when 'gb' is set
* a fill operation is acheived when 'gb' is null and pixel is >= 0
* a skip operation is acheived when 'gb' is null and pixel is < 0
*/
static inline int op(uint8_t **dst, const uint8_t *dst_end,
const uint8_t **buf, const uint8_t *buf_end,
GetByteContext *gb,
int pixel, int count,
int *x, int width, int linesize)
{
int remaining = width - *x;
while(count > 0) {
int striplen = FFMIN(count, remaining);
if (buf) {
striplen = FFMIN(striplen, buf_end - *buf);
if (*buf >= buf_end)
if (gb) {
if (bytestream2_get_bytes_left(gb) < striplen)
goto exhausted;
memcpy(*dst, *buf, striplen);
*buf += striplen;
bytestream2_get_bufferu(gb, *dst, striplen);
} else if (pixel >= 0)
memset(*dst, pixel, striplen);
*dst += striplen;
@@ -111,9 +109,7 @@ static int decode_frame(AVCodecContext *avctx,
AVPacket *avpkt)
{
AnmContext *s = avctx->priv_data;
const uint8_t *buf = avpkt->data;
const int buf_size = avpkt->size;
const uint8_t *buf_end = buf + buf_size;
uint8_t *dst, *dst_end;
int count;
@@ -124,35 +120,37 @@ static int decode_frame(AVCodecContext *avctx,
dst = s->frame.data[0];
dst_end = s->frame.data[0] + s->frame.linesize[0]*avctx->height;
if (buf[0] != 0x42) {
bytestream2_init(&s->gb, avpkt->data, buf_size);
if (bytestream2_get_byte(&s->gb) != 0x42) {
av_log_ask_for_sample(avctx, "unknown record type\n");
return buf_size;
}
if (buf[1]) {
if (bytestream2_get_byte(&s->gb)) {
av_log_ask_for_sample(avctx, "padding bytes not supported\n");
return buf_size;
}
buf += 4;
bytestream2_skip(&s->gb, 2);
s->x = 0;
do {
/* if statements are ordered by probability */
#define OP(buf, pixel, count) \
op(&dst, dst_end, (buf), buf_end, (pixel), (count), &s->x, avctx->width, s->frame.linesize[0])
#define OP(gb, pixel, count) \
op(&dst, dst_end, (gb), (pixel), (count), &s->x, avctx->width, s->frame.linesize[0])
int type = bytestream_get_byte(&buf);
int type = bytestream2_get_byte(&s->gb);
count = type & 0x7F;
type >>= 7;
if (count) {
if (OP(type ? NULL : &buf, -1, count)) break;
if (OP(type ? NULL : &s->gb, -1, count)) break;
} else if (!type) {
int pixel;
count = bytestream_get_byte(&buf); /* count==0 gives nop */
pixel = bytestream_get_byte(&buf);
count = bytestream2_get_byte(&s->gb); /* count==0 gives nop */
pixel = bytestream2_get_byte(&s->gb);
if (OP(NULL, pixel, count)) break;
} else {
int pixel;
type = bytestream_get_le16(&buf);
type = bytestream2_get_le16(&s->gb);
count = type & 0x3FFF;
type >>= 14;
if (!count) {
@@ -164,11 +162,11 @@ static int decode_frame(AVCodecContext *avctx,
}
continue;
}
pixel = type == 3 ? bytestream_get_byte(&buf) : -1;
pixel = type == 3 ? bytestream2_get_byte(&s->gb) : -1;
if (type == 1) count += 0x4000;
if (OP(type == 2 ? &buf : NULL, pixel, count)) break;
if (OP(type == 2 ? &s->gb : NULL, pixel, count)) break;
}
} while (buf + 1 < buf_end);
} while (bytestream2_get_bytes_left(&s->gb) > 0);
memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);

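For orientation: the op() helper documented above multiplexes three run types, a copy when a GetByteContext is passed, a fill when it is NULL and pixel is non-negative, and a plain skip otherwise. Below is a minimal, simplified sketch of that dispatch; run_op is a hypothetical name, and the x/width/linesize row-wrapping bookkeeping of the real op() is omitted.

#include <stdint.h>
#include <string.h>
#include "bytestream.h"               /* GetByteContext, bytestream2_* (libavcodec) */

/* Simplified model of op(): returns non-zero when either buffer is exhausted. */
static int run_op(uint8_t **dst, const uint8_t *dst_end,
                  GetByteContext *gb, int pixel, int count)
{
    if (dst_end - *dst < count)
        return 1;                                  /* destination exhausted */
    if (gb) {                                      /* copy operation */
        if (bytestream2_get_bytes_left(gb) < count)
            return 1;                              /* source exhausted */
        bytestream2_get_bufferu(gb, *dst, count);  /* length already checked */
    } else if (pixel >= 0) {
        memset(*dst, pixel, count);                /* fill operation */
    }                                              /* pixel < 0: skip operation */
    *dst += count;
    return 0;
}

Keeping all three cases in one helper is what lets the RLE loop in decode_frame() stay a single OP() macro call per run.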
libavcodec/bytestream.h

@@ -170,6 +170,12 @@ static av_always_inline void bytestream2_skip(GetByteContext *g,
g->buffer += FFMIN(g->buffer_end - g->buffer, size);
}
static av_always_inline void bytestream2_skipu(GetByteContext *g,
unsigned int size)
{
g->buffer += size;
}
static av_always_inline void bytestream2_skip_p(PutByteContext *p,
unsigned int size)
{
@@ -257,6 +263,15 @@ static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g,
return size2;
}
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g,
uint8_t *dst,
unsigned int size)
{
memcpy(dst, g->buffer, size);
g->buffer += size;
return size;
}
static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p,
const uint8_t *src,
unsigned int size)
@@ -272,6 +287,15 @@ static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p,
return size2;
}
static av_always_inline unsigned int bytestream2_put_bufferu(PutByteContext *p,
const uint8_t *src,
unsigned int size)
{
memcpy(p->buffer, src, size);
p->buffer += size;
return size;
}
static av_always_inline void bytestream2_set_buffer(PutByteContext *p,
const uint8_t c,
unsigned int size)
@@ -286,6 +310,14 @@ static av_always_inline void bytestream2_set_buffer(PutByteContext *p,
p->buffer += size2;
}
static av_always_inline void bytestream2_set_bufferu(PutByteContext *p,
const uint8_t c,
unsigned int size)
{
memset(p->buffer, c, size);
p->buffer += size;
}
static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p)
{
return p->eof;

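The new *_u ("unchecked") variants mirror the checked calls above but omit the per-call end-of-buffer test, so they only make sense after the caller has verified the remaining length itself, which is exactly how the anm.c init code uses bytestream2_skipu() and bytestream2_get_le32u(). A hedged sketch of that pattern follows; read_palette is a hypothetical example function, not code from this commit.

#include <stdint.h>
#include "bytestream.h"          /* libavcodec GetByteContext API */
#include "libavutil/error.h"     /* AVERROR_INVALIDDATA */

/* Read a 256-entry little-endian palette: bounds-check once up front,
 * then use the cheaper unchecked reads inside the loop. */
static int read_palette(GetByteContext *gb, uint32_t pal[256])
{
    int i;
    if (bytestream2_get_bytes_left(gb) < 256 * 4)
        return AVERROR_INVALIDDATA;          /* single checked test */
    for (i = 0; i < 256; i++)
        pal[i] = bytestream2_get_le32u(gb);  /* unchecked: length proven above */
    return 0;
}

One bounds check outside the loop replaces 256 per-read checks, which is the point of the unchecked variants: trade per-call safety for speed where the total length has already been proven.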
libavcodec/h264.c

@@ -3034,7 +3034,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
h->ref_count[1]= h->pps.ref_count[1];
if(h->slice_type_nos != AV_PICTURE_TYPE_I){
unsigned max= (16<<(s->picture_structure != PICT_FRAME))-1;
unsigned max= s->picture_structure == PICT_FRAME ? 15 : 31;
if(h->slice_type_nos == AV_PICTURE_TYPE_B){
h->direct_spatial_mv_pred= get_bits1(&s->gb);
}
@@ -3044,13 +3045,14 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
h->ref_count[0]= get_ue_golomb(&s->gb) + 1;
if(h->slice_type_nos==AV_PICTURE_TYPE_B)
h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
}
if (h->ref_count[0]-1 > max || h->ref_count[1]-1 > max){
av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
h->ref_count[0] = h->ref_count[1] = 1;
return -1;
return AVERROR_INVALIDDATA;
}
if(h->slice_type_nos == AV_PICTURE_TYPE_B)
h->list_count= 2;
else

libavcodec/jvdec.c

@@ -150,7 +150,7 @@ static int decode_frame(AVCodecContext *avctx,
if (video_type == 0 || video_type == 1) {
GetBitContext gb;
init_get_bits(&gb, buf, FFMIN(video_size, (buf_end - buf) * 8));
init_get_bits(&gb, buf, 8 * FFMIN(video_size, buf_end - buf));
for (j = 0; j < avctx->height; j += 8)
for (i = 0; i < avctx->width; i += 8)

libavcodec/snowdec.c

@@ -142,6 +142,7 @@ static int decode_q_branch(SnowContext *s, int level, int x, int y){
const BlockNode *tl = y && x ? &s->block[index-w-1] : left;
const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
int res;
if(s->keyframe){
set_blocks(s, level, x, y, null_block.color[0], null_block.color[1], null_block.color[2], null_block.mx, null_block.my, null_block.ref, BLOCK_INTRA);
@@ -170,7 +171,7 @@ static int decode_q_branch(SnowContext *s, int level, int x, int y){
ref= get_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], 0);
if (ref >= s->ref_frames) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid ref\n");
return -1;
return AVERROR_INVALIDDATA;
}
pred_mv(s, &mx, &my, ref, left, top, tr);
mx+= get_symbol(&s->c, &s->block_state[128 + 32*(mx_context + 16*!!ref)], 1);
@@ -178,14 +179,11 @@ static int decode_q_branch(SnowContext *s, int level, int x, int y){
}
set_blocks(s, level, x, y, l, cb, cr, mx, my, ref, type);
}else{
if (decode_q_branch(s, level+1, 2*x+0, 2*y+0)<0)
return -1;
if (decode_q_branch(s, level+1, 2*x+1, 2*y+0)<0)
return -1;
if (decode_q_branch(s, level+1, 2*x+0, 2*y+1)<0)
return -1;
if (decode_q_branch(s, level+1, 2*x+1, 2*y+1)<0)
return -1;
if ((res = decode_q_branch(s, level+1, 2*x+0, 2*y+0)) < 0 ||
(res = decode_q_branch(s, level+1, 2*x+1, 2*y+0)) < 0 ||
(res = decode_q_branch(s, level+1, 2*x+0, 2*y+1)) < 0 ||
(res = decode_q_branch(s, level+1, 2*x+1, 2*y+1)) < 0)
return res;
}
return 0;
}
@@ -367,11 +365,12 @@ static int decode_blocks(SnowContext *s){
int x, y;
int w= s->b_width;
int h= s->b_height;
int res;
for(y=0; y<h; y++){
for(x=0; x<w; x++){
if (decode_q_branch(s, 0, x, y) < 0)
return -1;
if ((res = decode_q_branch(s, 0, x, y)) < 0)
return res;
}
}
return 0;
@@ -385,6 +384,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
int bytes_read;
AVFrame *picture = data;
int level, orientation, plane_index;
int res;
ff_init_range_decoder(c, buf, buf_size);
ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
@@ -413,8 +413,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
if(avctx->debug&FF_DEBUG_PICT_INFO)
av_log(avctx, AV_LOG_ERROR, "keyframe:%d qlog:%d\n", s->keyframe, s->qlog);
if (decode_blocks(s) < 0)
return -1;
if ((res = decode_blocks(s)) < 0)
return res;
for(plane_index=0; plane_index<3; plane_index++){
Plane *p= &s->plane[plane_index];

libavformat/jvdec.c

@@ -79,6 +79,7 @@ static int read_header(AVFormatContext *s)
vst->codec->codec_tag = 0; /* no fourcc */
vst->codec->width = avio_rl16(pb);
vst->codec->height = avio_rl16(pb);
vst->duration =
vst->nb_frames =
ast->nb_index_entries = avio_rl16(pb);
avpriv_set_pts_info(vst, 64, avio_rl16(pb), 1000);

tests/codec-regression.sh

@@ -227,6 +227,11 @@ do_video_encoding ffv1.avi "-strict -2 -an -vcodec ffv1"
do_video_decoding
fi
if [ -n "$do_ffvhuff" ] ; then
do_video_encoding ffvhuff.avi "-an -vcodec ffvhuff"
do_video_decoding ""
fi
if [ -n "$do_snow" ] ; then
do_video_encoding snow.avi "-strict -2 -an -vcodec snow -qscale 2 -flags +qpel -me_method iter -dia_size 2 -cmp 12 -subcmp 12 -s 128x64"
do_video_decoding "" "-s 352x288"

tests/lavf-regression.sh

@@ -211,6 +211,10 @@ if [ -n "$do_jpg" ] ; then
do_image_formats jpg "-pix_fmt yuvj420p" "-f image2"
fi
if [ -n "$do_pam" ] ; then
do_image_formats pam
fi
if [ -n "$do_pcx" ] ; then
do_image_formats pcx
fi

tests/ref/lavf/pam Normal file

@@ -0,0 +1,3 @@
0dce5565222cf0f8b309467f279aecd2 *./tests/data/images/pam/02.pam
./tests/data/images/pam/%02d.pam CRC=0x6da01946
304191 ./tests/data/images/pam/02.pam

tests/ref/vsynth1/ffvhuff Normal file

@@ -0,0 +1,4 @@
0632ffae6f1e06dd299bf41a845b9099 *./tests/data/vsynth1/ffvhuff.avi
5987208 ./tests/data/vsynth1/ffvhuff.avi
c5ccac874dbf808e9088bc3107860042 *./tests/data/ffvhuff.vsynth1.out.yuv
stddev: 0.00 PSNR:999.99 MAXDIFF: 0 bytes: 7603200/ 7603200

tests/ref/vsynth2/ffvhuff Normal file

@@ -0,0 +1,4 @@
63926d8835dd5779dca0a4bc081ca8ae *./tests/data/vsynth2/ffvhuff.avi
4988056 ./tests/data/vsynth2/ffvhuff.avi
dde5895817ad9d219f79a52d0bdfb001 *./tests/data/ffvhuff.vsynth2.out.yuv
stddev: 0.00 PSNR:999.99 MAXDIFF: 0 bytes: 7603200/ 7603200