Mirror of https://github.com/FFmpeg/FFmpeg.git, synced 2024-12-23 12:43:46 +02:00
Merge remote-tracking branch 'qatar/master'
* qatar/master:
  make av_interleaved_write_frame() flush packets when pkt is NULL
  mpegts: Fix dead error checks
  vc1: Do not read from array if index is invalid.
  targa: convert to bytestream2.
  rv34: set mb_num_left to 0 after finishing a frame

Conflicts:
  libavcodec/targa.c
  libavcodec/vc1data.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
This commit is contained in:
commit 6999f8bcf5
@@ -1580,6 +1580,7 @@ static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
 
     ff_er_frame_end(s);
     ff_MPV_frame_end(s);
+    s->mb_num_left = 0;
 
     if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
         ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
@@ -1778,6 +1779,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
              * only complete frames */
             ff_er_frame_end(s);
             ff_MPV_frame_end(s);
+            s->mb_num_left = 0;
             ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
             return AVERROR_INVALIDDATA;
         }
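The two hunks above are the "rv34: set mb_num_left to 0 after finishing a frame" part of the merge: once a frame has been fully reconstructed (or decoding of it is abandoned), the count of still-undecoded macroblocks is cleared, so the next decode call cannot mistake a normally finished frame for a damaged one. A self-contained toy sketch of the pattern (not FFmpeg internals; all names below are illustrative):

#include <stdio.h>

/* Toy decoder state: tracks how many macroblocks of the current frame
 * have not been decoded yet. */
typedef struct ToyDecoder {
    int mb_width, mb_height;
    int mb_num_left;
} ToyDecoder;

static void toy_finish_frame(ToyDecoder *d)
{
    /* ... hand the finished picture to the caller ... */
    d->mb_num_left = 0;   /* the fix: nothing is left over from this frame */
}

static void toy_start_frame(ToyDecoder *d)
{
    /* Without the reset above, this would report bogus "lost" macroblocks
     * even after a frame that completed normally. */
    if (d->mb_num_left > 0)
        fprintf(stderr, "concealing %d missing macroblocks\n", d->mb_num_left);
    d->mb_num_left = d->mb_width * d->mb_height;
}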
@@ -27,83 +27,76 @@
 
 typedef struct TargaContext {
     AVFrame picture;
+    GetByteContext gb;
 
-    int width, height;
-    int bpp;
     int color_type;
     int compression_type;
 } TargaContext;
 
-#define CHECK_BUFFER_SIZE(buf, buf_end, needed, where) \
-    if(needed > buf_end - buf){ \
-        av_log(avctx, AV_LOG_ERROR, "Problem: unexpected end of data while reading " where "\n"); \
-        return -1; \
-    } \
-
-static int targa_decode_rle(AVCodecContext *avctx, TargaContext *s, const uint8_t *src, int src_size, uint8_t *dst, int w, int h, int stride, int bpp)
+static int targa_decode_rle(AVCodecContext *avctx, TargaContext *s,
+                            uint8_t *dst, int w, int h, int stride, int bpp)
 {
-    int i, x, y;
+    int x, y;
     int depth = (bpp + 1) >> 3;
     int type, count;
     int diff;
-    const uint8_t *src_end = src + src_size;
 
     diff = stride - w * depth;
     x = y = 0;
-    while(y < h){
-        CHECK_BUFFER_SIZE(src, src_end, 1, "image type");
-        type = *src++;
+    while (y < h) {
+        if (bytestream2_get_bytes_left(&s->gb) <= 0) {
+            av_log(avctx, AV_LOG_ERROR,
+                   "Ran ouf of data before end-of-image\n");
+            return AVERROR_INVALIDDATA;
+        }
+        type = bytestream2_get_byteu(&s->gb);
        count = (type & 0x7F) + 1;
        type &= 0x80;
        if(x + count > (h - y) * w){
-            av_log(avctx, AV_LOG_ERROR, "Packet went out of bounds: position (%i,%i) size %i\n", x, y, count);
-            return -1;
+            av_log(avctx, AV_LOG_ERROR,
+                   "Packet went out of bounds: position (%i,%i) size %i\n",
+                   x, y, count);
+            return AVERROR_INVALIDDATA;
        }
-        if(type){
-            CHECK_BUFFER_SIZE(src, src_end, depth, "image data");
-        }else{
-            CHECK_BUFFER_SIZE(src, src_end, count * depth, "image data");
+        if (!type) {
+            do {
+                int n = FFMIN(count, w - x);
+                bytestream2_get_buffer(&s->gb, dst, n * depth);
+                count -= n;
+                dst += n * depth;
+                x += n;
+                if (x == w) {
+                    x = 0;
+                    y++;
+                    dst += diff;
+                }
+            } while (count > 0);
+        } else {
+            uint8_t tmp[4];
+            bytestream2_get_buffer(&s->gb, tmp, depth);
+            do {
+                int n = FFMIN(count, w - x);
+                count -= n;
+                x += n;
+                do {
+                    memcpy(dst, tmp, depth);
+                    dst += depth;
+                } while (--n);
+                if (x == w) {
+                    x = 0;
+                    y++;
+                    dst += diff;
+                }
+            } while (count > 0);
        }
-        for(i = 0; i < count; i++){
-            switch(depth){
-            case 1:
-                *dst = *src;
-                break;
-            case 2:
-                AV_WN16A(dst, AV_RN16A(src));
-                break;
-            case 3:
-                dst[0] = src[0];
-                dst[1] = src[1];
-                dst[2] = src[2];
-                break;
-            case 4:
-                AV_WN32A(dst, AV_RN32A(src));
-                break;
-            }
-            dst += depth;
-            if(!type)
-                src += depth;
-
-            x++;
-            if(x == w){
-                x = 0;
-                y++;
-                dst += diff;
-            }
-        }
-        if(type)
-            src += depth;
    }
-    return src_size;
+    return 0;
 }
 
 static int decode_frame(AVCodecContext *avctx,
                         void *data, int *data_size,
                         AVPacket *avpkt)
 {
-    const uint8_t *buf = avpkt->data;
-    const uint8_t *buf_end = avpkt->data + avpkt->size;
     TargaContext * const s = avctx->priv_data;
     AVFrame *picture = data;
     AVFrame * const p = &s->picture;
@@ -112,32 +105,38 @@ static int decode_frame(AVCodecContext *avctx,
     int idlen, pal, compr, y, w, h, bpp, flags;
     int first_clr, colors, csize;
 
+    bytestream2_init(&s->gb, avpkt->data, avpkt->size);
+
     /* parse image header */
-    CHECK_BUFFER_SIZE(buf, buf_end, 18, "header");
-    idlen = *buf++;
-    pal = *buf++;
-    compr = *buf++;
-    first_clr = bytestream_get_le16(&buf);
-    colors = bytestream_get_le16(&buf);
-    csize = *buf++;
+    idlen = bytestream2_get_byte(&s->gb);
+    pal = bytestream2_get_byte(&s->gb);
+    compr = bytestream2_get_byte(&s->gb);
+    first_clr = bytestream2_get_le16(&s->gb);
+    colors = bytestream2_get_le16(&s->gb);
+    csize = bytestream2_get_byte(&s->gb);
+    bytestream2_skip(&s->gb, 4); /* 2: x, 2: y */
+    w = bytestream2_get_le16(&s->gb);
+    h = bytestream2_get_le16(&s->gb);
+    bpp = bytestream2_get_byte(&s->gb);
+
+    if (bytestream2_get_bytes_left(&s->gb) <= idlen) {
+        av_log(avctx, AV_LOG_ERROR,
+               "Not enough data to read header\n");
+        return AVERROR_INVALIDDATA;
+    }
+
+    flags = bytestream2_get_byte(&s->gb);
+
     if (!pal && (first_clr || colors || csize)) {
         av_log(avctx, AV_LOG_WARNING, "File without colormap has colormap information set.\n");
         // specification says we should ignore those value in this case
         first_clr = colors = csize = 0;
     }
-    buf += 2; /* x */
-    y = bytestream_get_le16(&buf);
-    w = bytestream_get_le16(&buf);
-    h = bytestream_get_le16(&buf);
-    bpp = *buf++;
-    flags = *buf++;
-    //skip identifier if any
-    CHECK_BUFFER_SIZE(buf, buf_end, idlen, "identifiers");
-    buf += idlen;
-    s->bpp = bpp;
-    s->width = w;
-    s->height = h;
-    switch(s->bpp){
+
+    // skip identifier if any
+    bytestream2_skip(&s->gb, idlen);
+
+    switch(bpp){
     case 8:
         avctx->pix_fmt = ((compr & (~TGA_RLE)) == TGA_BW) ? PIX_FMT_GRAY8 : PIX_FMT_PAL8;
         break;
@@ -152,7 +151,7 @@ static int decode_frame(AVCodecContext *avctx,
         avctx->pix_fmt = PIX_FMT_BGRA;
         break;
     default:
-        av_log(avctx, AV_LOG_ERROR, "Bit depth %i is not supported\n", s->bpp);
+        av_log(avctx, AV_LOG_ERROR, "Bit depth %i is not supported\n", bpp);
         return -1;
     }
 
@@ -190,23 +189,27 @@ static int decode_frame(AVCodecContext *avctx,
             return -1;
         }
         pal_size = colors * pal_sample_size;
-        CHECK_BUFFER_SIZE(buf, buf_end, pal_size, "color table");
         if(avctx->pix_fmt != PIX_FMT_PAL8)//should not occur but skip palette anyway
-            buf += pal_size;
+            bytestream2_skip(&s->gb, pal_size);
         else{
             int t;
             uint32_t *pal = ((uint32_t *)p->data[1]) + first_clr;
 
+            if (bytestream2_get_bytes_left(&s->gb) < pal_size) {
+                av_log(avctx, AV_LOG_ERROR,
+                       "Not enough data to read palette\n");
+                return AVERROR_INVALIDDATA;
+            }
             switch (pal_sample_size) {
             case 3:
                 /* RGB24 */
                 for (t = 0; t < colors; t++)
-                    *pal++ = (0xffU<<24) | bytestream_get_le24(&buf);
+                    *pal++ = (0xffU<<24) | bytestream2_get_le24u(&s->gb);
                 break;
             case 2:
                 /* RGB555 */
                 for (t = 0; t < colors; t++) {
-                    uint32_t v = bytestream_get_le16(&buf);
+                    uint32_t v = bytestream2_get_le16u(&s->gb);
                     v = ((v & 0x7C00) << 9) |
                         ((v & 0x03E0) << 6) |
                         ((v & 0x001F) << 3);
@@ -219,44 +222,45 @@ static int decode_frame(AVCodecContext *avctx,
             p->palette_has_changed = 1;
         }
     }
-    if((compr & (~TGA_RLE)) == TGA_NODATA)
-        memset(p->data[0], 0, p->linesize[0] * s->height);
-    else{
+    if ((compr & (~TGA_RLE)) == TGA_NODATA) {
+        memset(p->data[0], 0, p->linesize[0] * h);
+    } else {
         if(compr & TGA_RLE){
-            int res = targa_decode_rle(avctx, s, buf, buf_end - buf, dst, avctx->width, avctx->height, stride, bpp);
+            int res = targa_decode_rle(avctx, s, dst, w, h, stride, bpp);
             if (res < 0)
-                return -1;
-            buf += res;
-        }else{
-            size_t img_size = s->width * ((s->bpp + 1) >> 3);
-            CHECK_BUFFER_SIZE(buf, buf_end, img_size * s->height , "image data");
-            for(y = 0; y < s->height; y++){
-                memcpy(dst, buf, img_size);
+                return res;
+        } else {
+            size_t img_size = w * ((bpp + 1) >> 3);
+            if (bytestream2_get_bytes_left(&s->gb) < img_size * h) {
+                av_log(avctx, AV_LOG_ERROR,
+                       "Not enough data available for image\n");
+                return AVERROR_INVALIDDATA;
+            }
+            for (y = 0; y < h; y++) {
+                bytestream2_get_bufferu(&s->gb, dst, img_size);
                 dst += stride;
-                buf += img_size;
             }
         }
     }
     if(flags & 0x10){ // right-to-left, needs horizontal flip
         int x;
-        for(y = 0; y < s->height; y++){
+        for(y = 0; y < h; y++){
             void *line = &p->data[0][y * p->linesize[0]];
-            for(x = 0; x < s->width >> 1; x++){
-                switch(s->bpp){
+            for(x = 0; x < w >> 1; x++){
+                switch(bpp){
                 case 32:
-                    FFSWAP(uint32_t, ((uint32_t *)line)[x], ((uint32_t *)line)[s->width - x - 1]);
+                    FFSWAP(uint32_t, ((uint32_t *)line)[x], ((uint32_t *)line)[w - x - 1]);
                     break;
                 case 24:
-                    FFSWAP(uint8_t, ((uint8_t *)line)[3 * x ], ((uint8_t *)line)[3 * s->width - 3 * x - 3]);
-                    FFSWAP(uint8_t, ((uint8_t *)line)[3 * x + 1], ((uint8_t *)line)[3 * s->width - 3 * x - 2]);
-                    FFSWAP(uint8_t, ((uint8_t *)line)[3 * x + 2], ((uint8_t *)line)[3 * s->width - 3 * x - 1]);
+                    FFSWAP(uint8_t, ((uint8_t *)line)[3 * x ], ((uint8_t *)line)[3 * w - 3 * x - 3]);
+                    FFSWAP(uint8_t, ((uint8_t *)line)[3 * x + 1], ((uint8_t *)line)[3 * w - 3 * x - 2]);
+                    FFSWAP(uint8_t, ((uint8_t *)line)[3 * x + 2], ((uint8_t *)line)[3 * w - 3 * x - 1]);
                     break;
                 case 16:
-                    FFSWAP(uint16_t, ((uint16_t *)line)[x], ((uint16_t *)line)[s->width - x - 1]);
+                    FFSWAP(uint16_t, ((uint16_t *)line)[x], ((uint16_t *)line)[w - x - 1]);
                     break;
                 case 8:
-                    FFSWAP(uint8_t, ((uint8_t *)line)[x], ((uint8_t *)line)[s->width - x - 1]);
+                    FFSWAP(uint8_t, ((uint8_t *)line)[x], ((uint8_t *)line)[w - x - 1]);
                 }
             }
         }
     }
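The Targa hunks above carry the "targa: convert to bytestream2." change: the hand-maintained src/buf pointers and the CHECK_BUFFER_SIZE macro are replaced by a GetByteContext reader that tracks how much input remains. A minimal sketch of that reader pattern, assuming the bytestream2 helpers from FFmpeg's internal libavcodec/bytestream.h that also appear in the diff (bytestream2_init, bytestream2_get_byte, bytestream2_get_le16, bytestream2_skip, bytestream2_get_bytes_left); the function itself is illustrative, not part of the decoder:

#include "libavcodec/bytestream.h"

/* Sketch of the GetByteContext pattern: the reader knows how many input
 * bytes are left, so explicit pointer-vs-end checks go away. */
static int parse_tga_header(const uint8_t *buf, int size)
{
    GetByteContext gb;
    int idlen, w, h, bpp;

    bytestream2_init(&gb, buf, size);

    idlen = bytestream2_get_byte(&gb); /* length of the image-ID field */
    bytestream2_skip(&gb, 11);         /* colormap spec + x/y origin */
    w     = bytestream2_get_le16(&gb);
    h     = bytestream2_get_le16(&gb);
    bpp   = bytestream2_get_byte(&gb);

    if (bytestream2_get_bytes_left(&gb) <= idlen) /* header truncated */
        return AVERROR_INVALIDDATA;
    bytestream2_skip(&gb, idlen);                 /* skip the image ID */

    return w * h * ((bpp + 1) >> 3); /* pixel data size implied by header */
}

The checked bytestream2_get_* calls return 0 rather than overrunning when the buffer is exhausted; the unchecked *_u variants used in the diff (bytestream2_get_byteu, bytestream2_get_le24u, ...) skip that internal check and are only called after an explicit bytestream2_get_bytes_left() test.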
@@ -493,7 +493,7 @@ static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
             int nr, dr;
             nr = get_bits(gb, 8);
             dr = get_bits(gb, 4);
-            if (nr && nr < 8 && dr && dr < 3) {
+            if (nr > 0 && nr < 8 && dr > 0 && dr < 3) {
                 v->s.avctx->time_base.num = ff_vc1_fps_dr[dr - 1];
                 v->s.avctx->time_base.den = ff_vc1_fps_nr[nr - 1] * 1000;
             }
@@ -84,7 +84,7 @@ const uint8_t ff_vc1_mbmode_intfrp[2][15][4] = {
     }
 };
 
-const int ff_vc1_fps_nr[7] = { 24, 25, 30, 50, 60, 48, 72},
+const int ff_vc1_fps_nr[7] = { 24, 25, 30, 50, 60, 48, 72 },
           ff_vc1_fps_dr[2] = { 1000, 1001 };
 const uint8_t ff_vc1_pquant_table[3][32] = {
     /* Implicit quantizer */
@@ -2374,6 +2374,7 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
     int16_t *dc_val;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
     int q1, q2 = 0;
+    int dqscale_index;
 
     wrap = s->block_wrap[n];
     dc_val = s->dc_val[0] + s->block_index[n];
@@ -2386,15 +2387,18 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
     a = dc_val[ - wrap];
     /* scale predictors if needed */
     q1 = s->current_picture.f.qscale_table[mb_pos];
+    dqscale_index = s->y_dc_scale_table[q1] - 1;
+    if (dqscale_index < 0)
+        return 0;
     if (c_avail && (n != 1 && n != 3)) {
         q2 = s->current_picture.f.qscale_table[mb_pos - 1];
         if (q2 && q2 != q1)
-            c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
+            c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
     }
     if (a_avail && (n != 2 && n != 3)) {
         q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
         if (q2 && q2 != q1)
-            a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
+            a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
     }
     if (a_avail && c_avail && (n != 3)) {
         int off = mb_pos;
@@ -2404,7 +2408,7 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
             off -= s->mb_stride;
         q2 = s->current_picture.f.qscale_table[off];
         if (q2 && q2 != q1)
-            b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
+            b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
     }
 
     if (a_avail && c_avail) {
@@ -2821,6 +2825,8 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n,
             q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
             q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
 
+            if (q1 < 1)
+                return AVERROR_INVALIDDATA;
             if (dc_pred_dir) { // left
                 for (k = 1; k < 8; k++)
                     block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
@@ -2863,6 +2869,8 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n,
         if (q2 && q1 != q2) {
             q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
             q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+            if (q1 < 1)
+                return AVERROR_INVALIDDATA;
             for (k = 1; k < 8; k++)
                 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
         }
@@ -2873,6 +2881,8 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n,
         if (q2 && q1 != q2) {
             q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
             q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+            if (q1 < 1)
+                return AVERROR_INVALIDDATA;
             for (k = 1; k < 8; k++)
                 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
         }
@@ -3031,6 +3041,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n,
             q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
             q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
 
+            if (q1 < 1)
+                return AVERROR_INVALIDDATA;
             if (dc_pred_dir) { // left
                 for (k = 1; k < 8; k++)
                     block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
@@ -3073,6 +3085,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n,
         if (q2 && q1 != q2) {
             q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
             q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+            if (q1 < 1)
+                return AVERROR_INVALIDDATA;
             for (k = 1; k < 8; k++)
                 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
         }
@@ -3083,6 +3097,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n,
         if (q2 && q1 != q2) {
             q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
             q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+            if (q1 < 1)
+                return AVERROR_INVALIDDATA;
             for (k = 1; k < 8; k++)
                 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
         }
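The VC-1 decoder hunks above implement "vc1: Do not read from array if index is invalid.": ff_vc1_dqscale is indexed with s->y_dc_scale_table[q1] - 1 (or q1 - 1), and a zero scale value turns that into an index of -1, one element before the table. The new code computes the index once (dqscale_index) or checks q1 < 1 and bails out before the lookup. A self-contained sketch of the guard with illustrative names (only the index expression and the rounding/shift constants mirror the diff):

#include <stdint.h>

/* Hypothetical scaled-prediction helper: refuse to index scale_lut[-1]. */
static int rescale_dc_predictor(int *pred, int q_cur, int q_prev,
                                const uint8_t *dc_scale_lut,
                                const int *scale_lut)
{
    int idx = dc_scale_lut[q_cur] - 1;   /* mirrors y_dc_scale_table[q1] - 1 */

    if (idx < 0)                         /* would read one entry before the table */
        return -1;                       /* caller treats this as invalid data */
    if (q_prev && q_prev != q_cur)
        *pred = (*pred * dc_scale_lut[q_prev] * scale_lut[idx] + 0x20000) >> 18;
    return 0;
}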
@@ -1607,6 +1607,8 @@ int av_write_frame(AVFormatContext *s, AVPacket *pkt);
  * @param s media file handle
  * @param pkt The packet containing the data to be written. Libavformat takes
  * ownership of the data and will free it when it sees fit using the packet's
+ * This can be NULL (at any time, not just at the end), to flush the
+ * interleaving queues.
  * @ref AVPacket.destruct "destruct" field. The caller must not access the data
  * after this function returns, as it may already be freed.
  * Packet's @ref AVPacket.stream_index "stream_index" field must be set to the
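This documentation hunk, together with the av_interleaved_write_frame() change at the end of the diff, is the "make av_interleaved_write_frame() flush packets when pkt is NULL" part of the merge. A minimal caller-side sketch, assuming an already opened and header-initialized muxing context; get_next_packet() is a hypothetical application callback and error handling is kept to the bare minimum:

#include <libavformat/avformat.h>

int get_next_packet(AVPacket *pkt); /* hypothetical: the application's packet source */

static int mux_all(AVFormatContext *oc)
{
    AVPacket pkt;

    while (get_next_packet(&pkt) >= 0) {
        int ret = av_interleaved_write_frame(oc, &pkt);
        if (ret < 0)
            return ret;
    }

    /* NULL packet: drain whatever is still buffered in the interleaving queues */
    return av_interleaved_write_frame(oc, NULL);
}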
@@ -1428,17 +1428,19 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
         return;
 
     clear_program(ts, h->id);
-    pcr_pid = get16(&p, p_end) & 0x1fff;
+    pcr_pid = get16(&p, p_end);
     if (pcr_pid < 0)
         return;
+    pcr_pid &= 0x1fff;
     add_pid_to_pmt(ts, h->id, pcr_pid);
     set_pcr_pid(ts->stream, h->id, pcr_pid);
 
     av_dlog(ts->stream, "pcr_pid=0x%x\n", pcr_pid);
 
-    program_info_length = get16(&p, p_end) & 0xfff;
+    program_info_length = get16(&p, p_end);
     if (program_info_length < 0)
         return;
+    program_info_length &= 0xfff;
     while(program_info_length >= 2) {
         uint8_t tag, len;
         tag = get8(&p, p_end);
@@ -1476,9 +1478,10 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
         stream_type = get8(&p, p_end);
         if (stream_type < 0)
             break;
-        pid = get16(&p, p_end) & 0x1fff;
+        pid = get16(&p, p_end);
         if (pid < 0)
             break;
+        pid &= 0x1fff;
 
         /* now create stream */
         if (ts->pids[pid] && ts->pids[pid]->type == MPEGTS_PES) {
@@ -1516,9 +1519,10 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
 
         ff_program_add_stream_index(ts->stream, h->id, st->index);
 
-        desc_list_len = get16(&p, p_end) & 0xfff;
+        desc_list_len = get16(&p, p_end);
         if (desc_list_len < 0)
             break;
+        desc_list_len &= 0xfff;
         desc_list_end = p + desc_list_len;
         if (desc_list_end > p_end)
             break;
@@ -1565,9 +1569,10 @@ static void pat_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
         sid = get16(&p, p_end);
         if (sid < 0)
             break;
-        pmt_pid = get16(&p, p_end) & 0x1fff;
+        pmt_pid = get16(&p, p_end);
         if (pmt_pid < 0)
             break;
+        pmt_pid &= 0x1fff;
 
         av_dlog(ts->stream, "sid=0x%x pid=0x%x\n", sid, pmt_pid);
 
@@ -1617,9 +1622,10 @@ static void sdt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
         val = get8(&p, p_end);
         if (val < 0)
             break;
-        desc_list_len = get16(&p, p_end) & 0xfff;
+        desc_list_len = get16(&p, p_end);
         if (desc_list_len < 0)
             break;
+        desc_list_len &= 0xfff;
         desc_list_end = p + desc_list_len;
         if (desc_list_end > p_end)
             break;
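The MPEG-TS hunks above are the "mpegts: Fix dead error checks" change: get16()/get8() signal truncated input by returning a negative value, but masking the result first (e.g. & 0x1fff) makes it non-negative, so the following "< 0" test could never fire. The fix is to test before masking. A tiny stand-alone illustration of the same bug and fix; get16_example is a made-up stand-in for the demuxer's helper:

#include <stdint.h>

/* Stand-in for the demuxer's get16(): returns -1 on truncated input,
 * otherwise a 16-bit big-endian value. */
static int get16_example(const uint8_t **pp, const uint8_t *end)
{
    if (end - *pp < 2)
        return -1;
    int v = ((*pp)[0] << 8) | (*pp)[1];
    *pp += 2;
    return v;
}

static int read_pid(const uint8_t **pp, const uint8_t *end)
{
    /* Dead check: masking first turns -1 into 0x1fff, so "< 0" never fires.
     *     int pid = get16_example(pp, end) & 0x1fff;
     *     if (pid < 0) return -1;
     */
    int pid = get16_example(pp, end);  /* fixed order: check, then mask */
    if (pid < 0)
        return -1;
    return pid & 0x1fff;
}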
@@ -3481,24 +3481,30 @@ static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, in
 }
 
 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
-    AVStream *st= s->streams[ pkt->stream_index];
-    int ret;
+    int ret, flush = 0;
 
-    //FIXME/XXX/HACK drop zero sized packets
-    if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
-        return 0;
+    if (pkt) {
+        AVStream *st= s->streams[ pkt->stream_index];
 
-    av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
-            pkt->size, pkt->dts, pkt->pts);
-    if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
-        return ret;
+        //FIXME/XXX/HACK drop zero sized packets
+        if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
+            return 0;
 
-    if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
-        return AVERROR(EINVAL);
+        av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
+                pkt->size, pkt->dts, pkt->pts);
+        if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
+            return ret;
+
+        if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
+            return AVERROR(EINVAL);
+    } else {
+        av_dlog(s, "av_interleaved_write_frame FLUSH\n");
+        flush = 1;
+    }
 
     for(;;){
         AVPacket opkt;
-        int ret= interleave_packet(s, &opkt, pkt, 0);
+        int ret= interleave_packet(s, &opkt, pkt, flush);
         if(ret<=0) //FIXME cleanup needed for ret<0 ?
             return ret;
 