
Merge commit '7bcaeb408e3eb2d2f37a306009fa7fe7eb0f1d79'

* commit '7bcaeb408e3eb2d2f37a306009fa7fe7eb0f1d79':
  mjpegdec: fix indentation
  rawdec: cosmetics, reformat
  mimic: return meaningful error codes.

Conflicts:
	libavcodec/mjpegdec.c
	libavcodec/rawdec.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer 2013-01-26 15:42:29 +01:00
commit 8380fc8884
3 changed files with 115 additions and 100 deletions
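
Of the three merged patches, only the mimic one is a functional change: its helpers now return 0 on success and a negative AVERROR_* value on failure, and callers propagate that value instead of collapsing every error into -1. The sketch below illustrates that convention in isolation; it is not code from this commit, parse_header() and toy_decode_init() are made-up names, and it assumes the libavutil development headers are available.

/* toy_averror.c -- minimal illustration of the 0-on-success /
 * negative-AVERROR-on-failure convention adopted by the mimic patch.
 * Build (assuming pkg-config knows libavutil):
 *   gcc toy_averror.c $(pkg-config --cflags --libs libavutil) -o toy_averror */
#include <stdio.h>
#include <libavutil/error.h>   /* AVERROR_INVALIDDATA, av_strerror() */

/* Hypothetical helper: reports why it failed instead of a bare -1. */
static int parse_header(const unsigned char *buf, int size)
{
    if (size < 4)
        return AVERROR_INVALIDDATA;   /* truncated input */
    if (buf[0] != 'M')
        return AVERROR_INVALIDDATA;   /* bad magic */
    return 0;                         /* success is 0, not 1 */
}

/* Hypothetical init function: forwards the helper's error code unchanged,
 * mirroring the "if ((ret = ...) < 0) return ret;" pattern in the diff. */
static int toy_decode_init(const unsigned char *buf, int size)
{
    int ret;

    if ((ret = parse_header(buf, size)) < 0)
        return ret;
    return 0;
}

int main(void)
{
    char msg[64];
    int ret = toy_decode_init((const unsigned char *)"xx", 2);

    if (ret < 0) {
        av_strerror(ret, msg, sizeof(msg));   /* human-readable error text */
        printf("init failed: %s\n", msg);
    }
    return 0;
}

The same shape shows up throughout the mimic hunks below, along with AVERROR_PATCHWELCOME for the unsupported resolution change.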

libavcodec/mimic.c

@@ -112,14 +112,15 @@ static const uint8_t col_zag[64] = {
 static av_cold int mimic_decode_init(AVCodecContext *avctx)
 {
     MimicContext *ctx = avctx->priv_data;
+    int ret;
 
     ctx->prev_index = 0;
     ctx->cur_index  = 15;
 
-    if (init_vlc(&ctx->vlc, 11, FF_ARRAY_ELEMS(huffbits),
-                 huffbits, 1, 1, huffcodes, 4, 4, 0)) {
+    if ((ret = init_vlc(&ctx->vlc, 11, FF_ARRAY_ELEMS(huffbits),
+                        huffbits, 1, 1, huffcodes, 4, 4, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "error initializing vlc table\n");
-        return -1;
+        return ret;
     }
     ff_dsputil_init(&ctx->dsp, avctx);
     ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable, col_zag);
@@ -198,16 +199,16 @@ static int vlc_decode_block(MimicContext *ctx, int num_coeffs, int qscale)
 
         vlc = get_vlc2(&ctx->gb, ctx->vlc.table, ctx->vlc.bits, 3);
         if (!vlc) /* end-of-block code */
-            return 1;
-        if (vlc == -1)
             return 0;
+        if (vlc == -1)
+            return AVERROR_INVALIDDATA;
 
         /* pos_add and num_bits are coded in the vlc code */
         pos     += vlc & 15; // pos_add
         num_bits = vlc >> 4; // num_bits
 
         if (pos >= 64)
-            return 0;
+            return AVERROR_INVALIDDATA;
 
         value = get_bits(&ctx->gb, num_bits);
@@ -223,13 +224,13 @@ static int vlc_decode_block(MimicContext *ctx, int num_coeffs, int qscale)
         block[ctx->scantable.permutated[pos]] = coeff;
     }
 
-    return 1;
+    return 0;
 }
 
 static int decode(MimicContext *ctx, int quality, int num_coeffs,
                   int is_iframe)
 {
-    int y, x, plane, cur_row = 0;
+    int ret, y, x, plane, cur_row = 0;
 
     for (plane = 0; plane < 3; plane++) {
         const int is_chroma = !!plane;
@@ -250,8 +251,12 @@ static int decode(MimicContext *ctx, int quality, int num_coeffs,
                  * frames preceding the previous. (get_bits1 == 1)
                  * Chroma planes don't use backreferences. */
                 if (is_chroma || is_iframe || !get_bits1(&ctx->gb)) {
-                    if (!vlc_decode_block(ctx, num_coeffs, qscale))
-                        return 0;
+                    if ((ret = vlc_decode_block(ctx, num_coeffs,
+                                                qscale)) < 0) {
+                        av_log(ctx->avctx, AV_LOG_ERROR, "Error decoding "
+                               "block.\n");
+                        return ret;
+                    }
                     ctx->dsp.idct_put(dst, stride, ctx->dct_block);
                 } else {
                     unsigned int backref = get_bits(&ctx->gb, 4);
@@ -285,7 +290,7 @@ static int decode(MimicContext *ctx, int quality, int num_coeffs,
         }
     }
 
-    return 1;
+    return 0;
 }
 
 /**
@@ -317,7 +322,7 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
 
     if (buf_size <= MIMIC_HEADER_SIZE) {
         av_log(avctx, AV_LOG_ERROR, "insufficient data\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
 
     bytestream2_init(&gb, buf, MIMIC_HEADER_SIZE);
@@ -336,7 +341,7 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
         if (!(width == 160 && height == 120) &&
             !(width == 320 && height == 240)) {
             av_log(avctx, AV_LOG_ERROR, "invalid width/height!\n");
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
 
         ctx->avctx = avctx;
@@ -348,21 +353,21 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
             ctx->num_hblocks[i] = width >> (3 + !!i);
         }
     } else if (width != ctx->avctx->width || height != ctx->avctx->height) {
-        av_log(avctx, AV_LOG_ERROR, "resolution changing is not supported\n");
-        return -1;
+        av_log_missing_feature(avctx, "resolution changing", 1);
+        return AVERROR_PATCHWELCOME;
    }
 
     if (is_pframe && !ctx->buf_ptrs[ctx->prev_index].data[0]) {
         av_log(avctx, AV_LOG_ERROR, "decoding must start with keyframe\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
 
     ctx->buf_ptrs[ctx->cur_index].reference = 3;
     ctx->buf_ptrs[ctx->cur_index].pict_type = is_pframe ? AV_PICTURE_TYPE_P :
                                                           AV_PICTURE_TYPE_I;
-    if (ff_thread_get_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index])) {
+    if ((res = ff_thread_get_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index])) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-        return -1;
+        return res;
     }
 
     ctx->next_prev_index = ctx->cur_index;
@@ -384,10 +389,10 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
 
     res = decode(ctx, quality, num_coeffs, !is_pframe);
     ff_thread_report_progress(&ctx->buf_ptrs[ctx->cur_index], INT_MAX, 0);
-    if (!res) {
+    if (res < 0) {
         if (!(avctx->active_thread_type & FF_THREAD_FRAME)) {
             ff_thread_release_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index]);
-            return -1;
+            return res;
         }
     }

libavcodec/mjpegdec.c

@@ -1735,23 +1735,23 @@ eoi_parser:
                 if (s->bottom_field == !s->interlace_polarity)
                     break;
             }
-                *picture = *s->picture_ptr;
-                *got_frame = 1;
-                s->got_picture = 0;
+            *picture = *s->picture_ptr;
+            *got_frame = 1;
+            s->got_picture = 0;
 
-                if (!s->lossless) {
-                    picture->quality = FFMAX3(s->qscale[0],
-                                              s->qscale[1],
-                                              s->qscale[2]);
-                    picture->qstride = 0;
-                    picture->qscale_table = s->qscale_table;
-                    memset(picture->qscale_table, picture->quality,
-                           (s->width + 15) / 16);
-                    if (avctx->debug & FF_DEBUG_QP)
-                        av_log(avctx, AV_LOG_DEBUG,
-                               "QP: %d\n", picture->quality);
-                    picture->quality *= FF_QP2LAMBDA;
-                }
+            if (!s->lossless) {
+                picture->quality = FFMAX3(s->qscale[0],
+                                          s->qscale[1],
+                                          s->qscale[2]);
+                picture->qstride = 0;
+                picture->qscale_table = s->qscale_table;
+                memset(picture->qscale_table, picture->quality,
+                       (s->width + 15) / 16);
+                if (avctx->debug & FF_DEBUG_QP)
+                    av_log(avctx, AV_LOG_DEBUG,
+                           "QP: %d\n", picture->quality);
+                picture->quality *= FF_QP2LAMBDA;
+            }
 
             goto the_end;
         case SOS:

libavcodec/rawdec.c

@@ -35,8 +35,8 @@
 typedef struct RawVideoContext {
     AVClass *av_class;
     uint32_t palette[AVPALETTE_COUNT];
-    unsigned char * buffer;  /* block of memory for holding one frame */
-    int length;              /* number of bytes in buffer */
+    unsigned char *buffer;   /* block of memory for holding one frame */
+    int length;              /* number of bytes in buffer */
     int flip;
     AVFrame pic;             ///< AVCodecContext.coded_frame
     int tff;
@@ -63,7 +63,7 @@ static const PixelFormatTag pix_fmt_bps_avi[] = {
     { AV_PIX_FMT_RGB555, 16 },
     { AV_PIX_FMT_BGR24,  24 },
     { AV_PIX_FMT_BGRA,   32 },
-    { AV_PIX_FMT_NONE, 0 },
+    { AV_PIX_FMT_NONE,    0 },
 };
 
 static const PixelFormatTag pix_fmt_bps_mov[] = {
@@ -77,10 +77,11 @@ static const PixelFormatTag pix_fmt_bps_mov[] = {
     { AV_PIX_FMT_RGB24,    24 },
     { AV_PIX_FMT_ARGB,     32 },
     { AV_PIX_FMT_MONOWHITE,33 },
-    { AV_PIX_FMT_NONE, 0 },
+    { AV_PIX_FMT_NONE,      0 },
 };
 
-enum AVPixelFormat avpriv_find_pix_fmt(const PixelFormatTag *tags, unsigned int fourcc)
+enum AVPixelFormat avpriv_find_pix_fmt(const PixelFormatTag *tags,
+                                       unsigned int fourcc)
 {
     while (tags->pix_fmt >= 0) {
         if (tags->fourcc == fourcc)
@@ -101,14 +102,18 @@ static av_cold int raw_init_decoder(AVCodecContext *avctx)
 {
     RawVideoContext *context = avctx->priv_data;
 
-    if (avctx->codec_tag == MKTAG('r','a','w',' ') || avctx->codec_tag == MKTAG('N','O','1','6'))
-        avctx->pix_fmt = avpriv_find_pix_fmt(pix_fmt_bps_mov, avctx->bits_per_coded_sample);
-    else if (avctx->codec_tag == MKTAG('W','R','A','W'))
-        avctx->pix_fmt = avpriv_find_pix_fmt(pix_fmt_bps_avi, avctx->bits_per_coded_sample);
+    if (   avctx->codec_tag == MKTAG('r','a','w',' ')
+        || avctx->codec_tag == MKTAG('N','O','1','6'))
+        avctx->pix_fmt = avpriv_find_pix_fmt(pix_fmt_bps_mov,
+                                             avctx->bits_per_coded_sample);
+    else if (avctx->codec_tag == MKTAG('W', 'R', 'A', 'W'))
+        avctx->pix_fmt = avpriv_find_pix_fmt(pix_fmt_bps_avi,
+                                             avctx->bits_per_coded_sample);
     else if (avctx->codec_tag)
         avctx->pix_fmt = avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, avctx->codec_tag);
     else if (avctx->pix_fmt == AV_PIX_FMT_NONE && avctx->bits_per_coded_sample)
-        avctx->pix_fmt = avpriv_find_pix_fmt(pix_fmt_bps_avi, avctx->bits_per_coded_sample);
+        avctx->pix_fmt = avpriv_find_pix_fmt(pix_fmt_bps_avi,
+                                             avctx->bits_per_coded_sample);
 
     if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
         av_log(avctx, AV_LOG_ERROR, "Pixel format was not specified and cannot be detected\n");
@@ -116,10 +121,12 @@ static av_cold int raw_init_decoder(AVCodecContext *avctx)
     }
 
     avpriv_set_systematic_pal2(context->palette, avctx->pix_fmt);
 
-    if((avctx->bits_per_coded_sample == 4 || avctx->bits_per_coded_sample == 2) &&
-       avctx->pix_fmt==AV_PIX_FMT_PAL8 &&
-       (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' '))){
-        context->length = avpicture_get_size(avctx->pix_fmt, FFALIGN(avctx->width, 16), avctx->height);
+    if ((avctx->bits_per_coded_sample == 4 || avctx->bits_per_coded_sample == 2) &&
+        avctx->pix_fmt == AV_PIX_FMT_PAL8 &&
+        (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' '))) {
+        context->length = avpicture_get_size(avctx->pix_fmt,
+                                             FFALIGN(avctx->width, 16),
+                                             avctx->height);
         if (context->length < 0)
             return context->length;
         context->buffer = av_malloc(context->length);
@@ -133,12 +140,14 @@ static av_cold int raw_init_decoder(AVCodecContext *avctx)
 
     context->pic.pict_type = AV_PICTURE_TYPE_I;
     context->pic.key_frame = 1;
-    avctx->coded_frame= &context->pic;
+    avctx->coded_frame = &context->pic;
 
-    if((avctx->extradata_size >= 9 && !memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) ||
+    if ((avctx->extradata_size >= 9 &&
+         !memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) ||
         avctx->codec_tag == MKTAG('c','y','u','v') ||
-        avctx->codec_tag == MKTAG(3, 0, 0, 0) || avctx->codec_tag == MKTAG('W','R','A','W'))
-        context->flip=1;
+        avctx->codec_tag == MKTAG(3, 0, 0, 0) ||
+        avctx->codec_tag == MKTAG('W','R','A','W'))
+        context->flip = 1;
 
     if (avctx->field_order > AV_FIELD_PROGRESSIVE) { /*we have interlaced material flagged in container */
         avctx->coded_frame->interlaced_frame = 1;
@@ -150,20 +159,20 @@ static av_cold int raw_init_decoder(AVCodecContext *avctx)
     return 0;
 }
 
-static void flip(AVCodecContext *avctx, AVPicture * picture){
-    picture->data[0] += picture->linesize[0] * (avctx->height-1);
+static void flip(AVCodecContext *avctx, AVPicture *picture)
+{
+    picture->data[0] += picture->linesize[0] * (avctx->height - 1);
     picture->linesize[0] *= -1;
 }
 
-static int raw_decode(AVCodecContext *avctx,
-                      void *data, int *got_frame,
-                      AVPacket *avpkt)
+static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
+                      AVPacket *avpkt)
 {
-    const uint8_t *buf = avpkt->data;
-    int buf_size = avpkt->size;
-    int linesize_align = 4;
-    RawVideoContext *context = avctx->priv_data;
     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
+    RawVideoContext *context       = avctx->priv_data;
+    const uint8_t *buf             = avpkt->data;
+    int buf_size                   = avpkt->size;
+    int linesize_align             = 4;
     int res, len;
 
     AVFrame *frame = data;
@@ -171,13 +180,13 @@ static int raw_decode(AVCodecContext *avctx,
 
     frame->pict_type        = avctx->coded_frame->pict_type;
     frame->interlaced_frame = avctx->coded_frame->interlaced_frame;
-    frame->top_field_first = avctx->coded_frame->top_field_first;
+    frame->top_field_first  = avctx->coded_frame->top_field_first;
     frame->reordered_opaque = avctx->reordered_opaque;
     frame->pkt_pts          = avctx->pkt->pts;
    frame->pkt_pos          = avctx->pkt->pos;
     frame->pkt_duration     = avctx->pkt->duration;
 
-    if(context->tff>=0){
+    if (context->tff >= 0) {
         frame->interlaced_frame = 1;
         frame->top_field_first  = context->tff;
     }
@@ -190,27 +199,27 @@ static int raw_decode(AVCodecContext *avctx,
         int i;
         uint8_t *dst = context->buffer;
         buf_size = context->length - AVPALETTE_SIZE;
-        if (avctx->bits_per_coded_sample == 4){
-            for(i=0; 2*i+1 < buf_size && i<avpkt->size; i++){
-                dst[2*i+0]= buf[i]>>4;
-                dst[2*i+1]= buf[i]&15;
+        if (avctx->bits_per_coded_sample == 4) {
+            for (i = 0; 2 * i + 1 < buf_size && i<avpkt->size; i++) {
+                dst[2 * i + 0] = buf[i] >> 4;
+                dst[2 * i + 1] = buf[i] & 15;
             }
             linesize_align = 8;
         } else {
            av_assert0(avctx->bits_per_coded_sample == 2);
-            for(i=0; 4*i+3 < buf_size && i<avpkt->size; i++){
-                dst[4*i+0]= buf[i]>>6;
-                dst[4*i+1]= buf[i]>>4&3;
-                dst[4*i+2]= buf[i]>>2&3;
-                dst[4*i+3]= buf[i] &3;
+            for (i = 0; 4 * i + 3 < buf_size && i<avpkt->size; i++) {
+                dst[4 * i + 0] = buf[i] >> 6;
+                dst[4 * i + 1] = buf[i] >> 4 & 3;
+                dst[4 * i + 2] = buf[i] >> 2 & 3;
+                dst[4 * i + 3] = buf[i] & 3;
             }
             linesize_align = 16;
         }
-        buf= dst;
+        buf = dst;
     }
 
-    if(avctx->codec_tag == MKTAG('A', 'V', '1', 'x') ||
-       avctx->codec_tag == MKTAG('A', 'V', 'u', 'p'))
+    if (avctx->codec_tag == MKTAG('A', 'V', '1', 'x') ||
+        avctx->codec_tag == MKTAG('A', 'V', 'u', 'p'))
         buf += buf_size - context->length;
 
     len = context->length - (avctx->pix_fmt==AV_PIX_FMT_PAL8 ? AVPALETTE_SIZE : 0);
@@ -222,63 +231,64 @@ static int raw_decode(AVCodecContext *avctx,
     if ((res = avpicture_fill(picture, buf, avctx->pix_fmt,
                               avctx->width, avctx->height)) < 0)
         return res;
 
-    if((avctx->pix_fmt==AV_PIX_FMT_PAL8 && buf_size < context->length) ||
-       (desc->flags & PIX_FMT_PSEUDOPAL)) {
-        frame->data[1]= (uint8_t*)context->palette;
+    if ((avctx->pix_fmt == AV_PIX_FMT_PAL8 && buf_size < context->length) ||
+        (desc->flags & PIX_FMT_PSEUDOPAL)) {
+        frame->data[1] = (uint8_t*)context->palette;
     }
     if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
-        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
+        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE,
+                                                     NULL);
 
         if (pal) {
            memcpy(frame->data[1], pal, AVPALETTE_SIZE);
            frame->palette_has_changed = 1;
        }
    }
-    if((avctx->pix_fmt==AV_PIX_FMT_BGR24    ||
+    if ((avctx->pix_fmt==AV_PIX_FMT_BGR24    ||
        avctx->pix_fmt==AV_PIX_FMT_GRAY8    ||
        avctx->pix_fmt==AV_PIX_FMT_RGB555LE ||
        avctx->pix_fmt==AV_PIX_FMT_RGB555BE ||
        avctx->pix_fmt==AV_PIX_FMT_RGB565LE ||
        avctx->pix_fmt==AV_PIX_FMT_MONOWHITE ||
        avctx->pix_fmt==AV_PIX_FMT_PAL8) &&
-       FFALIGN(frame->linesize[0], linesize_align)*avctx->height <= buf_size)
+        FFALIGN(frame->linesize[0], linesize_align) * avctx->height <= buf_size)
         frame->linesize[0] = FFALIGN(frame->linesize[0], linesize_align);
 
-    if(avctx->pix_fmt == AV_PIX_FMT_NV12 && avctx->codec_tag == MKTAG('N', 'V', '1', '2') &&
-       FFALIGN(frame->linesize[0], linesize_align)*avctx->height +
-       FFALIGN(frame->linesize[1], linesize_align)*((avctx->height+1)/2) <= buf_size) {
+    if (avctx->pix_fmt == AV_PIX_FMT_NV12 && avctx->codec_tag == MKTAG('N', 'V', '1', '2') &&
+        FFALIGN(frame->linesize[0], linesize_align) * avctx->height +
+        FFALIGN(frame->linesize[1], linesize_align) * ((avctx->height + 1) / 2) <= buf_size) {
         int la0 = FFALIGN(frame->linesize[0], linesize_align);
-        frame->data[1] += (la0 - frame->linesize[0])*avctx->height;
+        frame->data[1] += (la0 - frame->linesize[0]) * avctx->height;
         frame->linesize[0] = la0;
         frame->linesize[1] = FFALIGN(frame->linesize[1], linesize_align);
     }
 
-    if(context->flip)
+    if (context->flip)
         flip(avctx, picture);
 
-    if (   avctx->codec_tag == MKTAG('Y', 'V', '1', '2')
-        || avctx->codec_tag == MKTAG('Y', 'V', '1', '6')
-        || avctx->codec_tag == MKTAG('Y', 'V', '2', '4')
-        || avctx->codec_tag == MKTAG('Y', 'V', 'U', '9'))
+    if (avctx->codec_tag == MKTAG('Y', 'V', '1', '2') ||
+        avctx->codec_tag == MKTAG('Y', 'V', '1', '6') ||
+        avctx->codec_tag == MKTAG('Y', 'V', '2', '4') ||
+        avctx->codec_tag == MKTAG('Y', 'V', 'U', '9'))
         FFSWAP(uint8_t *, picture->data[1], picture->data[2]);
 
     if (avctx->codec_tag == AV_RL32("I420") && (avctx->width+1)*(avctx->height+1) * 3/2 == buf_size) {
-        picture->data[1] = picture->data[1] + (avctx->width+1)*(avctx->height+1) -avctx->width*avctx->height;
+        picture->data[1] = picture->data[1] + (avctx->width+1)*(avctx->height+1) -avctx->width*avctx->height;
         picture->data[2] = picture->data[2] + ((avctx->width+1)*(avctx->height+1) -avctx->width*avctx->height)*5/4;
     }
 
-    if(avctx->codec_tag == AV_RL32("yuv2") &&
-       avctx->pix_fmt == AV_PIX_FMT_YUYV422) {
+    if (avctx->codec_tag == AV_RL32("yuv2") &&
+        avctx->pix_fmt == AV_PIX_FMT_YUYV422) {
         int x, y;
         uint8_t *line = picture->data[0];
-        for(y = 0; y < avctx->height; y++) {
-            for(x = 0; x < avctx->width; x++)
-                line[2*x + 1] ^= 0x80;
+        for (y = 0; y < avctx->height; y++) {
+            for (x = 0; x < avctx->width; x++)
+                line[2 * x + 1] ^= 0x80;
             line += picture->linesize[0];
         }
     }
-    if(avctx->codec_tag == AV_RL32("YVYU") &&
-       avctx->pix_fmt == AV_PIX_FMT_YUYV422) {
+    if (avctx->codec_tag == AV_RL32("YVYU") &&
+        avctx->pix_fmt == AV_PIX_FMT_YUYV422) {
         int x, y;
         uint8_t *line = picture->data[0];
-        for(y = 0; y < avctx->height; y++) {