
Merge commit '3c6e5a840c45fd3b832e86881602a72e47d46f19'

* commit '3c6e5a840c45fd3b832e86881602a72e47d46f19':
  rl2: use fixed-width integer types where appropriate
  rl2: return meaningful error codes.
  cljr: return a meaningful error code.
  fraps: cosmetics, reformat

Conflicts:
	libavcodec/fraps.c
	libavcodec/rl2.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Author: Michael Niedermayer
Date:   2013-01-07 01:00:04 +01:00
Commit: d30660306c

2 changed files with 53 additions and 49 deletions

libavcodec/fraps.c

@@ -43,7 +43,7 @@
 /**
  * local variable storage
  */
-typedef struct FrapsContext{
+typedef struct FrapsContext {
     AVCodecContext *avctx;
     AVFrame frame;
     uint8_t *tmpbuf;
@@ -64,7 +64,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
     avcodec_get_frame_defaults(&s->frame);
     avctx->coded_frame = &s->frame;

     s->avctx = avctx;
     s->tmpbuf = NULL;

     ff_dsputil_init(&s->dsp, avctx);
@@ -76,7 +76,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
  * Comparator - our nodes should ascend by count
  * but with preserved symbol order
  */
-static int huff_cmp(const void *va, const void *vb){
+static int huff_cmp(const void *va, const void *vb)
+{
     const Node *a = va, *b = vb;
     return (a->count - b->count)*256 + a->sym - b->sym;
 }
@@ -93,7 +94,7 @@ static int fraps2_decode_plane(FrapsContext *s, uint8_t *dst, int stride, int w,
     VLC vlc;
     Node nodes[512];

-    for(i = 0; i < 256; i++)
+    for (i = 0; i < 256; i++)
         nodes[i].count = bytestream_get_le32(&src);
     size -= 1024;
     if ((ret = ff_huff_build_tree(s->avctx, &vlc, 256, nodes, huff_cmp,
@@ -105,14 +106,16 @@ static int fraps2_decode_plane(FrapsContext *s, uint8_t *dst, int stride, int w,
     s->dsp.bswap_buf((uint32_t *)s->tmpbuf, (const uint32_t *)src, size >> 2);
     init_get_bits(&gb, s->tmpbuf, size * 8);

-    for(j = 0; j < h; j++){
-        for(i = 0; i < w*step; i += step){
+    for (j = 0; j < h; j++) {
+        for (i = 0; i < w*step; i += step) {
             dst[i] = get_vlc2(&gb, vlc.table, 9, 3);
             /* lines are stored as deltas between previous lines
              * and we need to add 0x80 to the first lines of chroma planes
              */
-            if(j) dst[i] += dst[i - stride];
-            else if(Uoff) dst[i] += 0x80;
+            if (j)
+                dst[i] += dst[i - stride];
+            else if (Uoff)
+                dst[i] += 0x80;
             if (get_bits_left(&gb) < 0) {
                 ff_free_vlc(&vlc);
                 return AVERROR_INVALIDDATA;
@@ -128,11 +131,11 @@ static int decode_frame(AVCodecContext *avctx,
                         void *data, int *got_frame,
                         AVPacket *avpkt)
 {
-    const uint8_t *buf = avpkt->data;
-    int buf_size = avpkt->size;
     FrapsContext * const s = avctx->priv_data;
-    AVFrame *frame = data;
-    AVFrame * const f = &s->frame;
+    const uint8_t *buf = avpkt->data;
+    int buf_size = avpkt->size;
+    AVFrame *frame = data;
+    AVFrame * const f = &s->frame;
     uint32_t header;
     unsigned int version,header_size;
     unsigned int x, y;
@@ -144,8 +147,8 @@ static int decode_frame(AVCodecContext *avctx,
     uint8_t *out;
     enum AVPixelFormat pix_fmt;

     header = AV_RL32(buf);
     version = header & 0xff;
     header_size = (header & (1<<30))? 8 : 4; /* bit 30 means pad to 8 bytes */

     if (version > 5) {
@@ -158,7 +161,7 @@ static int decode_frame(AVCodecContext *avctx,
     buf += header_size;

     if (version < 2) {
-        unsigned needed_size = avctx->width*avctx->height*3;
+        unsigned needed_size = avctx->width * avctx->height * 3;
         if (version == 0) needed_size /= 2;
         needed_size += header_size;
         /* bit 31 means same as previous pic */
@@ -182,15 +185,15 @@ static int decode_frame(AVCodecContext *avctx,
        av_log(avctx, AV_LOG_ERROR, "Fraps: error in data stream\n");
        return AVERROR_INVALIDDATA;
     }
-    for(i = 0; i < planes; i++) {
+    for (i = 0; i < planes; i++) {
         offs[i] = AV_RL32(buf + 4 + i * 4);
-        if(offs[i] >= buf_size - header_size || (i && offs[i] <= offs[i - 1] + 1024)) {
+        if (offs[i] >= buf_size - header_size || (i && offs[i] <= offs[i - 1] + 1024)) {
             av_log(avctx, AV_LOG_ERROR, "Fraps: plane %i offset is out of bounds\n", i);
             return AVERROR_INVALIDDATA;
         }
     }
     offs[planes] = buf_size - header_size;
-    for(i = 0; i < planes; i++) {
+    for (i = 0; i < planes; i++) {
         av_fast_padded_malloc(&s->tmpbuf, &s->tmpbuf_size, offs[i + 1] - offs[i] - 1024);
         if (!s->tmpbuf)
             return AVERROR(ENOMEM);
@@ -215,23 +218,23 @@ static int decode_frame(AVCodecContext *avctx,
         return ret;
     }

-    switch(version) {
+    switch (version) {
     case 0:
     default:
         /* Fraps v0 is a reordered YUV420 */
-        if ( (avctx->width % 8) != 0 || (avctx->height % 2) != 0 ) {
+        if (((avctx->width % 8) != 0) || ((avctx->height % 2) != 0)) {
            av_log(avctx, AV_LOG_ERROR, "Invalid frame size %dx%d\n",
                   avctx->width, avctx->height);
            return AVERROR_INVALIDDATA;
        }

-        buf32=(const uint32_t*)buf;
-        for(y=0; y<avctx->height/2; y++){
-            luma1=(uint32_t*)&f->data[0][ y*2*f->linesize[0] ];
-            luma2=(uint32_t*)&f->data[0][ (y*2+1)*f->linesize[0] ];
-            cr=(uint32_t*)&f->data[1][ y*f->linesize[1] ];
-            cb=(uint32_t*)&f->data[2][ y*f->linesize[2] ];
-            for(x=0; x<avctx->width; x+=8){
+        buf32 = (const uint32_t*)buf;
+        for (y = 0; y < avctx->height / 2; y++) {
+            luma1 = (uint32_t*)&f->data[0][ y * 2 * f->linesize[0] ];
+            luma2 = (uint32_t*)&f->data[0][ (y * 2 + 1) * f->linesize[0] ];
+            cr = (uint32_t*)&f->data[1][ y * f->linesize[1] ];
+            cb = (uint32_t*)&f->data[2][ y * f->linesize[2] ];
+            for (x = 0; x < avctx->width; x += 8) {
                 *luma1++ = *buf32++;
                 *luma1++ = *buf32++;
                 *luma2++ = *buf32++;
@@ -244,10 +247,11 @@ static int decode_frame(AVCodecContext *avctx,
     case 1:
         /* Fraps v1 is an upside-down BGR24 */
-        for(y=0; y<avctx->height; y++)
-            memcpy(&f->data[0][ (avctx->height-y)*f->linesize[0] ],
-                   &buf[y*avctx->width*3],
-                   3*avctx->width);
+        for (y=0; y<avctx->height; y++)
+            memcpy(&f->data[0][(avctx->height - y) * f->linesize[0]],
+                   &buf[y * avctx->width * 3],
+                   3 * avctx->width);
         break;
@@ -257,7 +261,7 @@ static int decode_frame(AVCodecContext *avctx,
          * Fraps v2 is Huffman-coded YUV420 planes
          * Fraps v4 is virtually the same
          */
-        for(i = 0; i < planes; i++){
+        for (i = 0; i < planes; i++) {
             is_chroma = !!i;
             if ((ret = fraps2_decode_plane(s, f->data[i], f->linesize[i],
                                            avctx->width >> is_chroma,
@@ -272,7 +276,7 @@ static int decode_frame(AVCodecContext *avctx,
     case 3:
     case 5:
         /* Virtually the same as version 4, but is for RGB24 */
-        for(i = 0; i < planes; i++){
+        for (i = 0; i < planes; i++) {
             if ((ret = fraps2_decode_plane(s, f->data[0] + i + (f->linesize[0] * (avctx->height - 1)),
                                            -f->linesize[0], avctx->width, avctx->height,
                                            buf + offs[i], offs[i + 1] - offs[i], 0, 3)) < 0) {
@@ -282,7 +286,7 @@ static int decode_frame(AVCodecContext *avctx,
         }
         out = f->data[0];
         // convert pseudo-YUV into real RGB
-        for(j = 0; j < avctx->height; j++){
+        for (j = 0; j < avctx->height; j++) {
             uint8_t *line_end = out + 3*avctx->width;
             while (out < line_end) {
                 out[0] += out[1];

libavcodec/rl2.c

@@ -43,10 +43,10 @@ typedef struct Rl2Context {
     AVCodecContext *avctx;
     AVFrame frame;

-    unsigned short video_base; ///< initial drawing offset
-    unsigned int clr_count; ///< number of used colors (currently unused)
-    unsigned char* back_frame; ///< background frame
-    unsigned int palette[AVPALETTE_COUNT];
+    uint16_t video_base; ///< initial drawing offset
+    uint32_t clr_count;  ///< number of used colors (currently unused)
+    uint8_t *back_frame; ///< background frame
+    uint32_t palette[AVPALETTE_COUNT];
 } Rl2Context;

 /**
@@ -58,16 +58,17 @@ typedef struct Rl2Context {
  * @param stride stride of the output buffer
  * @param video_base offset of the rle data inside the frame
  */
-static void rl2_rle_decode(Rl2Context *s,const unsigned char* in,int size,
-                           unsigned char* out,int stride,int video_base){
+static void rl2_rle_decode(Rl2Context *s, const uint8_t *in, int size,
+                           uint8_t *out, int stride, int video_base)
+{
     int base_x = video_base % s->avctx->width;
     int base_y = video_base / s->avctx->width;
     int stride_adj = stride - s->avctx->width;
     int i;
-    const unsigned char* back_frame = s->back_frame;
-    const unsigned char* in_end = in + size;
-    const unsigned char* out_end = out + stride * s->avctx->height;
-    unsigned char* line_end;
+    const uint8_t *back_frame = s->back_frame;
+    const uint8_t *in_end = in + size;
+    const uint8_t *out_end = out + stride * s->avctx->height;
+    uint8_t *line_end;

     /** copy start of the background frame */
     for(i=0;i<=base_y;i++){
@@ -82,7 +83,7 @@ static void rl2_rle_decode(Rl2Context *s,const unsigned char* in,int size,
     /** decode the variable part of the frame */
     while(in < in_end){
-        unsigned char val = *in++;
+        uint8_t val = *in++;
         int len = 1;
         if(val >= 0x80){
             if(in >= in_end)
@@ -141,7 +142,7 @@ static av_cold int rl2_decode_init(AVCodecContext *avctx)
     /** parse extra data */
     if(!avctx->extradata || avctx->extradata_size < EXTRADATA1_SIZE){
         av_log(avctx, AV_LOG_ERROR, "invalid extradata size\n");
-        return AVERROR_INVALIDDATA;
+        return AVERROR(EINVAL);
     }

     /** get frame_offset */
@@ -161,7 +162,7 @@ static av_cold int rl2_decode_init(AVCodecContext *avctx)
     back_size = avctx->extradata_size - EXTRADATA1_SIZE;

     if(back_size > 0){
-        unsigned char* back_frame = av_mallocz(avctx->width*avctx->height);
+        uint8_t *back_frame = av_mallocz(avctx->width*avctx->height);
         if(!back_frame)
             return AVERROR(ENOMEM);
         rl2_rle_decode(s,avctx->extradata + EXTRADATA1_SIZE,back_size,
@@ -177,9 +178,8 @@ static int rl2_decode_frame(AVCodecContext *avctx,
                             AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
-    int buf_size = avpkt->size;
+    int ret, buf_size = avpkt->size;
     Rl2Context *s = avctx->priv_data;
-    int ret;

     if(s->frame.data[0])
         avctx->release_buffer(avctx, &s->frame);