
vmnc: use the AVFrame API properly.

Anton Khirnov 2013-11-09 10:14:46 +01:00
parent 04f30711d8
commit 3c8ea9d4a7
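
The decoder previously embedded an AVFrame by value in VmncContext; with reference-counted frames, the frame is instead heap-allocated and owned through a pointer: av_frame_alloc() in init, ff_reget_buffer() plus av_frame_ref() per decoded frame, and av_frame_free() in close. A minimal sketch of that lifecycle follows; the SketchContext decoder and callback names are hypothetical stand-ins, not the actual vmnc code, while the AVFrame calls themselves are the real API (ff_reget_buffer() is libavcodec-internal, so this only builds inside the FFmpeg tree):

/*
 * Sketch of the frame lifecycle this commit adopts.
 */
#include "avcodec.h"
#include "internal.h"          /* ff_reget_buffer() */
#include "libavutil/frame.h"

typedef struct SketchContext {
    AVFrame *pic;              /* heap-allocated, owned by the decoder */
} SketchContext;

static av_cold int sketch_init(AVCodecContext *avctx)
{
    SketchContext *c = avctx->priv_data;

    c->pic = av_frame_alloc(); /* replaces avcodec_get_frame_defaults(&c->pic) */
    if (!c->pic)
        return AVERROR(ENOMEM);
    return 0;
}

static int sketch_decode(AVCodecContext *avctx, void *data,
                         int *got_frame, AVPacket *avpkt)
{
    SketchContext *c = avctx->priv_data;
    int ret;

    /* the frame is now passed by pointer, not by address-of-member */
    if ((ret = ff_reget_buffer(avctx, c->pic)) < 0)
        return ret;

    /* ... decode avpkt->data into c->pic->data[] ... */

    /* hand the caller its own reference; c->pic keeps the original */
    if ((ret = av_frame_ref(data, c->pic)) < 0)
        return ret;
    *got_frame = 1;

    return avpkt->size;
}

static av_cold int sketch_close(AVCodecContext *avctx)
{
    SketchContext *c = avctx->priv_data;

    av_frame_free(&c->pic);    /* frees the struct too, unlike av_frame_unref() */
    return 0;
}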

libavcodec/vmnc.c

@@ -57,7 +57,7 @@ enum HexTile_Flags {
  */
 typedef struct VmncContext {
     AVCodecContext *avctx;
-    AVFrame pic;
+    AVFrame *pic;
 
     int bpp;
     int bpp2;
@@ -319,15 +319,15 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     uint8_t *outptr;
     int dx, dy, w, h, depth, enc, chunks, res, size_left, ret;
 
-    if ((ret = ff_reget_buffer(avctx, &c->pic)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, c->pic)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
 
     bytestream2_init(gb, buf, buf_size);
 
-    c->pic.key_frame = 0;
-    c->pic.pict_type = AV_PICTURE_TYPE_P;
+    c->pic->key_frame = 0;
+    c->pic->pict_type = AV_PICTURE_TYPE_P;
 
     // restore screen after cursor
     if (c->screendta) {
@@ -349,11 +349,11 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
             dy = 0;
         }
         if ((w > 0) && (h > 0)) {
-            outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0];
+            outptr = c->pic->data[0] + dx * c->bpp2 + dy * c->pic->linesize[0];
             for (i = 0; i < h; i++) {
                 memcpy(outptr, c->screendta + i * c->cur_w * c->bpp2,
                        w * c->bpp2);
-                outptr += c->pic.linesize[0];
+                outptr += c->pic->linesize[0];
             }
         }
     }
@ -365,7 +365,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
w = bytestream2_get_be16(gb); w = bytestream2_get_be16(gb);
h = bytestream2_get_be16(gb); h = bytestream2_get_be16(gb);
enc = bytestream2_get_be32(gb); enc = bytestream2_get_be32(gb);
outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0]; outptr = c->pic->data[0] + dx * c->bpp2 + dy * c->pic->linesize[0];
size_left = bytestream2_get_bytes_left(gb); size_left = bytestream2_get_bytes_left(gb);
switch (enc) { switch (enc) {
case MAGIC_WMVd: // cursor case MAGIC_WMVd: // cursor
@@ -415,8 +415,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
             bytestream2_skip(gb, 4);
             break;
         case MAGIC_WMVi: // ServerInitialization struct
-            c->pic.key_frame = 1;
-            c->pic.pict_type = AV_PICTURE_TYPE_I;
+            c->pic->key_frame = 1;
+            c->pic->pict_type = AV_PICTURE_TYPE_I;
             depth = bytestream2_get_byte(gb);
             if (depth != c->bpp) {
                 av_log(avctx, AV_LOG_INFO,
@@ -451,7 +451,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                 return AVERROR_INVALIDDATA;
             }
             paint_raw(outptr, w, h, gb, c->bpp2, c->bigendian,
-                      c->pic.linesize[0]);
+                      c->pic->linesize[0]);
             break;
         case 0x00000005: // HexTile encoded rectangle
             if ((dx + w > c->width) || (dy + h > c->height)) {
@@ -460,7 +460,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                    w, h, dx, dy, c->width, c->height);
                 return AVERROR_INVALIDDATA;
             }
-            res = decode_hextile(c, outptr, gb, w, h, c->pic.linesize[0]);
+            res = decode_hextile(c, outptr, gb, w, h, c->pic->linesize[0]);
             if (res < 0)
                 return res;
             break;
@@ -489,18 +489,18 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
             dy = 0;
         }
         if ((w > 0) && (h > 0)) {
-            outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0];
+            outptr = c->pic->data[0] + dx * c->bpp2 + dy * c->pic->linesize[0];
             for (i = 0; i < h; i++) {
                 memcpy(c->screendta + i * c->cur_w * c->bpp2, outptr,
                        w * c->bpp2);
-                outptr += c->pic.linesize[0];
+                outptr += c->pic->linesize[0];
             }
-            outptr = c->pic.data[0];
-            put_cursor(outptr, c->pic.linesize[0], c, c->cur_x, c->cur_y);
+            outptr = c->pic->data[0];
+            put_cursor(outptr, c->pic->linesize[0], c, c->cur_x, c->cur_y);
         }
     }
     *got_frame = 1;
-    if ((ret = av_frame_ref(data, &c->pic)) < 0)
+    if ((ret = av_frame_ref(data, c->pic)) < 0)
         return ret;
 
     /* always report that the buffer was completely consumed */
@@ -532,7 +532,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
         return AVERROR_INVALIDDATA;
     }
 
-    avcodec_get_frame_defaults(&c->pic);
+    c->pic = av_frame_alloc();
+    if (!c->pic)
+        return AVERROR(ENOMEM);
 
     return 0;
 }
@@ -541,7 +543,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
 {
     VmncContext * const c = avctx->priv_data;
 
-    av_frame_unref(&c->pic);
+    av_frame_free(&c->pic);
 
     av_free(c->curbits);
     av_free(c->curmask);