
ulti: use the AVFrame API properly.

Anton Khirnov 2013-11-09 10:14:46 +01:00
parent 6792559f8a
commit c8a525197f

libavcodec/ulti.c

@@ -37,7 +37,7 @@
 typedef struct UltimotionDecodeContext {
     AVCodecContext *avctx;
     int width, height, blocks;
-    AVFrame frame;
+    AVFrame *frame;
     const uint8_t *ulti_codebook;
     GetByteContext gb;
 } UltimotionDecodeContext;
@@ -51,18 +51,19 @@ static av_cold int ulti_decode_init(AVCodecContext *avctx)
     s->height = avctx->height;
     s->blocks = (s->width / 8) * (s->height / 8);
     avctx->pix_fmt = AV_PIX_FMT_YUV410P;
-    avctx->coded_frame = &s->frame;
     s->ulti_codebook = ulti_codebook;
-    avcodec_get_frame_defaults(&s->frame);
+
+    s->frame = av_frame_alloc();
+    if (!s->frame)
+        return AVERROR(ENOMEM);
 
     return 0;
 }
 
 static av_cold int ulti_decode_end(AVCodecContext *avctx){
     UltimotionDecodeContext *s = avctx->priv_data;
-    AVFrame *pic = &s->frame;
 
-    av_frame_unref(pic);
+    av_frame_free(&s->frame);
 
     return 0;
 }
@@ -226,7 +227,7 @@ static int ulti_decode_frame(AVCodecContext *avctx,
     int skip;
     int tmp;
 
-    if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
@@ -369,7 +370,7 @@ static int ulti_decode_frame(AVCodecContext *avctx,
                     Luma[14] = (tmp >> 6) & 0x3F;
                     Luma[15] = tmp & 0x3F;
 
-                    ulti_convert_yuv(&s->frame, tx, ty, Luma, chroma);
+                    ulti_convert_yuv(s->frame, tx, ty, Luma, chroma);
                 } else {
                     if (bytestream2_get_bytes_left(&s->gb) < 4)
                         goto err;
@@ -381,20 +382,20 @@ static int ulti_decode_frame(AVCodecContext *avctx,
                         Y[1] = tmp & 0x3F;
                         Y[2] = bytestream2_get_byteu(&s->gb) & 0x3F;
                         Y[3] = bytestream2_get_byteu(&s->gb) & 0x3F;
-                        ulti_grad(&s->frame, tx, ty, Y, chroma, angle); //draw block
+                        ulti_grad(s->frame, tx, ty, Y, chroma, angle); //draw block
                     } else { // some patterns
                         int f0, f1;
                         f0 = bytestream2_get_byteu(&s->gb);
                         f1 = tmp;
                         Y[0] = bytestream2_get_byteu(&s->gb) & 0x3F;
                         Y[1] = bytestream2_get_byteu(&s->gb) & 0x3F;
-                        ulti_pattern(&s->frame, tx, ty, f1, f0, Y[0], Y[1], chroma);
+                        ulti_pattern(s->frame, tx, ty, f1, f0, Y[0], Y[1], chroma);
                     }
                 }
                 break;
             }
             if(code != 3)
-                ulti_grad(&s->frame, tx, ty, Y, chroma, angle); // draw block
+                ulti_grad(s->frame, tx, ty, Y, chroma, angle); // draw block
         }
         blocks++;
         x += 8;
@@ -406,7 +407,7 @@ static int ulti_decode_frame(AVCodecContext *avctx,
     }
 
     *got_frame = 1;
-    if ((ret = av_frame_ref(data, &s->frame)) < 0)
+    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;
 
     return buf_size;
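
The pattern the commit switches to is the same one the rest of the AVFrame-API conversions use: the decoder owns a heap-allocated AVFrame instead of embedding the struct, reuses it across calls with ff_reget_buffer() (Ultimotion paints blocks on top of the previous picture, so the buffer must persist), and returns a fresh reference to the caller via av_frame_ref() rather than exposing its internal frame. Below is a minimal sketch of that lifecycle, not code from the commit; the MyContext/my_* names are illustrative only, while the av_frame_*() and ff_reget_buffer() calls are the real APIs used above (with their 2013-era signatures).

/* Sketch of the AVFrame lifecycle adopted by this commit (illustrative names). */
#include "libavutil/frame.h"
#include "avcodec.h"
#include "internal.h"

typedef struct MyContext {
    AVFrame *frame;                    /* heap-allocated, owned by the decoder */
} MyContext;

static av_cold int my_init(AVCodecContext *avctx)
{
    MyContext *s = avctx->priv_data;

    s->frame = av_frame_alloc();       /* replaces the embedded AVFrame member */
    if (!s->frame)
        return AVERROR(ENOMEM);
    return 0;
}

static int my_decode(AVCodecContext *avctx, void *data,
                     int *got_frame, AVPacket *avpkt)
{
    MyContext *s = avctx->priv_data;
    int ret;

    /* Reuse the persistent frame; its contents from the previous call are kept. */
    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
        return ret;

    /* ... decode the packet into s->frame->data[] ... */

    /* Give the caller its own reference instead of the decoder's internal frame. */
    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;
    *got_frame = 1;
    return avpkt->size;
}

static av_cold int my_close(AVCodecContext *avctx)
{
    MyContext *s = avctx->priv_data;

    av_frame_free(&s->frame);          /* frees the frame; replaces av_frame_unref() on the embedded one */
    return 0;
}

Because the frame is now reference-counted and only handed out through av_frame_ref(), freeing it in close cannot pull the buffer out from under a frame the caller is still holding, which is the point of doing the conversion "properly".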