Merge commit '4a4841d4e0f0dc50998511bf6c48b518012024db'
* commit '4a4841d4e0f0dc50998511bf6c48b518012024db':
  fraps: use the AVFrame API properly.
  rpza: use the AVFrame API properly.
  motionpixels: use the AVFrame API properly.
  vmdvideo: use the AVFrame API properly.

Conflicts:
  libavcodec/fraps.c
  libavcodec/motionpixels.c
  libavcodec/rpza.c
  libavcodec/vmdav.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 4362f272c0
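All four decoders move from an AVFrame embedded in the codec context to a heap-allocated AVFrame *, so the lifecycle becomes: av_frame_alloc() at init, ff_reget_buffer() per packet, av_frame_ref() into the caller's output frame, and av_frame_free() at close. The sketch below condenses that pattern; it is not code from this commit, and DummyContext and the dummy_* names are illustrative only.

/* Minimal sketch of the decoder-side AVFrame ownership pattern adopted here.
 * DummyContext and the dummy_* names are hypothetical. */
#include "libavutil/frame.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/internal.h"   /* ff_reget_buffer() (internal API) */

typedef struct DummyContext {
    AVFrame *frame;                /* was: AVFrame frame; */
} DummyContext;

static av_cold int dummy_init(AVCodecContext *avctx)
{
    DummyContext *s = avctx->priv_data;

    s->frame = av_frame_alloc();   /* replaces avcodec_get_frame_defaults() */
    if (!s->frame)
        return AVERROR(ENOMEM);
    return 0;
}

static int dummy_decode(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    DummyContext *s = avctx->priv_data;
    int ret;

    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)  /* pointer, no '&' */
        return ret;

    /* ... decode into s->frame->data[] ... */

    if ((ret = av_frame_ref(data, s->frame)) < 0)      /* hand a new ref to the caller */
        return ret;
    *got_frame = 1;
    return avpkt->size;
}

static av_cold int dummy_close(AVCodecContext *avctx)
{
    DummyContext *s = avctx->priv_data;

    av_frame_free(&s->frame);      /* unrefs and frees; replaces av_frame_unref() */
    return 0;
}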
libavcodec/motionpixels.c
@@ -36,7 +36,7 @@ typedef struct HuffCode {
 typedef struct MotionPixelsContext {
     AVCodecContext *avctx;
-    AVFrame frame;
+    AVFrame *frame;
     DSPContext dsp;
     uint8_t *changes_map;
     int offset_bits_len;
@@ -50,6 +50,19 @@ typedef struct MotionPixelsContext {
     int bswapbuf_size;
 } MotionPixelsContext;
 
+static av_cold int mp_decode_end(AVCodecContext *avctx)
+{
+    MotionPixelsContext *mp = avctx->priv_data;
+
+    av_freep(&mp->changes_map);
+    av_freep(&mp->vpt);
+    av_freep(&mp->hpt);
+    av_freep(&mp->bswapbuf);
+    av_frame_free(&mp->frame);
+
+    return 0;
+}
+
 static av_cold int mp_decode_init(AVCodecContext *avctx)
 {
     MotionPixelsContext *mp = avctx->priv_data;
@@ -75,7 +88,13 @@ static av_cold int mp_decode_init(AVCodecContext *avctx)
         return AVERROR(ENOMEM);
     }
     avctx->pix_fmt = AV_PIX_FMT_RGB555;
-    avcodec_get_frame_defaults(&mp->frame);
+
+    mp->frame = av_frame_alloc();
+    if (!mp->frame) {
+        mp_decode_end(avctx);
+        return AVERROR(ENOMEM);
+    }
+
     return 0;
 }
 
@@ -96,14 +115,14 @@ static void mp_read_changes_map(MotionPixelsContext *mp, GetBitContext *gb, int
             continue;
         w = FFMIN(w, mp->avctx->width - x);
         h = FFMIN(h, mp->avctx->height - y);
-        pixels = (uint16_t *)&mp->frame.data[0][y * mp->frame.linesize[0] + x * 2];
+        pixels = (uint16_t *)&mp->frame->data[0][y * mp->frame->linesize[0] + x * 2];
         while (h--) {
             mp->changes_map[offset] = w;
             if (read_color)
                 for (i = 0; i < w; ++i)
                     pixels[i] = color;
             offset += mp->avctx->width;
-            pixels += mp->frame.linesize[0] / 2;
+            pixels += mp->frame->linesize[0] / 2;
         }
     }
 }
@@ -165,7 +184,7 @@ static YuvPixel mp_get_yuv_from_rgb(MotionPixelsContext *mp, int x, int y)
 {
     int color;
 
-    color = *(uint16_t *)&mp->frame.data[0][y * mp->frame.linesize[0] + x * 2];
+    color = *(uint16_t *)&mp->frame->data[0][y * mp->frame->linesize[0] + x * 2];
     return mp_rgb_yuv_table[color];
 }
 
@@ -174,7 +193,7 @@ static void mp_set_rgb_from_yuv(MotionPixelsContext *mp, int x, int y, const Yuv
     int color;
 
     color = mp_yuv_to_rgb(p->y, p->v, p->u, 1);
-    *(uint16_t *)&mp->frame.data[0][y * mp->frame.linesize[0] + x * 2] = color;
+    *(uint16_t *)&mp->frame->data[0][y * mp->frame->linesize[0] + x * 2] = color;
 }
 
 static int mp_get_vlc(MotionPixelsContext *mp, GetBitContext *gb)
@@ -271,7 +290,7 @@ static int mp_decode_frame(AVCodecContext *avctx,
     GetBitContext gb;
     int i, count1, count2, sz, ret;
 
-    if ((ret = ff_reget_buffer(avctx, &mp->frame)) < 0)
+    if ((ret = ff_reget_buffer(avctx, mp->frame)) < 0)
         return ret;
 
     /* le32 bitstream msb first */
@@ -296,7 +315,7 @@ static int mp_decode_frame(AVCodecContext *avctx,
         goto end;
 
     if (mp->changes_map[0] == 0) {
-        *(uint16_t *)mp->frame.data[0] = get_bits(&gb, 15);
+        *(uint16_t *)mp->frame->data[0] = get_bits(&gb, 15);
         mp->changes_map[0] = 1;
     }
     if (mp_read_codes_table(mp, &gb) < 0)
@@ -316,25 +335,12 @@ static int mp_decode_frame(AVCodecContext *avctx,
     ff_free_vlc(&mp->vlc);
 
 end:
-    if ((ret = av_frame_ref(data, &mp->frame)) < 0)
+    if ((ret = av_frame_ref(data, mp->frame)) < 0)
         return ret;
     *got_frame = 1;
     return buf_size;
 }
 
-static av_cold int mp_decode_end(AVCodecContext *avctx)
-{
-    MotionPixelsContext *mp = avctx->priv_data;
-
-    av_freep(&mp->changes_map);
-    av_freep(&mp->vpt);
-    av_freep(&mp->hpt);
-    av_freep(&mp->bswapbuf);
-    av_frame_unref(&mp->frame);
-
-    return 0;
-}
-
 AVCodec ff_motionpixels_decoder = {
     .name           = "motionpixels",
     .long_name      = NULL_IF_CONFIG_SMALL("Motion Pixels video"),
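In motionpixels.c the cleanup function is moved above mp_decode_init() so that a failed av_frame_alloc() can fall back to the full teardown path. That works because av_freep() and av_frame_free() accept pointers whose target is NULL and leave them NULL, so a partially initialized context is safe to clean. A minimal standalone illustration (not from this commit):

#include <stdint.h>
#include <libavutil/frame.h>
#include <libavutil/mem.h>

int main(void)
{
    AVFrame *frame = NULL;
    uint8_t *buf   = NULL;

    /* Both calls are no-ops on NULL and leave the pointers NULL,
     * which is what makes jumping to a common cleanup path safe
     * even when allocation failed halfway through init. */
    av_frame_free(&frame);
    av_freep(&buf);
    return 0;
}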
libavcodec/rpza.c
@@ -46,7 +46,7 @@
 typedef struct RpzaContext {
 
     AVCodecContext *avctx;
-    AVFrame frame;
+    AVFrame *frame;
 
     const unsigned char *buf;
     int size;
@@ -72,7 +72,7 @@ typedef struct RpzaContext {
 static void rpza_decode_stream(RpzaContext *s)
 {
     int width = s->avctx->width;
-    int stride = s->frame.linesize[0] / 2;
+    int stride = s->frame->linesize[0] / 2;
     int row_inc = stride - 4;
     int stream_ptr = 0;
     int chunk_size;
@@ -82,7 +82,7 @@ static void rpza_decode_stream(RpzaContext *s)
     unsigned short color4[4];
     unsigned char index, idx;
     unsigned short ta, tb;
-    unsigned short *pixels = (unsigned short *)s->frame.data[0];
+    unsigned short *pixels = (unsigned short *)s->frame->data[0];
 
     int row_ptr = 0;
     int pixel_ptr = -4;
@@ -239,7 +239,9 @@ static av_cold int rpza_decode_init(AVCodecContext *avctx)
     s->avctx = avctx;
     avctx->pix_fmt = AV_PIX_FMT_RGB555;
 
-    avcodec_get_frame_defaults(&s->frame);
+    s->frame = av_frame_alloc();
+    if (!s->frame)
+        return AVERROR(ENOMEM);
 
     return 0;
 }
@@ -256,12 +258,12 @@ static int rpza_decode_frame(AVCodecContext *avctx,
     s->buf = buf;
     s->size = buf_size;
 
-    if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0)
+    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
        return ret;
 
     rpza_decode_stream(s);
 
-    if ((ret = av_frame_ref(data, &s->frame)) < 0)
+    if ((ret = av_frame_ref(data, s->frame)) < 0)
         return ret;
 
     *got_frame = 1;
@@ -274,7 +276,7 @@ static av_cold int rpza_decode_end(AVCodecContext *avctx)
 {
     RpzaContext *s = avctx->priv_data;
 
-    av_frame_unref(&s->frame);
+    av_frame_free(&s->frame);
 
     return 0;
 }
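rpza_decode_end() changes from av_frame_unref(&s->frame) to av_frame_free(&s->frame): with an embedded AVFrame only the referenced buffers needed dropping, but a heap-allocated frame must also have the AVFrame struct itself released, and av_frame_free() does both before NULLing the pointer. A hedged sketch of the distinction; ToyContext is made up and only mirrors the before/after layouts:

#include "libavutil/frame.h"

typedef struct ToyContext {      /* hypothetical, not FFmpeg code */
    AVFrame  embedded;           /* pre-refactor layout  */
    AVFrame *heap;               /* post-refactor layout */
} ToyContext;

static void toy_close(ToyContext *s)
{
    /* Old style: the struct lives inside the context, so only the
     * referenced buffers are released. */
    av_frame_unref(&s->embedded);

    /* New style: av_frame_free() unrefs the buffers, frees the
     * AVFrame itself, and sets s->heap to NULL. */
    av_frame_free(&s->heap);
}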
libavcodec/vmdav.c
@@ -61,7 +61,7 @@
 typedef struct VmdVideoContext {
 
     AVCodecContext *avctx;
-    AVFrame prev_frame;
+    AVFrame *prev_frame;
 
     const unsigned char *buf;
     int size;
@@ -244,11 +244,11 @@ static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
 
     /* if only a certain region will be updated, copy the entire previous
      * frame before the decode */
-    if (s->prev_frame.data[0] &&
+    if (s->prev_frame->data[0] &&
         (frame_x || frame_y || (frame_width != s->avctx->width) ||
         (frame_height != s->avctx->height))) {
 
-        memcpy(frame->data[0], s->prev_frame.data[0],
+        memcpy(frame->data[0], s->prev_frame->data[0],
                s->avctx->height * frame->linesize[0]);
     }
 
@@ -291,7 +291,7 @@ static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
     }
 
     dp = &frame->data[0][frame_y * frame->linesize[0] + frame_x];
-    pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x];
+    pp = &s->prev_frame->data[0][frame_y * s->prev_frame->linesize[0] + frame_x];
     switch (meth) {
     case 1:
         for (i = 0; i < frame_height; i++) {
@@ -307,7 +307,7 @@ static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
                     ofs += len;
                 } else {
                     /* interframe pixel copy */
-                    if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
+                    if (ofs + len + 1 > frame_width || !s->prev_frame->data[0])
                         return AVERROR_INVALIDDATA;
                     memcpy(&dp[ofs], &pp[ofs], len + 1);
                     ofs += len + 1;
@@ -320,7 +320,7 @@ static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
                 return AVERROR_INVALIDDATA;
             }
             dp += frame->linesize[0];
-            pp += s->prev_frame.linesize[0];
+            pp += s->prev_frame->linesize[0];
         }
         break;
 
@@ -328,7 +328,7 @@ static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
         for (i = 0; i < frame_height; i++) {
             bytestream2_get_buffer(&gb, dp, frame_width);
             dp += frame->linesize[0];
-            pp += s->prev_frame.linesize[0];
+            pp += s->prev_frame->linesize[0];
         }
         break;
 
@@ -353,7 +353,7 @@ static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
                     }
                 } else {
                     /* interframe pixel copy */
-                    if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
+                    if (ofs + len + 1 > frame_width || !s->prev_frame->data[0])
                         return AVERROR_INVALIDDATA;
                     memcpy(&dp[ofs], &pp[ofs], len + 1);
                     ofs += len + 1;
@@ -366,13 +366,24 @@ static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
                     return AVERROR_INVALIDDATA;
                 }
                 dp += frame->linesize[0];
-                pp += s->prev_frame.linesize[0];
+                pp += s->prev_frame->linesize[0];
             }
             break;
     }
     return 0;
 }
 
+static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
+{
+    VmdVideoContext *s = avctx->priv_data;
+
+    av_frame_free(&s->prev_frame);
+    av_freep(&s->unpack_buffer);
+    s->unpack_buffer_size = 0;
+
+    return 0;
+}
+
 static av_cold int vmdvideo_decode_init(AVCodecContext *avctx)
 {
     VmdVideoContext *s = avctx->priv_data;
@@ -412,7 +423,11 @@ static av_cold int vmdvideo_decode_init(AVCodecContext *avctx)
         palette32[i] |= palette32[i] >> 6 & 0x30303;
     }
 
-    avcodec_get_frame_defaults(&s->prev_frame);
+    s->prev_frame = av_frame_alloc();
+    if (!s->prev_frame) {
+        vmdvideo_decode_end(avctx);
+        return AVERROR(ENOMEM);
+    }
 
     return 0;
 }
@@ -443,8 +458,8 @@ static int vmdvideo_decode_frame(AVCodecContext *avctx,
     memcpy(frame->data[1], s->palette, PALETTE_COUNT * 4);
 
     /* shuffle frames */
-    av_frame_unref(&s->prev_frame);
-    if ((ret = av_frame_ref(&s->prev_frame, frame)) < 0)
+    av_frame_unref(s->prev_frame);
+    if ((ret = av_frame_ref(s->prev_frame, frame)) < 0)
         return ret;
 
     *got_frame = 1;
@@ -453,18 +468,6 @@ static int vmdvideo_decode_frame(AVCodecContext *avctx,
     return buf_size;
 }
 
-static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
-{
-    VmdVideoContext *s = avctx->priv_data;
-
-    av_frame_unref(&s->prev_frame);
-    av_freep(&s->unpack_buffer);
-    s->unpack_buffer_size = 0;
-
-    return 0;
-}
-
-
 /*
  * Audio Decoder
  */
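vmdav.c keeps the last decoded picture in s->prev_frame for inter-frame pixel copies. With refcounted frames, the per-packet shuffle at the end of vmdvideo_decode_frame() is just dropping the old reference and taking a new one to the frame that was output; no pixel data is copied. A minimal sketch of that step (the helper name is illustrative, not from the commit):

#include "libavutil/frame.h"

/* Keep "prev" pointing at the most recently decoded picture.
 * Both frames are assumed to come from av_frame_alloc(), and "cur"
 * is assumed to hold refcounted buffers (e.g. via ff_reget_buffer()). */
static int keep_previous_frame(AVFrame *prev, const AVFrame *cur)
{
    av_frame_unref(prev);            /* drop the old reference        */
    return av_frame_ref(prev, cur);  /* share cur's buffers, no copy  */
}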