
Merge commit 'fe80fa53bc4977ef6e8e749a9df8ca5590a09307'

* commit 'fe80fa53bc4977ef6e8e749a9df8ca5590a09307':
  4xm: replace forcing EMU_EDGE by a copy

Conflicts:
	libavcodec/4xm.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer 2014-01-06 13:48:46 +01:00
commit d1134ababd
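
In short, the decoder now renders each picture into its own malloc'ed uint16_t buffers and copies the finished image into the caller's AVFrame, instead of forcing CODEC_FLAG_EMU_EDGE and drawing directly into the reference frame's storage. A minimal sketch of that copy-and-swap pattern follows (illustrative only; output_frame and its parameters are placeholders, not the actual 4xm code):

#include <stdint.h>
#include "libavutil/common.h"    /* FFSWAP */
#include "libavutil/frame.h"     /* AVFrame */
#include "libavutil/imgutils.h"  /* av_image_copy_plane */

/* Hypothetical helper: copy the private 16-bit buffer into the output
 * frame, then swap current/last so the next P-frame predicts from it. */
static void output_frame(AVFrame *out, uint16_t **cur, uint16_t **last,
                         int width, int height)
{
    av_image_copy_plane(out->data[0], out->linesize[0],
                        (const uint8_t *)*cur, width * 2,
                        width * 2, height);
    FFSWAP(uint16_t *, *cur, *last);
}

The swap makes the just-decoded buffer the prediction source for the next P-frame, while the other buffer is overwritten by the next decode.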

libavcodec/4xm.c

@@ -26,6 +26,7 @@
 #include "libavutil/avassert.h"
 #include "libavutil/frame.h"
+#include "libavutil/imgutils.h"
 #include "libavutil/intreadwrite.h"
 #include "avcodec.h"
 #include "bytestream.h"
@@ -131,7 +132,8 @@ typedef struct CFrameBuffer {
 typedef struct FourXContext {
     AVCodecContext *avctx;
     DSPContext dsp;
-    AVFrame *current_picture, *last_picture;
+    uint16_t *frame_buffer;
+    uint16_t *last_frame_buffer;
     GetBitContext pre_gb; ///< ac/dc prefix
     GetBitContext gb;
     GetByteContext g;
@@ -341,7 +343,7 @@ static int decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
     int code = get_vlc2(&f->gb,
                         block_type_vlc[1 - (f->version > 1)][index].table,
                         BLOCK_TYPE_VLC_BITS, 1);
-    uint16_t *start = (uint16_t *)f->last_picture->data[0];
+    uint16_t *start = f->last_frame_buffer;
     uint16_t *end = start + stride * (f->avctx->height - h + 1) - (1 << log2w);
     int ret;
     int scale = 1;
@@ -414,29 +416,18 @@ static int decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
     return 0;
 }
-static int decode_p_frame(FourXContext *f, AVFrame *frame,
-                          const uint8_t *buf, int length)
+static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
 {
     int x, y;
     const int width = f->avctx->width;
     const int height = f->avctx->height;
-    uint16_t *dst = (uint16_t *)frame->data[0];
-    const int stride = frame->linesize[0] >> 1;
+    uint16_t *dst = f->frame_buffer;
     uint16_t *src;
     unsigned int bitstream_size, bytestream_size, wordstream_size, extra,
                  bytestream_offset, wordstream_offset;
     int ret;
-    if (!f->last_picture->data[0]) {
-        if ((ret = ff_get_buffer(f->avctx, f->last_picture,
-                                 AV_GET_BUFFER_FLAG_REF)) < 0) {
-            return ret;
-        }
-        for (y=0; y<f->avctx->height; y++)
-            memset(f->last_picture->data[0] + y*f->last_picture->linesize[0], 0, 2*f->avctx->width);
-    }
-    src = (uint16_t *)f->last_picture->data[0];
+    src = f->last_frame_buffer;
     if (f->version > 1) {
         extra = 20;
@@ -476,14 +467,14 @@ static int decode_p_frame(FourXContext *f, AVFrame *frame,
     bytestream2_init(&f->g, buf + bytestream_offset,
                      length - bytestream_offset);
-    init_mv(f, frame->linesize[0]);
+    init_mv(f, width * 2);
     for (y = 0; y < height; y += 8) {
         for (x = 0; x < width; x += 8)
-            if ((ret = decode_p_block(f, dst + x, src + x, 3, 3, stride)) < 0)
+            if ((ret = decode_p_block(f, dst + x, src + x, 3, 3, width)) < 0)
                 return ret;
-        src += 8 * stride;
-        dst += 8 * stride;
+        src += 8 * width;
+        dst += 8 * width;
     }
     return 0;
@@ -548,12 +539,12 @@ static int decode_i_block(FourXContext *f, int16_t *block)
     return 0;
 }
-static inline void idct_put(FourXContext *f, AVFrame *frame, int x, int y)
+static inline void idct_put(FourXContext *f, int x, int y)
 {
     int16_t (*block)[64] = f->block;
-    int stride = frame->linesize[0] >> 1;
+    int stride = f->avctx->width;
     int i;
-    uint16_t *dst = ((uint16_t*)frame->data[0]) + y * stride + x;
+    uint16_t *dst = f->frame_buffer + y * stride + x;
     for (i = 0; i < 4; i++) {
         block[i][0] += 0x80 * 8 * 8;
@@ -713,14 +704,13 @@ static int mix(int c0, int c1)
     return red / 3 * 1024 + green / 3 * 32 + blue / 3;
 }
-static int decode_i2_frame(FourXContext *f, AVFrame *frame, const uint8_t *buf, int length)
+static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length)
 {
     int x, y, x2, y2;
     const int width = f->avctx->width;
     const int height = f->avctx->height;
     const int mbs = (FFALIGN(width, 16) >> 4) * (FFALIGN(height, 16) >> 4);
-    uint16_t *dst = (uint16_t*)frame->data[0];
-    const int stride = frame->linesize[0]>>1;
+    uint16_t *dst = f->frame_buffer;
     const uint8_t *buf_end = buf + length;
     GetByteContext g3;
@@ -751,18 +741,18 @@ static int decode_i2_frame(FourXContext *f, AVFrame *frame, const uint8_t *buf,
             for (y2 = 0; y2 < 16; y2++) {
                 for (x2 = 0; x2 < 16; x2++) {
                     int index = 2 * (x2 >> 2) + 8 * (y2 >> 2);
-                    dst[y2 * stride + x2] = color[(bits >> index) & 3];
+                    dst[y2 * width + x2] = color[(bits >> index) & 3];
                 }
             }
             dst += 16;
         }
-        dst += 16 * stride - x;
+        dst += 16 * width - x;
     }
     return 0;
 }
-static int decode_i_frame(FourXContext *f, AVFrame *frame, const uint8_t *buf, int length)
+static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
 {
     int x, y, ret;
     const int width = f->avctx->width;
@@ -816,7 +806,7 @@ static int decode_i_frame(FourXContext *f, AVFrame *frame, const uint8_t *buf, i
             if ((ret = decode_i_mb(f)) < 0)
                 return ret;
-            idct_put(f, frame, x, y);
+            idct_put(f, x, y);
         }
     }
@@ -919,29 +909,24 @@ static int decode_frame(AVCodecContext *avctx, void *data,
         frame_size = buf_size - 12;
     }
-    FFSWAP(AVFrame*, f->current_picture, f->last_picture);
-    // alternatively we would have to use our own buffer management
-    avctx->flags |= CODEC_FLAG_EMU_EDGE;
-    if ((ret = ff_reget_buffer(avctx, f->current_picture)) < 0)
+    if ((ret = ff_get_buffer(avctx, picture, 0)) < 0)
         return ret;
     if (frame_4cc == AV_RL32("ifr2")) {
-        f->current_picture->pict_type = AV_PICTURE_TYPE_I;
-        if ((ret = decode_i2_frame(f, f->current_picture, buf - 4, frame_size + 4)) < 0) {
+        picture->pict_type = AV_PICTURE_TYPE_I;
+        if ((ret = decode_i2_frame(f, buf - 4, frame_size + 4)) < 0) {
            av_log(f->avctx, AV_LOG_ERROR, "decode i2 frame failed\n");
            return ret;
        }
    } else if (frame_4cc == AV_RL32("ifrm")) {
-        f->current_picture->pict_type = AV_PICTURE_TYPE_I;
-        if ((ret = decode_i_frame(f, f->current_picture, buf, frame_size)) < 0) {
+        picture->pict_type = AV_PICTURE_TYPE_I;
+        if ((ret = decode_i_frame(f, buf, frame_size)) < 0) {
            av_log(f->avctx, AV_LOG_ERROR, "decode i frame failed\n");
            return ret;
        }
    } else if (frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")) {
-        f->current_picture->pict_type = AV_PICTURE_TYPE_P;
-        if ((ret = decode_p_frame(f, f->current_picture, buf, frame_size)) < 0) {
+        picture->pict_type = AV_PICTURE_TYPE_P;
+        if ((ret = decode_p_frame(f, buf, frame_size)) < 0) {
            av_log(f->avctx, AV_LOG_ERROR, "decode p frame failed\n");
            return ret;
        }
@@ -953,10 +938,13 @@ static int decode_frame(AVCodecContext *avctx, void *data,
                buf_size);
     }
-    f->current_picture->key_frame = f->current_picture->pict_type == AV_PICTURE_TYPE_I;
+    picture->key_frame = picture->pict_type == AV_PICTURE_TYPE_I;
+    av_image_copy_plane(picture->data[0], picture->linesize[0],
+                        (const uint8_t*)f->frame_buffer, avctx->width * 2,
+                        avctx->width * 2, avctx->height);
+    FFSWAP(uint16_t *, f->frame_buffer, f->last_frame_buffer);
-    if ((ret = av_frame_ref(picture, f->current_picture)) < 0)
-        return ret;
     *got_frame = 1;
     emms_c();
@@ -964,9 +952,28 @@ static int decode_frame(AVCodecContext *avctx, void *data,
     return buf_size;
 }
+static av_cold int decode_end(AVCodecContext *avctx)
+{
+    FourXContext * const f = avctx->priv_data;
+    int i;
+    av_freep(&f->frame_buffer);
+    av_freep(&f->last_frame_buffer);
+    av_freep(&f->bitstream_buffer);
+    f->bitstream_buffer_size = 0;
+    for (i = 0; i < CFRAME_BUFFER_COUNT; i++) {
+        av_freep(&f->cfrm[i].data);
+        f->cfrm[i].allocated_size = 0;
+    }
+    ff_free_vlc(&f->pre_vlc);
+    return 0;
+}
 static av_cold int decode_init(AVCodecContext *avctx)
 {
     FourXContext * const f = avctx->priv_data;
+    int ret;
     if (avctx->extradata_size != 4 || !avctx->extradata) {
         av_log(avctx, AV_LOG_ERROR, "extradata wrong or missing\n");
@@ -977,6 +984,17 @@ static av_cold int decode_init(AVCodecContext *avctx)
         return AVERROR_INVALIDDATA;
     }
+    ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
+    if (ret < 0)
+        return ret;
+    f->frame_buffer = av_mallocz(avctx->width * avctx->height * 2);
+    f->last_frame_buffer = av_mallocz(avctx->width * avctx->height * 2);
+    if (!f->frame_buffer || !f->last_frame_buffer) {
+        decode_end(avctx);
+        return AVERROR(ENOMEM);
+    }
     f->version = AV_RL32(avctx->extradata) >> 16;
     ff_dsputil_init(&f->dsp, avctx);
     f->avctx = avctx;
@@ -987,30 +1005,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
     else
         avctx->pix_fmt = AV_PIX_FMT_BGR555;
-    f->current_picture = av_frame_alloc();
-    f->last_picture = av_frame_alloc();
-    if (!f->current_picture || !f->last_picture)
-        return AVERROR(ENOMEM);
     return 0;
 }
-static av_cold int decode_end(AVCodecContext *avctx)
-{
-    FourXContext * const f = avctx->priv_data;
-    int i;
-    av_freep(&f->bitstream_buffer);
-    f->bitstream_buffer_size = 0;
-    for (i = 0; i < CFRAME_BUFFER_COUNT; i++) {
-        av_freep(&f->cfrm[i].data);
-        f->cfrm[i].allocated_size = 0;
-    }
-    ff_free_vlc(&f->pre_vlc);
-    av_frame_free(&f->current_picture);
-    av_frame_free(&f->last_picture);
-    return 0;
-}