
Merge commit '759001c534287a96dc96d1e274665feb7059145d'

* commit '759001c534287a96dc96d1e274665feb7059145d':
  lavc decoders: work with refcounted frames.

Anton Khirnov (1):
      lavc decoders: work with refcounted frames.

Clément Bœsch (47):
      lavc/ansi: reset file
      lavc/ansi: re-do refcounted frame changes from Anton
      fraps: reset file
      lavc/fraps: switch to refcounted frames
      gifdec: reset file
      lavc/gifdec: switch to refcounted frames
      dsicinav: resolve conflicts
      smc: resolve conflicts
      zmbv: resolve conflicts
      rpza: resolve conflicts
      vble: resolve conflicts
      xxan: resolve conflicts
      targa: resolve conflicts
      vmnc: resolve conflicts
      utvideodec: resolve conflicts
      tscc: resolve conflicts
      ulti: resolve conflicts
      ffv1dec: resolve conflicts
      dnxhddec: resolve conflicts
      v210dec: resolve conflicts
      vp3: resolve conflicts
      vcr1: resolve conflicts
      v210x: resolve conflicts
      wavpack: resolve conflicts
      pngdec: fix compilation
      roqvideodec: resolve conflicts
      pictordec: resolve conflicts
      mdec: resolve conflicts
      tiertexseqv: resolve conflicts
      smacker: resolve conflicts
      vb: resolve conflicts
      vqavideo: resolve conflicts
      xl: resolve conflicts
      tmv: resolve conflicts
      vmdav: resolve conflicts
      truemotion1: resolve conflicts
      truemotion2: resolve conflicts
      lcldec: fix compilation
      libcelt_dec: fix compilation
      qdrw: fix compilation
      r210dec: fix compilation
      rl2: fix compilation
      wnv1: fix compilation
      yop: fix compilation
      tiff: resolve conflicts
      interplayvideo: fix compilation
      qpeg: resolve conflicts (FIXME/TESTME).

Hendrik Leppkes (33):
      012v: convert to refcounted frames
      8bps: fix compilation
      8svx: resolve conflicts
      4xm: resolve conflicts
      aasc: resolve conflicts
      bfi: fix compilation
      aura: fix compilation
      alsdec: resolve conflicts
      avrndec: convert to refcounted frames
      avuidec: convert to refcounted frames
      bintext: convert to refcounted frames
      cavsdec: resolve conflicts
      brender_pix: convert to refcounted frames
      cinepak: resolve conflicts
      cinepak: avoid using AVFrame struct directly in private context
      cljr: fix compilation
      cpia: convert to refcounted frames
      cscd: resolve conflicts
      iff: resolve conflicts and do proper conversion to refcounted frames
      4xm: fix reference frame handling
      cyuv: fix compilation
      dxa: fix compilation
      eacmv: fix compilation
      eamad: fix compilation
      eatgv: fix compilation
      escape124: remove unused variable.
      escape130: convert to refcounted frames
      evrcdec: convert to refcounted frames
      exr: convert to refcounted frames
      mvcdec: convert to refcounted frames
      paf: properly free the frame data on decode close
      sgirle: convert to refcounted frames
      lavfi/moviesrc: use refcounted frames

Michael Niedermayer (56):
      Merge commit '759001c534287a96dc96d1e274665feb7059145d'
      resolve conflicts in headers
      motion_est: resolve conflict
      mpeg4videodec: fix conflicts
      dpcm conflict fix
      dpx: fix conflicts
      indeo3: resolve conflicts
      kmvc: resolve conflicts
      kmvc: resolve conflicts
      h264: resolve conflicts
      utils: resolve conflicts
      rawdec: resolve conflicts
      mpegvideo: resolve conflicts
      svq1enc: resolve conflicts
      mpegvideo: don't clear data, fix assertion failure on fate vsynth1 with threads
      pthreads: resolve conflicts
      frame_thread_encoder: simple compile fix, not yet tested
      snow: update to buffer refs
      crystalhd: fix compile
      dirac: switch to new API
      sonic: update to new API
      svq1: resolve conflict, update to new API
      ffwavesynth: update to new buffer API
      g729: update to new API
      indeo5: fix compile
      j2kdec: update to new buffer API
      libopencore-amr: fix compile
      libvorbisdec: update to new API
      loco: fix compile
      paf: update to new API
      proresdec: update to new API
      vp56: update to new api / resolve conflicts
      xface: convert to refcounted frames
      xan: fix compile&fate
      v408: update to ref counted buffers
      v308: update to ref counted buffers
      yuv4dec: update to ref counted buffers
      y41p: update to ref counted frames
      xbm: update to refcounted frames
      targa_y216: update to refcounted buffers
      qpeg: fix fate/crash
      cdxl: fix fate
      tscc: fix reget buffer usage
      targa_y216dec: fix style
      msmpeg4: fix fate
      h264: ref_picture() copy fields that have been lost too
      update_frame_pool: use channel field
      h264: Put code that prevents deadlocks back
      mpegvideo: don't allow last == current
      wmalossless: fix buffer ref mess-up
      ff_alloc_picture: free tables in case of dimension mismatches
      h264: fix null pointer dereference and assertion failure
      frame_thread_encoder: update to bufrefs
      ec: fix used arrays
      snowdec: fix off by 1 error in dimensions check
      h264: disallow single unpaired fields as references of frames

Paul B Mahol (2):
      lavc/vima: convert to refcounted frames
      sanm: convert to refcounted frames

Conflicts:
	libavcodec/4xm.c
	libavcodec/8bps.c
	libavcodec/8svx.c
	libavcodec/aasc.c
	libavcodec/alsdec.c
	libavcodec/anm.c
	libavcodec/ansi.c
	libavcodec/avs.c
	libavcodec/bethsoftvideo.c
	libavcodec/bfi.c
	libavcodec/c93.c
	libavcodec/cavsdec.c
	libavcodec/cdgraphics.c
	libavcodec/cinepak.c
	libavcodec/cljr.c
	libavcodec/cscd.c
	libavcodec/dnxhddec.c
	libavcodec/dpcm.c
	libavcodec/dpx.c
	libavcodec/dsicinav.c
	libavcodec/dvdec.c
	libavcodec/dxa.c
	libavcodec/eacmv.c
	libavcodec/eamad.c
	libavcodec/eatgq.c
	libavcodec/eatgv.c
	libavcodec/eatqi.c
	libavcodec/error_resilience.c
	libavcodec/escape124.c
	libavcodec/ffv1.h
	libavcodec/ffv1dec.c
	libavcodec/flicvideo.c
	libavcodec/fraps.c
	libavcodec/frwu.c
	libavcodec/g723_1.c
	libavcodec/gifdec.c
	libavcodec/h264.c
	libavcodec/h264.h
	libavcodec/h264_direct.c
	libavcodec/h264_loopfilter.c
	libavcodec/h264_refs.c
	libavcodec/huffyuvdec.c
	libavcodec/idcinvideo.c
	libavcodec/iff.c
	libavcodec/indeo2.c
	libavcodec/indeo3.c
	libavcodec/internal.h
	libavcodec/interplayvideo.c
	libavcodec/ivi_common.c
	libavcodec/jvdec.c
	libavcodec/kgv1dec.c
	libavcodec/kmvc.c
	libavcodec/lagarith.c
	libavcodec/libopenjpegdec.c
	libavcodec/mdec.c
	libavcodec/mimic.c
	libavcodec/mjpegbdec.c
	libavcodec/mjpegdec.c
	libavcodec/mmvideo.c
	libavcodec/motion_est.c
	libavcodec/motionpixels.c
	libavcodec/mpc7.c
	libavcodec/mpeg12.c
	libavcodec/mpeg4videodec.c
	libavcodec/mpegvideo.c
	libavcodec/mpegvideo.h
	libavcodec/msrle.c
	libavcodec/msvideo1.c
	libavcodec/nuv.c
	libavcodec/options_table.h
	libavcodec/pcx.c
	libavcodec/pictordec.c
	libavcodec/pngdec.c
	libavcodec/pnmdec.c
	libavcodec/pthread.c
	libavcodec/qpeg.c
	libavcodec/qtrle.c
	libavcodec/r210dec.c
	libavcodec/rawdec.c
	libavcodec/roqvideodec.c
	libavcodec/rpza.c
	libavcodec/smacker.c
	libavcodec/smc.c
	libavcodec/svq1dec.c
	libavcodec/svq1enc.c
	libavcodec/targa.c
	libavcodec/tiertexseqv.c
	libavcodec/tiff.c
	libavcodec/tmv.c
	libavcodec/truemotion1.c
	libavcodec/truemotion2.c
	libavcodec/tscc.c
	libavcodec/ulti.c
	libavcodec/utils.c
	libavcodec/utvideodec.c
	libavcodec/v210dec.c
	libavcodec/v210x.c
	libavcodec/vb.c
	libavcodec/vble.c
	libavcodec/vcr1.c
	libavcodec/vmdav.c
	libavcodec/vmnc.c
	libavcodec/vp3.c
	libavcodec/vp56.c
	libavcodec/vp56.h
	libavcodec/vp6.c
	libavcodec/vqavideo.c
	libavcodec/wavpack.c
	libavcodec/xl.c
	libavcodec/xxan.c
	libavcodec/zmbv.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Committed by Michael Niedermayer on 2013-03-12 03:20:18 +01:00
commit 80e9e63c94
275 changed files with 4180 additions and 4878 deletions
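The change this merge brings in is mechanical but wide: decoders stop keeping a long-lived private AVFrame that they manage with get_buffer()/release_buffer(), and instead decode into the caller-supplied, reference-counted frame. As a rough sketch of the pattern the per-file hunks below repeat (a hypothetical decoder; MyContext and the function names are illustrative, not from this diff):

    /* Before: the decoder owns a persistent AVFrame in its private context. */
    static int old_style_decode(AVCodecContext *avctx, void *data,
                                int *got_frame, AVPacket *avpkt)
    {
        MyContext *c = avctx->priv_data;

        if (c->pic.data[0])
            avctx->release_buffer(avctx, &c->pic); /* free the previous frame */
        if (ff_get_buffer(avctx, &c->pic) < 0)
            return -1;
        /* ... decode into c->pic ... */
        *(AVFrame *)data = c->pic;                 /* shallow struct copy out */
        *got_frame = 1;
        return avpkt->size;
    }

    /* After: decode straight into the caller's refcounted frame; no private
     * AVFrame, no cleanup in close(). */
    static int new_style_decode(AVCodecContext *avctx, void *data,
                                int *got_frame, AVPacket *avpkt)
    {
        AVFrame *frame = data;
        int ret;

        if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) /* refcounted buffer */
            return ret;
        /* ... decode into frame ... */
        *got_frame = 1;
        return avpkt->size;
    }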


@@ -57,6 +57,11 @@ which re-allocates them for other threads.
 Add CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little
 speed gain at this point but it should work.
 
+If there are inter-frame dependencies, so the codec calls
+ff_thread_report/await_progress(), set AVCodecInternal.allocate_progress. The
+frames must then be freed with ff_thread_release_buffer().
+Otherwise leave it at zero and decode directly into the user-supplied frames.
+
 Call ff_thread_report_progress() after some part of the current picture has decoded.
 A good place to put this is where draw_horiz_band() is called - add this if it isn't
 called anywhere, as it's useful too and the implementation is trivial when you're
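A minimal sketch of the calls described above, for a frame-threaded decoder with inter-frame dependencies (MyContext, mb_height and the per-row granularity are illustrative, not from this diff; the ThreadFrame-based signatures follow the post-merge thread.h):

    static void decode_rows(MyContext *s, ThreadFrame *cur, ThreadFrame *ref)
    {
        int row;

        for (row = 0; row < s->mb_height; row++) {
            /* block until the reference frame has decoded at least this row */
            ff_thread_await_progress(ref, row, 0);
            /* ... decode one row of macroblocks using data from ref ... */
            ff_thread_report_progress(cur, row, 0);
        }
    }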


@@ -29,15 +29,9 @@ static av_cold int zero12v_decode_init(AVCodecContext *avctx)
     avctx->pix_fmt = PIX_FMT_YUV422P16;
     avctx->bits_per_raw_sample = 10;
 
-    avctx->coded_frame = avcodec_alloc_frame();
-    if (!avctx->coded_frame)
-        return AVERROR(ENOMEM);
-
     if (avctx->codec_tag == MKTAG('a', '1', '2', 'v'))
         av_log_ask_for_sample(avctx, "Samples with actual transparency needed\n");
 
-    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
-    avctx->coded_frame->key_frame = 1;
-
     return 0;
 }
@@ -46,14 +40,11 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
 {
     int line = 0, ret;
     const int width = avctx->width;
-    AVFrame *pic = avctx->coded_frame;
+    AVFrame *pic = data;
     uint16_t *y, *u, *v;
     const uint8_t *line_end, *src = avpkt->data;
     int stride = avctx->width * 8 / 3;
 
-    if (pic->data[0])
-        avctx->release_buffer(avctx, pic);
-
     if (width == 1) {
         av_log(avctx, AV_LOG_ERROR, "Width 1 not supported.\n");
         return AVERROR_INVALIDDATA;
@@ -64,10 +55,12 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
         return AVERROR_INVALIDDATA;
     }
 
-    pic->reference = 0;
-    if ((ret = ff_get_buffer(avctx, pic)) < 0)
+    if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
         return ret;
 
+    pic->pict_type = AV_PICTURE_TYPE_I;
+    pic->key_frame = 1;
+
     y = (uint16_t *)pic->data[0];
     u = (uint16_t *)pic->data[1];
     v = (uint16_t *)pic->data[2];
@@ -145,27 +138,15 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     *got_frame = 1;
-    *(AVFrame*)data= *avctx->coded_frame;
 
     return avpkt->size;
 }
 
-static av_cold int zero12v_decode_close(AVCodecContext *avctx)
-{
-    AVFrame *pic = avctx->coded_frame;
-
-    if (pic->data[0])
-        avctx->release_buffer(avctx, pic);
-    av_freep(&avctx->coded_frame);
-    return 0;
-}
-
 AVCodec ff_zero12v_decoder = {
     .name         = "012v",
     .type         = AVMEDIA_TYPE_VIDEO,
     .id           = AV_CODEC_ID_012V,
     .init         = zero12v_decode_init,
-    .close        = zero12v_decode_close,
     .decode       = zero12v_decode_frame,
     .capabilities = CODEC_CAP_DR1,
     .long_name    = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),


@@ -24,6 +24,7 @@
  * 4XM codec.
  */
 
+#include "libavutil/frame.h"
 #include "libavutil/intreadwrite.h"
 #include "avcodec.h"
 #include "bytestream.h"
@@ -255,15 +256,15 @@ static av_cold void init_vlcs(FourXContext *f)
     }
 }
 
-static void init_mv(FourXContext *f)
+static void init_mv(FourXContext *f, int linesize)
 {
     int i;
 
     for (i = 0; i < 256; i++) {
         if (f->version > 1)
-            f->mv[i] = mv[i][0] + mv[i][1] * f->current_picture->linesize[0] / 2;
+            f->mv[i] = mv[i][0] + mv[i][1] * linesize / 2;
         else
-            f->mv[i] = (i & 15) - 8 + ((i >> 4) - 8) * f->current_picture->linesize[0] / 2;
+            f->mv[i] = (i & 15) - 8 + ((i >> 4) - 8) * linesize / 2;
     }
 }
@@ -404,14 +405,15 @@ static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
     }
 }
 
-static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
+static int decode_p_frame(FourXContext *f, AVFrame *frame,
+                          const uint8_t *buf, int length)
 {
     int x, y;
     const int width = f->avctx->width;
     const int height = f->avctx->height;
     uint16_t *src = (uint16_t *)f->last_picture->data[0];
-    uint16_t *dst = (uint16_t *)f->current_picture->data[0];
-    const int stride = f->current_picture->linesize[0] >> 1;
+    uint16_t *dst = (uint16_t *)frame->data[0];
+    const int stride = frame->linesize[0] >> 1;
     unsigned int bitstream_size, bytestream_size, wordstream_size, extra,
                  bytestream_offset, wordstream_offset;
@@ -455,7 +457,7 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
     bytestream2_init(&f->g, buf + bytestream_offset,
                      length - bytestream_offset);
 
-    init_mv(f);
+    init_mv(f, frame->linesize[0]);
 
     for (y = 0; y < height; y += 8) {
         for (x = 0; x < width; x += 8)
@@ -519,12 +521,12 @@ static int decode_i_block(FourXContext *f, int16_t *block)
     return 0;
 }
 
-static inline void idct_put(FourXContext *f, int x, int y)
+static inline void idct_put(FourXContext *f, AVFrame *frame, int x, int y)
 {
     int16_t (*block)[64] = f->block;
-    int stride = f->current_picture->linesize[0] >> 1;
+    int stride = frame->linesize[0] >> 1;
     int i;
-    uint16_t *dst = ((uint16_t*)f->current_picture->data[0]) + y * stride + x;
+    uint16_t *dst = ((uint16_t*)frame->data[0]) + y * stride + x;
 
     for (i = 0; i < 4; i++) {
         block[i][0] += 0x80 * 8 * 8;
@@ -682,14 +684,14 @@ static int mix(int c0, int c1)
     return red / 3 * 1024 + green / 3 * 32 + blue / 3;
 }
 
-static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length)
+static int decode_i2_frame(FourXContext *f, AVFrame *frame, const uint8_t *buf, int length)
 {
     int x, y, x2, y2;
     const int width = f->avctx->width;
     const int height = f->avctx->height;
     const int mbs = (FFALIGN(width, 16) >> 4) * (FFALIGN(height, 16) >> 4);
-    uint16_t *dst = (uint16_t*)f->current_picture->data[0];
-    const int stride = f->current_picture->linesize[0]>>1;
+    uint16_t *dst = (uint16_t*)frame->data[0];
+    const int stride = frame->linesize[0]>>1;
     const uint8_t *buf_end = buf + length;
     GetByteContext g3;
@@ -731,7 +733,7 @@ static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length)
     return 0;
 }
 
-static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
+static int decode_i_frame(FourXContext *f, AVFrame *frame, const uint8_t *buf, int length)
 {
     int x, y, ret;
     const int width = f->avctx->width;
@@ -785,7 +787,7 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
             if ((ret = decode_i_mb(f)) < 0)
                 return ret;
 
-            idct_put(f, x, y);
+            idct_put(f, frame, x, y);
         }
     }
@@ -802,7 +804,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
     int buf_size          = avpkt->size;
     FourXContext *const f = avctx->priv_data;
     AVFrame *picture      = data;
-    AVFrame *p;
     int i, frame_4cc, frame_size, ret;
 
     if (buf_size < 12)
@@ -880,34 +881,30 @@
     FFSWAP(AVFrame*, f->current_picture, f->last_picture);
 
-    p                  = f->current_picture;
-    avctx->coded_frame = p;
-
     // alternatively we would have to use our own buffer management
     avctx->flags |= CODEC_FLAG_EMU_EDGE;
 
-    p->reference= 3;
-    if ((ret = avctx->reget_buffer(avctx, p)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, f->current_picture)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
 
     if (frame_4cc == AV_RL32("ifr2")) {
-        p->pict_type= AV_PICTURE_TYPE_I;
-        if ((ret = decode_i2_frame(f, buf - 4, frame_size + 4)) < 0) {
+        f->current_picture->pict_type = AV_PICTURE_TYPE_I;
+        if ((ret = decode_i2_frame(f, f->current_picture, buf - 4, frame_size + 4)) < 0) {
            av_log(f->avctx, AV_LOG_ERROR, "decode i2 frame failed\n");
            return ret;
         }
     } else if (frame_4cc == AV_RL32("ifrm")) {
-        p->pict_type= AV_PICTURE_TYPE_I;
-        if ((ret = decode_i_frame(f, buf, frame_size)) < 0) {
+        f->current_picture->pict_type = AV_PICTURE_TYPE_I;
+        if ((ret = decode_i_frame(f, f->current_picture, buf, frame_size)) < 0) {
            av_log(f->avctx, AV_LOG_ERROR, "decode i frame failed\n");
            return ret;
         }
     } else if (frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")) {
         if (!f->last_picture->data[0]) {
-            f->last_picture->reference = 3;
-            if ((ret = ff_get_buffer(avctx, f->last_picture)) < 0) {
+            if ((ret = ff_get_buffer(avctx, f->last_picture,
+                                     AV_GET_BUFFER_FLAG_REF)) < 0) {
                 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                 return ret;
             }
@@ -915,8 +912,8 @@
             memset(f->last_picture->data[0] + i*f->last_picture->linesize[0], 0, 2*avctx->width);
         }
 
-        p->pict_type = AV_PICTURE_TYPE_P;
-        if ((ret = decode_p_frame(f, buf, frame_size)) < 0) {
+        f->current_picture->pict_type = AV_PICTURE_TYPE_P;
+        if ((ret = decode_p_frame(f, f->current_picture, buf, frame_size)) < 0) {
             av_log(f->avctx, AV_LOG_ERROR, "decode p frame failed\n");
             return ret;
         }
@@ -928,9 +925,10 @@
                buf_size);
     }
 
-    p->key_frame = p->pict_type == AV_PICTURE_TYPE_I;
+    f->current_picture->key_frame = f->current_picture->pict_type == AV_PICTURE_TYPE_I;
+    if ((ret = av_frame_ref(picture, f->current_picture)) < 0)
+        return ret;
 
-    *picture   = *p;
     *got_frame = 1;
 
     emms_c();
@@ -961,13 +959,10 @@ static av_cold int decode_init(AVCodecContext *avctx)
     else
         avctx->pix_fmt = AV_PIX_FMT_BGR555;
 
-    f->current_picture = avcodec_alloc_frame();
-    f->last_picture    = avcodec_alloc_frame();
-    if (!f->current_picture || !f->last_picture) {
-        avcodec_free_frame(&f->current_picture);
-        avcodec_free_frame(&f->last_picture);
+    f->current_picture = av_frame_alloc();
+    f->last_picture    = av_frame_alloc();
+    if (!f->current_picture || !f->last_picture)
         return AVERROR(ENOMEM);
-    }
 
     return 0;
 }
@@ -985,12 +980,8 @@ static av_cold int decode_end(AVCodecContext *avctx)
         f->cfrm[i].allocated_size = 0;
     }
     ff_free_vlc(&f->pre_vlc);
-    if (f->current_picture->data[0])
-        avctx->release_buffer(avctx, f->current_picture);
-    if (f->last_picture->data[0])
-        avctx->release_buffer(avctx, f->last_picture);
-    avcodec_free_frame(&f->current_picture);
-    avcodec_free_frame(&f->last_picture);
+    av_frame_free(&f->current_picture);
+    av_frame_free(&f->last_picture);
 
     return 0;
 }


@@ -46,7 +46,6 @@ static const enum AVPixelFormat pixfmt_rgb24[] = {
 
 typedef struct EightBpsContext {
     AVCodecContext *avctx;
-    AVFrame pic;
 
     unsigned char planes;
     unsigned char planemap[4];
@@ -57,6 +56,7 @@ typedef struct EightBpsContext {
 static int decode_frame(AVCodecContext *avctx, void *data,
                         int *got_frame, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     EightBpsContext * const c = avctx->priv_data;
@@ -70,12 +70,7 @@
     unsigned char *planemap = c->planemap;
     int ret;
 
-    if (c->pic.data[0])
-        avctx->release_buffer(avctx, &c->pic);
-
-    c->pic.reference = 0;
-    c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
-    if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -89,8 +84,8 @@
 
         /* Decode a plane */
         for (row = 0; row < height; row++) {
-            pixptr = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p];
-            pixptr_end = pixptr + c->pic.linesize[0];
+            pixptr = frame->data[0] + row * frame->linesize[0] + planemap[p];
+            pixptr_end = pixptr + frame->linesize[0];
             if(lp - encoded + row*2 + 1 >= buf_size)
                 return -1;
             dlen = av_be2ne16(*(const unsigned short *)(lp + row * 2));
@@ -129,15 +124,14 @@
                                              AV_PKT_DATA_PALETTE,
                                              NULL);
         if (pal) {
-            c->pic.palette_has_changed = 1;
+            frame->palette_has_changed = 1;
             memcpy(c->pal, pal, AVPALETTE_SIZE);
         }
 
-        memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
+        memcpy (frame->data[1], c->pal, AVPALETTE_SIZE);
     }
 
     *got_frame = 1;
-    *(AVFrame*)data = c->pic;
 
     /* always report that the buffer was completely consumed */
     return buf_size;
@@ -148,9 +142,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
     EightBpsContext * const c = avctx->priv_data;
 
     c->avctx = avctx;
-    c->pic.data[0] = NULL;
 
-    avcodec_get_frame_defaults(&c->pic);
     switch (avctx->bits_per_coded_sample) {
     case 8:
         avctx->pix_fmt = AV_PIX_FMT_PAL8;
@@ -188,23 +180,12 @@
     return 0;
 }
 
-static av_cold int decode_end(AVCodecContext *avctx)
-{
-    EightBpsContext * const c = avctx->priv_data;
-
-    if (c->pic.data[0])
-        avctx->release_buffer(avctx, &c->pic);
-
-    return 0;
-}
-
 AVCodec ff_eightbps_decoder = {
     .name           = "8bps",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_8BPS,
     .priv_data_size = sizeof(EightBpsContext),
     .init           = decode_init,
-    .close          = decode_end,
     .decode         = decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("QuickTime 8BPS video"),


@@ -136,7 +136,7 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = buf_size * 2;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }


@@ -190,8 +190,9 @@ static int frame_configure_elements(AVCodecContext *avctx)
     }
 
     /* get output buffer */
+    av_frame_unref(ac->frame);
     ac->frame->nb_samples = 2048;
-    if ((ret = ff_get_buffer(avctx, ac->frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, ac->frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }


@@ -29,12 +29,13 @@
 #include <string.h>
 
 #include "avcodec.h"
+#include "internal.h"
 #include "msrledec.h"
 
 typedef struct AascContext {
     AVCodecContext *avctx;
     GetByteContext gb;
-    AVFrame frame;
+    AVFrame *frame;
 
     uint32_t palette[AVPALETTE_COUNT];
     int palette_size;
@@ -68,7 +69,10 @@ static av_cold int aasc_decode_init(AVCodecContext *avctx)
         av_log(avctx, AV_LOG_ERROR, "Unsupported bit depth: %d\n", avctx->bits_per_coded_sample);
         return -1;
     }
-    avcodec_get_frame_defaults(&s->frame);
+
+    s->frame = av_frame_alloc();
+    if (!s->frame)
+        return AVERROR(ENOMEM);
 
     return 0;
 }
@@ -87,9 +91,7 @@
         return AVERROR_INVALIDDATA;
     }
 
-    s->frame.reference = 3;
-    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
-    if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
@@ -112,14 +114,14 @@
                 av_log(avctx, AV_LOG_ERROR, "Next line is beyond buffer bounds\n");
                 break;
             }
-            memcpy(s->frame.data[0] + i*s->frame.linesize[0], buf, avctx->width * psize);
+            memcpy(s->frame->data[0] + i * s->frame->linesize[0], buf, avctx->width * psize);
             buf += stride;
             buf_size -= stride;
         }
         break;
     case 1:
         bytestream2_init(&s->gb, buf, buf_size);
-        ff_msrle_decode(avctx, (AVPicture*)&s->frame, 8, &s->gb);
+        ff_msrle_decode(avctx, (AVPicture*)s->frame, 8, &s->gb);
         break;
     default:
         av_log(avctx, AV_LOG_ERROR, "Unknown compression type %d\n", compr);
@@ -132,10 +134,11 @@
     }
 
     if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
-        memcpy(s->frame.data[1], s->palette, s->palette_size);
+        memcpy(s->frame->data[1], s->palette, s->palette_size);
 
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
+    if ((ret = av_frame_ref(data, s->frame)) < 0)
+        return ret;
 
     /* report that the buffer was completely consumed */
     return buf_size;
@@ -145,9 +148,7 @@ static av_cold int aasc_decode_end(AVCodecContext *avctx)
 {
     AascContext *s = avctx->priv_data;
 
-    /* release the last frame */
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+    av_frame_free(&s->frame);
 
     return 0;
 }


@@ -1375,7 +1375,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = s->num_blocks * 256;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }


@@ -637,7 +637,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = nb_samples;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }


@@ -142,7 +142,7 @@ static int adx_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = num_blocks * BLOCK_SAMPLES;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }


@@ -289,7 +289,7 @@ static int decode_element(AVCodecContext *avctx, AVFrame *frame, int ch_index,
     if (!alac->nb_samples) {
         /* get output buffer */
         frame->nb_samples = output_samples;
-        if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+        if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return ret;
         }


@@ -1480,7 +1480,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
 
     /* get output buffer */
     frame->nb_samples = ctx->cur_frame_length;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed.\n");
         return ret;
     }


@@ -963,7 +963,7 @@ static int amrnb_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = AMR_BLOCK_SIZE;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }


@@ -1112,7 +1112,7 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = 4 * AMRWB_SFR_SIZE_16k;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }


@@ -26,9 +26,10 @@
 
 #include "avcodec.h"
 #include "bytestream.h"
+#include "internal.h"
 
 typedef struct AnmContext {
-    AVFrame frame;
+    AVFrame *frame;
     int palette[AVPALETTE_COUNT];
     GetByteContext gb;
     int x;  ///< x coordinate position
@@ -41,8 +42,10 @@ static av_cold int decode_init(AVCodecContext *avctx)
 
     avctx->pix_fmt = AV_PIX_FMT_PAL8;
 
-    avcodec_get_frame_defaults(&s->frame);
-    s->frame.reference = 3;
+    s->frame = av_frame_alloc();
+    if (!s->frame)
+        return AVERROR(ENOMEM);
+
     bytestream2_init(&s->gb, avctx->extradata, avctx->extradata_size);
     if (bytestream2_get_bytes_left(&s->gb) < 16 * 8 + 4 * 256)
         return AVERROR_INVALIDDATA;
@@ -114,12 +117,12 @@
     uint8_t *dst, *dst_end;
     int count, ret;
 
-    if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0){
+    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0){
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    dst     = s->frame.data[0];
-    dst_end = s->frame.data[0] + s->frame.linesize[0]*avctx->height;
+    dst     = s->frame->data[0];
+    dst_end = s->frame->data[0] + s->frame->linesize[0]*avctx->height;
 
     bytestream2_init(&s->gb, avpkt->data, buf_size);
@@ -137,7 +140,7 @@
     do {
         /* if statements are ordered by probability */
 #define OP(gb, pixel, count) \
-    op(&dst, dst_end, (gb), (pixel), (count), &s->x, avctx->width, s->frame.linesize[0])
+    op(&dst, dst_end, (gb), (pixel), (count), &s->x, avctx->width, s->frame->linesize[0])
 
         int type = bytestream2_get_byte(&s->gb);
         count = type & 0x7F;
@@ -169,18 +172,20 @@
         }
     } while (bytestream2_get_bytes_left(&s->gb) > 0);
 
-    memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+    memcpy(s->frame->data[1], s->palette, AVPALETTE_SIZE);
 
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
+    if ((ret = av_frame_ref(data, s->frame)) < 0)
+        return ret;
 
     return buf_size;
 }
 
 static av_cold int decode_end(AVCodecContext *avctx)
 {
     AnmContext *s = avctx->priv_data;
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+
+    av_frame_free(&s->frame);
 
     return 0;
 }


@@ -25,6 +25,7 @@
  */
 
 #include "libavutil/common.h"
+#include "libavutil/frame.h"
 #include "libavutil/lfg.h"
 #include "libavutil/xga_font_data.h"
 #include "avcodec.h"
@@ -50,7 +51,7 @@ static const uint8_t ansi_to_cga[16] = {
 };
 
 typedef struct {
-    AVFrame frame;
+    AVFrame *frame;
     int x;  /**< x cursor position (pixels) */
     int y;  /**< y cursor position (pixels) */
     int sx; /**< saved x cursor position (pixels) */
@@ -79,13 +80,16 @@ static av_cold int decode_init(AVCodecContext *avctx)
     AnsiContext *s = avctx->priv_data;
     avctx->pix_fmt = AV_PIX_FMT_PAL8;
 
+    s->frame = av_frame_alloc();
+    if (!s->frame)
+        return AVERROR(ENOMEM);
+
     /* defaults */
     s->font        = avpriv_vga16_font;
     s->font_height = 16;
     s->fg          = DEFAULT_FG_COLOR;
     s->bg          = DEFAULT_BG_COLOR;
 
-    avcodec_get_frame_defaults(&s->frame);
     if (!avctx->width || !avctx->height)
         avcodec_set_dimensions(avctx, 80<<3, 25<<4);
@@ -119,11 +123,11 @@ static void hscroll(AVCodecContext *avctx)
 
     i = 0;
     for (; i < avctx->height - s->font_height; i++)
-        memcpy(s->frame.data[0] + i * s->frame.linesize[0],
-               s->frame.data[0] + (i + s->font_height) * s->frame.linesize[0],
+        memcpy(s->frame->data[0] + i * s->frame->linesize[0],
+               s->frame->data[0] + (i + s->font_height) * s->frame->linesize[0],
                avctx->width);
     for (; i < avctx->height; i++)
-        memset(s->frame.data[0] + i * s->frame.linesize[0],
+        memset(s->frame->data[0] + i * s->frame->linesize[0],
                DEFAULT_BG_COLOR, avctx->width);
 }
@@ -132,7 +136,7 @@ static void erase_line(AVCodecContext * avctx, int xoffset, int xlength)
     AnsiContext *s = avctx->priv_data;
     int i;
     for (i = 0; i < s->font_height; i++)
-        memset(s->frame.data[0] + (s->y + i)*s->frame.linesize[0] + xoffset,
+        memset(s->frame->data[0] + (s->y + i)*s->frame->linesize[0] + xoffset,
                DEFAULT_BG_COLOR, xlength);
 }
@@ -141,7 +145,7 @@ static void erase_screen(AVCodecContext *avctx)
     AnsiContext *s = avctx->priv_data;
     int i;
     for (i = 0; i < avctx->height; i++)
-        memset(s->frame.data[0] + i * s->frame.linesize[0], DEFAULT_BG_COLOR, avctx->width);
+        memset(s->frame->data[0] + i * s->frame->linesize[0], DEFAULT_BG_COLOR, avctx->width);
     s->x = s->y = 0;
 }
@@ -162,8 +166,8 @@ static void draw_char(AVCodecContext *avctx, int c)
         FFSWAP(int, fg, bg);
     if ((s->attributes & ATTR_CONCEALED))
         fg = bg;
-    ff_draw_pc_font(s->frame.data[0] + s->y * s->frame.linesize[0] + s->x,
-                    s->frame.linesize[0], s->font, s->font_height, c, fg, bg);
+    ff_draw_pc_font(s->frame->data[0] + s->y * s->frame->linesize[0] + s->x,
+                    s->frame->linesize[0], s->font, s->font_height, c, fg, bg);
     s->x += FONT_WIDTH;
     if (s->x >= avctx->width) {
         s->x = 0;
@@ -240,17 +244,16 @@ static int execute_code(AVCodecContext * avctx, int c)
             av_log_ask_for_sample(avctx, "unsupported screen mode\n");
         }
         if (width != avctx->width || height != avctx->height) {
-            if (s->frame.data[0])
-                avctx->release_buffer(avctx, &s->frame);
+            av_frame_unref(s->frame);
             avcodec_set_dimensions(avctx, width, height);
-            ret = ff_get_buffer(avctx, &s->frame);
+            ret = ff_get_buffer(avctx, s->frame, AV_GET_BUFFER_FLAG_REF);
             if (ret < 0) {
                 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                 return ret;
             }
-            s->frame.pict_type           = AV_PICTURE_TYPE_I;
-            s->frame.palette_has_changed = 1;
-            set_palette((uint32_t *)s->frame.data[1]);
+            s->frame->pict_type           = AV_PICTURE_TYPE_I;
+            s->frame->palette_has_changed = 1;
+            set_palette((uint32_t *)s->frame->data[1]);
             erase_screen(avctx);
         } else if (c == 'l') {
             erase_screen(avctx);
@@ -261,13 +264,13 @@
         case 0:
             erase_line(avctx, s->x, avctx->width - s->x);
             if (s->y < avctx->height - s->font_height)
-                memset(s->frame.data[0] + (s->y + s->font_height)*s->frame.linesize[0],
-                       DEFAULT_BG_COLOR, (avctx->height - s->y - s->font_height)*s->frame.linesize[0]);
+                memset(s->frame->data[0] + (s->y + s->font_height)*s->frame->linesize[0],
+                       DEFAULT_BG_COLOR, (avctx->height - s->y - s->font_height)*s->frame->linesize[0]);
             break;
         case 1:
             erase_line(avctx, 0, s->x);
             if (s->y > 0)
-                memset(s->frame.data[0], DEFAULT_BG_COLOR, s->y * s->frame.linesize[0]);
+                memset(s->frame->data[0], DEFAULT_BG_COLOR, s->y * s->frame->linesize[0]);
             break;
         case 2:
             erase_screen(avctx);
@@ -348,20 +351,20 @@ static int decode_frame(AVCodecContext *avctx,
     const uint8_t *buf_end = buf+buf_size;
     int ret, i, count;
 
-    ret = avctx->reget_buffer(avctx, &s->frame);
+    ret = ff_reget_buffer(avctx, s->frame);
     if (ret < 0){
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
     if (!avctx->frame_number) {
         for (i=0; i<avctx->height; i++)
-            memset(s->frame.data[0]+ i*s->frame.linesize[0], 0, avctx->width);
-        memset(s->frame.data[1], 0, AVPALETTE_SIZE);
+            memset(s->frame->data[0]+ i*s->frame->linesize[0], 0, avctx->width);
+        memset(s->frame->data[1], 0, AVPALETTE_SIZE);
     }
 
-    s->frame.pict_type           = AV_PICTURE_TYPE_I;
-    s->frame.palette_has_changed = 1;
-    set_palette((uint32_t *)s->frame.data[1]);
+    s->frame->pict_type           = AV_PICTURE_TYPE_I;
+    s->frame->palette_has_changed = 1;
+    set_palette((uint32_t *)s->frame->data[1]);
     if (!s->first_frame) {
         erase_screen(avctx);
         s->first_frame = 1;
@@ -449,15 +452,16 @@
     }
 
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
+    if ((ret = av_frame_ref(data, s->frame)) < 0)
+        return ret;
 
     return buf_size;
 }
 
 static av_cold int decode_close(AVCodecContext *avctx)
 {
     AnsiContext *s = avctx->priv_data;
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+
+    av_frame_free(&s->frame);
     return 0;
 }


@@ -904,7 +904,7 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = blockstodecode;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }


@@ -180,14 +180,14 @@ static inline int decode_mb(ASV1Context *a, int16_t block[6][64])
     return 0;
 }
 
-static inline void idct_put(ASV1Context *a, int mb_x, int mb_y)
+static inline void idct_put(ASV1Context *a, AVFrame *frame, int mb_x, int mb_y)
 {
     int16_t (*block)[64] = a->block;
-    int linesize = a->picture.linesize[0];
+    int linesize = frame->linesize[0];
 
-    uint8_t *dest_y  = a->picture.data[0] + (mb_y * 16 * linesize) + mb_x * 16;
-    uint8_t *dest_cb = a->picture.data[1] + (mb_y * 8 * a->picture.linesize[1]) + mb_x * 8;
-    uint8_t *dest_cr = a->picture.data[2] + (mb_y * 8 * a->picture.linesize[2]) + mb_x * 8;
+    uint8_t *dest_y  = frame->data[0] + (mb_y * 16 * linesize) + mb_x * 16;
+    uint8_t *dest_cb = frame->data[1] + (mb_y * 8 * frame->linesize[1]) + mb_x * 8;
+    uint8_t *dest_cr = frame->data[2] + (mb_y * 8 * frame->linesize[2]) + mb_x * 8;
 
     a->dsp.idct_put(dest_y    , linesize, block[0]);
     a->dsp.idct_put(dest_y + 8, linesize, block[1]);
@@ -195,8 +195,8 @@ static inline void idct_put(ASV1Context *a, int mb_x, int mb_y)
     a->dsp.idct_put(dest_y + 8*linesize + 8, linesize, block[3]);
 
     if (!(a->avctx->flags&CODEC_FLAG_GRAY)) {
-        a->dsp.idct_put(dest_cb, a->picture.linesize[1], block[4]);
-        a->dsp.idct_put(dest_cr, a->picture.linesize[2], block[5]);
+        a->dsp.idct_put(dest_cb, frame->linesize[1], block[4]);
+        a->dsp.idct_put(dest_cr, frame->linesize[2], block[5]);
     }
 }
@@ -207,15 +207,10 @@ static int decode_frame(AVCodecContext *avctx,
     ASV1Context * const a = avctx->priv_data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
-    AVFrame *picture = data;
-    AVFrame * const p = &a->picture;
+    AVFrame * const p = data;
     int mb_x, mb_y, ret;
 
-    if (p->data[0])
-        avctx->release_buffer(avctx, p);
-
-    p->reference = 0;
-    if ((ret = ff_get_buffer(avctx, p)) < 0) {
+    if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -242,7 +237,7 @@
             if ((ret = decode_mb(a, a->block)) < 0)
                 return ret;
 
-            idct_put(a, mb_x, mb_y);
+            idct_put(a, p, mb_x, mb_y);
         }
     }
@@ -252,7 +247,7 @@
             if ((ret = decode_mb(a, a->block)) < 0)
                 return ret;
 
-            idct_put(a, mb_x, mb_y);
+            idct_put(a, p, mb_x, mb_y);
         }
     }
@@ -262,11 +257,10 @@
             if ((ret = decode_mb(a, a->block)) < 0)
                 return ret;
 
-            idct_put(a, mb_x, mb_y);
+            idct_put(a, p, mb_x, mb_y);
         }
     }
 
-    *picture   = a->picture;
     *got_frame = 1;
 
     emms_c();
@@ -277,7 +271,6 @@ static int decode_frame(AVCodecContext *avctx,
 static av_cold int decode_init(AVCodecContext *avctx)
 {
     ASV1Context * const a = avctx->priv_data;
-    AVFrame *p = &a->picture;
     const int scale = avctx->codec_id == AV_CODEC_ID_ASV1 ? 1 : 2;
     int i;
@@ -300,11 +293,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
         a->intra_matrix[i] = 64 * scale * ff_mpeg1_default_intra_matrix[index] / a->inv_qscale;
     }
 
-    p->qstride      = a->mb_width;
-    p->qscale_table = av_malloc(p->qstride * a->mb_height);
-    p->quality      = (32 * scale + a->inv_qscale / 2) / a->inv_qscale;
-    memset(p->qscale_table, p->quality, p->qstride * a->mb_height);
-
     return 0;
 }
@@ -313,12 +301,8 @@ static av_cold int decode_end(AVCodecContext *avctx)
     ASV1Context * const a = avctx->priv_data;
 
     av_freep(&a->bitstream_buffer);
-    av_freep(&a->picture.qscale_table);
     a->bitstream_buffer_size = 0;
 
-    if (a->picture.data[0])
-        avctx->release_buffer(avctx, &a->picture);
-
     return 0;
 }


@@ -287,7 +287,7 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = AT1_SU_SAMPLES;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }


@@ -813,7 +813,7 @@ static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = SAMPLES_PER_FRAME;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }


@@ -27,21 +27,12 @@
 #include "internal.h"
 #include "libavutil/internal.h"
 
-typedef struct AuraDecodeContext {
-    AVCodecContext *avctx;
-    AVFrame frame;
-} AuraDecodeContext;
-
 static av_cold int aura_decode_init(AVCodecContext *avctx)
 {
-    AuraDecodeContext *s = avctx->priv_data;
-
-    s->avctx = avctx;
     /* width needs to be divisible by 4 for this codec to work */
     if (avctx->width & 0x3)
         return AVERROR(EINVAL);
     avctx->pix_fmt = AV_PIX_FMT_YUV422P;
-    avcodec_get_frame_defaults(&s->frame);
 
     return 0;
 }
@@ -50,7 +41,7 @@
                              void *data, int *got_frame,
                              AVPacket *pkt)
 {
-    AuraDecodeContext *s = avctx->priv_data;
+    AVFrame *frame = data;
     uint8_t *Y, *U, *V;
     uint8_t val;
     int x, y, ret;
@@ -68,19 +59,14 @@
     /* pixel data starts 48 bytes in, after 3x16-byte tables */
     buf += 48;
 
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
-
-    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
-    s->frame.reference = 0;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
 
-    Y = s->frame.data[0];
-    U = s->frame.data[1];
-    V = s->frame.data[2];
+    Y = frame->data[0];
+    U = frame->data[1];
+    V = frame->data[2];
 
     /* iterate through each line in the height */
     for (y = 0; y < avctx->height; y++) {
@@ -103,34 +89,21 @@
             Y[1] = Y[ 0] + delta_table[val & 0xF];
             Y += 2; U++; V++;
         }
-        Y += s->frame.linesize[0] - avctx->width;
-        U += s->frame.linesize[1] - (avctx->width >> 1);
-        V += s->frame.linesize[2] - (avctx->width >> 1);
+        Y += frame->linesize[0] - avctx->width;
+        U += frame->linesize[1] - (avctx->width >> 1);
+        V += frame->linesize[2] - (avctx->width >> 1);
     }
 
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
 
     return pkt->size;
 }
 
-static av_cold int aura_decode_end(AVCodecContext *avctx)
-{
-    AuraDecodeContext *s = avctx->priv_data;
-
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
-
-    return 0;
-}
-
 AVCodec ff_aura2_decoder = {
     .name           = "aura2",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_AURA2,
-    .priv_data_size = sizeof(AuraDecodeContext),
     .init           = aura_decode_init,
-    .close          = aura_decode_end,
     .decode         = aura_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("Auravision Aura 2"),


@@ -894,6 +894,7 @@ typedef struct AVPanScan{
 #define FF_QSCALE_TYPE_H264  2
 #define FF_QSCALE_TYPE_VP56  3
 
+#if FF_API_GET_BUFFER
 #define FF_BUFFER_TYPE_INTERNAL 1
 #define FF_BUFFER_TYPE_USER     2 ///< direct rendering buffers (image is (de)allocated by user)
 #define FF_BUFFER_TYPE_SHARED   4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared.
@@ -903,6 +904,12 @@
 #define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer.
 #define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.
 #define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).
+#endif
+
+/**
+ * The decoder will keep a reference to the frame and may reuse it later.
+ */
+#define AV_GET_BUFFER_FLAG_REF (1 << 0)
 
 /**
  * @defgroup lavc_packet AVPacket
@@ -1982,6 +1989,7 @@
      */
     enum AVSampleFormat request_sample_fmt;
 
+#if FF_API_GET_BUFFER
     /**
      * Called at the beginning of each frame to get a buffer for it.
      *
@@ -2041,7 +2049,10 @@
      *
      * - encoding: unused
      * - decoding: Set by libavcodec, user can override.
+     *
+     * @deprecated use get_buffer2()
      */
+    attribute_deprecated
     int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic);
 
     /**
@@ -2052,7 +2063,10 @@
      * but not by more than one thread at once, so does not need to be reentrant.
      * - encoding: unused
     * - decoding: Set by libavcodec, user can override.
+     *
+     * @deprecated custom freeing callbacks should be set from get_buffer2()
      */
+    attribute_deprecated
     void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic);
 
     /**
@@ -2067,8 +2081,100 @@
      * - encoding: unused
      * - decoding: Set by libavcodec, user can override.
      */
+    attribute_deprecated
     int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic);
+#endif
+
+    /**
+     * This callback is called at the beginning of each frame to get data
+     * buffer(s) for it. There may be one contiguous buffer for all the data or
+     * there may be a buffer per each data plane or anything in between. Each
+     * buffer must be reference-counted using the AVBuffer API.
+     *
+     * The following fields will be set in the frame before this callback is
+     * called:
+     * - format
+     * - width, height (video only)
+     * - sample_rate, channel_layout, nb_samples (audio only)
+     * Their values may differ from the corresponding values in
+     * AVCodecContext. This callback must use the frame values, not the codec
+     * context values, to calculate the required buffer size.
+     *
+     * This callback must fill the following fields in the frame:
+     * - data[]
+     * - linesize[]
+     * - extended_data:
+     *   * if the data is planar audio with more than 8 channels, then this
+     *     callback must allocate and fill extended_data to contain all pointers
+     *     to all data planes. data[] must hold as many pointers as it can.
+     *     extended_data must be allocated with av_malloc() and will be freed in
+     *     av_frame_unref().
+     *   * otherwise exended_data must point to data
+     * - buf[] must contain references to the buffers that contain the frame
+     *   data.
+     * - extended_buf and nb_extended_buf must be allocated with av_malloc() by
+     *   this callback and filled with the extra buffers if there are more
+     *   buffers than buf[] can hold. extended_buf will be freed in
+     *   av_frame_unref().
+     *
+     * If CODEC_CAP_DR1 is not set then get_buffer2() must call
+     * avcodec_default_get_buffer2() instead of providing buffers allocated by
+     * some other means.
+     *
+     * Each data plane must be aligned to the maximum required by the target
+     * CPU.
+     *
+     * @see avcodec_default_get_buffer2()
+     *
+     * Video:
+     *
+     * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused
+     * (read and/or written to if it is writable) later by libavcodec.
+     *
+     * If CODEC_FLAG_EMU_EDGE is not set in s->flags, the buffer must contain an
+     * edge of the size returned by avcodec_get_edge_width() on all sides.
+     *
+     * avcodec_align_dimensions2() should be used to find the required width and
+     * height, as they normally need to be rounded up to the next multiple of 16.
+     *
+     * If frame multithreading is used and thread_safe_callbacks is set,
+     * this callback may be called from a different thread, but not from more
+     * than one at once. Does not need to be reentrant.
+     *
+     * @see avcodec_align_dimensions2()
+     *
+     * Audio:
+     *
+     * Decoders request a buffer of a particular size by setting
+     * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may,
+     * however, utilize only part of the buffer by setting AVFrame.nb_samples
+     * to a smaller value in the output frame.
+     *
+     * As a convenience, av_samples_get_buffer_size() and
+     * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2()
+     * functions to find the required data size and to fill data pointers and
+     * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+     * since all planes must be the same size.
+     *
+     * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+     *
+     * - encoding: unused
+     * - decoding: Set by libavcodec, user can override.
+     */
+    int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags);
+
+    /**
+     * If non-zero, the decoded audio and video frames returned from
+     * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted
+     * and are valid indefinitely. The caller must free them with
+     * av_frame_unref() when they are not needed anymore.
+     * Otherwise, the decoded frames must not be freed by the caller and are
+     * only valid until the next decode call.
+     *
+     * - encoding: unused
+     * - decoding: set by the caller before avcodec_open2().
+     */
+    int refcounted_frames;
 
     /* - encoding parameters */
     float qcompress;  ///< amount of qscale change between easy & hard scenes (0.0-1.0)
@ -3488,9 +3594,18 @@ AVCodec *avcodec_find_decoder(enum AVCodecID id);
*/ */
AVCodec *avcodec_find_decoder_by_name(const char *name); AVCodec *avcodec_find_decoder_by_name(const char *name);
int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic); #if FF_API_GET_BUFFER
void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic); attribute_deprecated int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic); attribute_deprecated void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
attribute_deprecated int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);
#endif
/**
* The default callback for AVCodecContext.get_buffer2(). It is made public so
* it can be called by custom get_buffer2() implementations for decoders without
* CODEC_CAP_DR1 set.
*/
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags);
/** /**
* Return the amount of padding in pixels which the get_buffer callback must * Return the amount of padding in pixels which the get_buffer callback must
@ -4465,8 +4580,6 @@ int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
*/ */
void avcodec_flush_buffers(AVCodecContext *avctx); void avcodec_flush_buffers(AVCodecContext *avctx);
void avcodec_default_free_buffers(AVCodecContext *s);
/** /**
* Return codec bits per sample. * Return codec bits per sample.
* *
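Note: the get_buffer2()/refcounted_frames API documented above is the centerpiece of this merge. The sketch below is illustrative only and not part of the commit; it assumes the lavc API of this era (avcodec_decode_video2(), av_frame_unref()) and elides most error handling. It shows a caller opting into refcounted frames and a pass-through get_buffer2() that simply delegates to avcodec_default_get_buffer2().

/* Illustrative sketch (not part of this commit). */
#include "libavcodec/avcodec.h"

/* A pass-through get_buffer2(): a real implementation would hand out its
 * own reference-counted AVBufferRefs, but delegating to the default
 * allocator like this is always valid. */
static int my_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
{
    return avcodec_default_get_buffer2(s, frame, flags);
}

static int open_decoder(AVCodecContext *avctx, AVCodec *codec)
{
    /* both fields must be set up before avcodec_open2() */
    avctx->get_buffer2       = my_get_buffer2;
    avctx->refcounted_frames = 1;
    return avcodec_open2(avctx, codec, NULL);
}

static int decode_one(AVCodecContext *avctx, AVPacket *pkt, AVFrame *frame)
{
    int got_frame, ret;

    ret = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
    if (ret < 0)
        return ret;
    if (got_frame) {
        /* with refcounted_frames set, the frame owns references to its
         * buffers and stays valid indefinitely ... */
        av_frame_unref(frame);  /* ... until the caller drops them */
    }
    return 0;
}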

libavcodec/avrndec.c

@@ -27,7 +27,6 @@
 typedef struct {
     MJpegDecodeContext mjpeg_ctx;
-    AVFrame frame;
     int is_mjpeg;
     int interlace; //FIXME use frame.interlaced_frame
     int tff;
@@ -52,7 +51,6 @@ static av_cold int init(AVCodecContext *avctx)
     if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
         return ret;

-    avcodec_get_frame_defaults(&a->frame);
     avctx->pix_fmt = AV_PIX_FMT_UYVY422;

     if(avctx->extradata_size >= 9 && avctx->extradata[4]+28 < avctx->extradata_size) {
@@ -69,10 +67,6 @@ static av_cold int init(AVCodecContext *avctx)
 static av_cold int end(AVCodecContext *avctx)
 {
     AVRnContext *a = avctx->priv_data;
-    AVFrame *p = &a->frame;
-
-    if(p->data[0])
-        avctx->release_buffer(avctx, p);

     if(a->is_mjpeg)
         ff_mjpeg_decode_end(avctx);
@@ -84,7 +78,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
                         int *got_frame, AVPacket *avpkt)
 {
     AVRnContext *a = avctx->priv_data;
-    AVFrame *p = &a->frame;
+    AVFrame *p = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     int y, ret, true_height;
@@ -93,15 +87,13 @@ static int decode_frame(AVCodecContext *avctx, void *data,
         return ff_mjpeg_decode_frame(avctx, data, got_frame, avpkt);

     true_height = buf_size / (2*avctx->width);
-    if(p->data[0])
-        avctx->release_buffer(avctx, p);

     if(buf_size < 2*avctx->width * avctx->height) {
         av_log(avctx, AV_LOG_ERROR, "packet too small\n");
         return AVERROR_INVALIDDATA;
     }

-    if((ret = ff_get_buffer(avctx, p)) < 0){
+    if((ret = ff_get_buffer(avctx, p, 0)) < 0){
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -123,7 +115,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
         }
     }

-    *(AVFrame*)data = a->frame;
     *got_frame = 1;
     return buf_size;
 }
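Note: the avrndec.c hunks above show the simplest flavor of the conversion, which repeats throughout this merge: the decoder writes straight into the AVFrame supplied by the caller instead of keeping one in its private context. Condensed into a hypothetical minimal decoder (names invented, decoding details elided):

/* Hypothetical minimal decoder condensing the pattern above. */
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    AVFrame *p = data;  /* was: AVFrame *p = &priv->frame; */
    int ret;

    /* ff_get_buffer() grew a flags argument; 0 means the frame is not
     * kept as a reference, and no release_buffer() call is needed */
    if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
        return ret;

    /* ... decode avpkt->data into p->data[] ... */

    *got_frame = 1;     /* the struct copy "*(AVFrame*)data = ..." is gone */
    return avpkt->size;
}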

libavcodec/avs.c

@@ -21,6 +21,7 @@

 #include "avcodec.h"
 #include "get_bits.h"
+#include "internal.h"

 typedef struct {
@@ -59,11 +60,10 @@ avs_decode_frame(AVCodecContext * avctx,
     AvsBlockType type;
     GetBitContext change_map = {0}; //init to silence warning

-    if ((ret = avctx->reget_buffer(avctx, p)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, p)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
-    p->reference = 3;
     p->pict_type = AV_PICTURE_TYPE_P;
     p->key_frame = 0;

@@ -151,7 +151,8 @@ avs_decode_frame(AVCodecContext * avctx,
         align_get_bits(&change_map);
     }

-    *picture = avs->picture;
+    if ((ret = av_frame_ref(picture, &avs->picture)) < 0)
+        return ret;
     *got_frame = 1;

     return buf_size;
@@ -169,8 +170,7 @@ static av_cold int avs_decode_init(AVCodecContext * avctx)
 static av_cold int avs_decode_end(AVCodecContext *avctx)
 {
     AvsContext *s = avctx->priv_data;
-    if (s->picture.data[0])
-        avctx->release_buffer(avctx, &s->picture);
+    av_frame_unref(&s->picture);
     return 0;
 }

libavcodec/avuidec.c

@@ -27,30 +27,19 @@
 static av_cold int avui_decode_init(AVCodecContext *avctx)
 {
     avctx->pix_fmt = AV_PIX_FMT_YUVA422P;
-
-    avctx->coded_frame = avcodec_alloc_frame();
-
-    if (!avctx->coded_frame) {
-        av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
-        return AVERROR(ENOMEM);
-    }
-
     return 0;
 }

 static int avui_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame, AVPacket *avpkt)
 {
-    AVFrame *pic = avctx->coded_frame;
+    AVFrame *pic = data;
     const uint8_t *src = avpkt->data, *extradata = avctx->extradata;
     const uint8_t *srca;
     uint8_t *y, *u, *v, *a;
     int transparent, interlaced = 1, skip, opaque_length, i, j, k;
     uint32_t extradata_size = avctx->extradata_size;

-    if (pic->data[0])
-        avctx->release_buffer(avctx, pic);
-
     while (extradata_size >= 24) {
         uint32_t atom_size = AV_RB32(extradata);
         if (!memcmp(&extradata[4], "APRGAPRG0001", 12)) {
@@ -78,9 +67,7 @@ static int avui_decode_frame(AVCodecContext *avctx, void *data,
                   avpkt->size >= opaque_length * 2 + 4;
     srca = src + opaque_length + 5;

-    pic->reference = 0;
-
-    if (ff_get_buffer(avctx, pic) < 0) {
+    if (ff_get_buffer(avctx, pic, 0) < 0) {
         av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
         return AVERROR(ENOMEM);
     }
@@ -129,28 +116,16 @@ static int avui_decode_frame(AVCodecContext *avctx, void *data,
         srca += 4;
     }
     *got_frame = 1;
-    *(AVFrame *)data = *pic;

     return avpkt->size;
 }

-static av_cold int avui_decode_close(AVCodecContext *avctx)
-{
-    if (avctx->coded_frame->data[0])
-        avctx->release_buffer(avctx, avctx->coded_frame);
-
-    av_freep(&avctx->coded_frame);
-
-    return 0;
-}
-
 AVCodec ff_avui_decoder = {
     .name         = "avui",
     .type         = AVMEDIA_TYPE_VIDEO,
     .id           = AV_CODEC_ID_AVUI,
     .init         = avui_decode_init,
     .decode       = avui_decode_frame,
-    .close        = avui_decode_close,
     .capabilities = CODEC_CAP_DR1,
     .long_name    = NULL_IF_CONFIG_SMALL("Avid Meridien Uncompressed"),
 };

libavcodec/bethsoftvideo.c

@@ -31,6 +31,7 @@
 #include "avcodec.h"
 #include "bethsoftvideo.h"
 #include "bytestream.h"
+#include "internal.h"

 typedef struct BethsoftvidContext {
     AVFrame frame;
@@ -41,9 +42,6 @@ static av_cold int bethsoftvid_decode_init(AVCodecContext *avctx)
 {
     BethsoftvidContext *vid = avctx->priv_data;
     avcodec_get_frame_defaults(&vid->frame);
-    vid->frame.reference = 3;
-    vid->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
-        FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
     avctx->pix_fmt = AV_PIX_FMT_PAL8;
     return 0;
 }
@@ -77,7 +75,7 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx,
     int code, ret;
     int yoffset;

-    if ((ret = avctx->reget_buffer(avctx, &vid->frame)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, &vid->frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
@@ -138,8 +136,10 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx,
     }
 end:

+    if ((ret = av_frame_ref(data, &vid->frame)) < 0)
+        return ret;
+
     *got_frame = 1;
-    *(AVFrame*)data = vid->frame;

     return avpkt->size;
 }
@@ -147,8 +147,7 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx,
 static av_cold int bethsoftvid_decode_end(AVCodecContext *avctx)
 {
     BethsoftvidContext * vid = avctx->priv_data;
-    if(vid->frame.data[0])
-        avctx->release_buffer(avctx, &vid->frame);
+    av_frame_unref(&vid->frame);
     return 0;
 }
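Note: bethsoftvideo.c above is the template for decoders that carry pixel state from frame to frame: the context keeps owning its AVFrame, updates it in place with ff_reget_buffer(), and hands the caller a fresh reference via av_frame_ref() instead of copying the struct. A condensed, hypothetical sketch (the context type is invented):

/* Hypothetical sketch of the inter-frame pattern above. */
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    MyContext *s = avctx->priv_data;  /* invented context holding an AVFrame */
    int ret;

    /* replaces avctx->reget_buffer() and the FF_BUFFER_HINTS_* setup */
    if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0)
        return ret;

    /* ... apply this packet's changes on top of the previous contents ... */

    /* was: *(AVFrame*)data = s->frame;  -- now a proper new reference */
    if ((ret = av_frame_ref(data, &s->frame)) < 0)
        return ret;
    *got_frame = 1;
    return avpkt->size;
}

On close, the owned frame is then dropped with av_frame_unref() (or av_frame_free() where it was heap-allocated), as the bethsoftvid and bintext hunks show.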

libavcodec/bfi.c

@@ -33,7 +33,6 @@

 typedef struct BFIContext {
     AVCodecContext *avctx;
-    AVFrame frame;
     uint8_t *dst;
     uint32_t pal[256];
 } BFIContext;
@@ -42,7 +41,6 @@ static av_cold int bfi_decode_init(AVCodecContext *avctx)
 {
     BFIContext *bfi = avctx->priv_data;
     avctx->pix_fmt  = AV_PIX_FMT_PAL8;
-    avcodec_get_frame_defaults(&bfi->frame);
     bfi->dst        = av_mallocz(avctx->width * avctx->height);
     return 0;
 }
@@ -50,6 +48,7 @@ static av_cold int bfi_decode_init(AVCodecContext *avctx)
 static int bfi_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     GetByteContext g;
     int buf_size    = avpkt->size;
     BFIContext *bfi = avctx->priv_data;
@@ -59,12 +58,7 @@ static int bfi_decode_frame(AVCodecContext *avctx, void *data,
     uint32_t *pal;
     int i, j, ret, height = avctx->height;

-    if (bfi->frame.data[0])
-        avctx->release_buffer(avctx, &bfi->frame);
-
-    bfi->frame.reference = 3;
-
-    if ((ret = ff_get_buffer(avctx, &bfi->frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -73,14 +67,14 @@ static int bfi_decode_frame(AVCodecContext *avctx, void *data,
     /* Set frame parameters and palette, if necessary */
     if (!avctx->frame_number) {
-        bfi->frame.pict_type = AV_PICTURE_TYPE_I;
-        bfi->frame.key_frame = 1;
+        frame->pict_type = AV_PICTURE_TYPE_I;
+        frame->key_frame = 1;
         /* Setting the palette */
         if (avctx->extradata_size > 768) {
             av_log(NULL, AV_LOG_ERROR, "Palette is too large.\n");
             return AVERROR_INVALIDDATA;
         }
-        pal = (uint32_t *)bfi->frame.data[1];
+        pal = (uint32_t *)frame->data[1];
         for (i = 0; i < avctx->extradata_size / 3; i++) {
             int shift = 16;
             *pal = 0xFFU << 24;
@@ -89,13 +83,13 @@ static int bfi_decode_frame(AVCodecContext *avctx, void *data,
                         (avctx->extradata[i * 3 + j] >> 4)) << shift;
             pal++;
         }
-        memcpy(bfi->pal, bfi->frame.data[1], sizeof(bfi->pal));
-        bfi->frame.palette_has_changed = 1;
+        memcpy(bfi->pal, frame->data[1], sizeof(bfi->pal));
+        frame->palette_has_changed = 1;
     } else {
-        bfi->frame.pict_type = AV_PICTURE_TYPE_P;
-        bfi->frame.key_frame = 0;
-        bfi->frame.palette_has_changed = 0;
-        memcpy(bfi->frame.data[1], bfi->pal, sizeof(bfi->pal));
+        frame->pict_type = AV_PICTURE_TYPE_P;
+        frame->key_frame = 0;
+        frame->palette_has_changed = 0;
+        memcpy(frame->data[1], bfi->pal, sizeof(bfi->pal));
     }

     bytestream2_skip(&g, 4); // Unpacked size, not required.
@@ -163,22 +157,20 @@ static int bfi_decode_frame(AVCodecContext *avctx, void *data,
     }

     src = bfi->dst;
-    dst = bfi->frame.data[0];
+    dst = frame->data[0];
     while (height--) {
         memcpy(dst, src, avctx->width);
         src += avctx->width;
-        dst += bfi->frame.linesize[0];
+        dst += frame->linesize[0];
     }
     *got_frame = 1;
-    *(AVFrame *)data = bfi->frame;
     return buf_size;
 }

 static av_cold int bfi_decode_close(AVCodecContext *avctx)
 {
     BFIContext *bfi = avctx->priv_data;
-    if (bfi->frame.data[0])
-        avctx->release_buffer(avctx, &bfi->frame);
     av_free(bfi->dst);
     return 0;
 }

libavcodec/bink.c

@@ -113,7 +113,7 @@ typedef struct BinkContext {
     AVCodecContext *avctx;
     DSPContext     dsp;
     BinkDSPContext bdsp;
-    AVFrame        *pic, *last;
+    AVFrame        *last;
     int            version;              ///< internal Bink file version
     int            has_alpha;
     int            swap_planes;
@@ -800,8 +800,8 @@ static inline void put_pixels8x8_overlapped(uint8_t *dst, uint8_t *src, int stri
         memcpy(dst + i*stride, tmp + i*8, 8);
 }

-static int binkb_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
-                              int is_key, int is_chroma)
+static int binkb_decode_plane(BinkContext *c, AVFrame *frame, GetBitContext *gb,
+                              int plane_idx, int is_key, int is_chroma)
 {
     int blk, ret;
     int i, j, bx, by;
@@ -815,13 +815,13 @@ static int binkb_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
     int ybias = is_key ? -15 : 0;
     int qp;

-    const int stride = c->pic->linesize[plane_idx];
+    const int stride = frame->linesize[plane_idx];
     int bw = is_chroma ? (c->avctx->width  + 15) >> 4 : (c->avctx->width  + 7) >> 3;
     int bh = is_chroma ? (c->avctx->height + 15) >> 4 : (c->avctx->height + 7) >> 3;

     binkb_init_bundles(c);
-    ref_start = c->pic->data[plane_idx];
-    ref_end   = c->pic->data[plane_idx] + (bh * c->pic->linesize[plane_idx] + bw) * 8;
+    ref_start = frame->data[plane_idx];
+    ref_end   = frame->data[plane_idx] + (bh * frame->linesize[plane_idx] + bw) * 8;

     for (i = 0; i < 64; i++)
         coordmap[i] = (i & 7) + (i >> 3) * stride;
@@ -832,7 +832,7 @@ static int binkb_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
             return ret;
         }

-        dst  = c->pic->data[plane_idx]  + 8*by*stride;
+        dst  = frame->data[plane_idx]  + 8*by*stride;
         for (bx = 0; bx < bw; bx++, dst += 8) {
             blk = binkb_get_value(c, BINKB_SRC_BLOCK_TYPES);
             switch (blk) {
@@ -946,8 +946,8 @@ static int binkb_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
     return 0;
 }

-static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
-                             int is_chroma)
+static int bink_decode_plane(BinkContext *c, AVFrame *frame, GetBitContext *gb,
+                             int plane_idx, int is_chroma)
 {
     int blk, ret;
     int i, j, bx, by;
@@ -960,7 +960,7 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
     LOCAL_ALIGNED_16(int32_t, dctblock, [64]);
     int coordmap[64];

-    const int stride = c->pic->linesize[plane_idx];
+    const int stride = frame->linesize[plane_idx];
     int bw = is_chroma ? (c->avctx->width  + 15) >> 4 : (c->avctx->width  + 7) >> 3;
     int bh = is_chroma ? (c->avctx->height + 15) >> 4 : (c->avctx->height + 7) >> 3;
     int width = c->avctx->width >> is_chroma;
@@ -970,7 +970,7 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
         read_bundle(gb, c, i);

     ref_start = c->last->data[plane_idx] ? c->last->data[plane_idx]
-                                         : c->pic->data[plane_idx];
+                                         : frame->data[plane_idx];
     ref_end   = ref_start
                 + (bw - 1 + c->last->linesize[plane_idx] * (bh - 1)) * 8;
@@ -999,9 +999,9 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
         if (by == bh)
             break;

-        dst  = c->pic->data[plane_idx]  + 8*by*stride;
+        dst  = frame->data[plane_idx]  + 8*by*stride;
         prev = (c->last->data[plane_idx] ? c->last->data[plane_idx]
-                                         : c->pic->data[plane_idx]) + 8*by*stride;
+                                         : frame->data[plane_idx]) + 8*by*stride;
         for (bx = 0; bx < bw; bx++, dst += 8, prev += 8) {
             blk = get_value(c, BINK_SRC_BLOCK_TYPES);
             // 16x16 block type on odd line means part of the already decoded block, so skip it
@@ -1178,30 +1178,30 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt)
 {
     BinkContext * const c = avctx->priv_data;
+    AVFrame *frame = data;
     GetBitContext gb;
     int plane, plane_idx, ret;
     int bits_count = pkt->size << 3;

     if (c->version > 'b') {
-        if(c->pic->data[0])
-            avctx->release_buffer(avctx, c->pic);
-
-        if ((ret = ff_get_buffer(avctx, c->pic)) < 0) {
+        if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return ret;
         }
     } else {
-        if ((ret = avctx->reget_buffer(avctx, c->pic)) < 0) {
+        if ((ret = ff_reget_buffer(avctx, c->last)) < 0) {
             av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
             return ret;
         }
+        if ((ret = av_frame_ref(frame, c->last)) < 0)
+            return ret;
     }

     init_get_bits(&gb, pkt->data, bits_count);
     if (c->has_alpha) {
         if (c->version >= 'i')
             skip_bits_long(&gb, 32);
-        if ((ret = bink_decode_plane(c, &gb, 3, 0)) < 0)
+        if ((ret = bink_decode_plane(c, frame, &gb, 3, 0)) < 0)
             return ret;
     }
     if (c->version >= 'i')
@@ -1211,10 +1211,10 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
         plane_idx = (!plane || !c->swap_planes) ? plane : (plane ^ 3);

         if (c->version > 'b') {
-            if ((ret = bink_decode_plane(c, &gb, plane_idx, !!plane)) < 0)
+            if ((ret = bink_decode_plane(c, frame, &gb, plane_idx, !!plane)) < 0)
                 return ret;
         } else {
-            if ((ret = binkb_decode_plane(c, &gb, plane_idx,
+            if ((ret = binkb_decode_plane(c, frame, &gb, plane_idx,
                                           !avctx->frame_number, !!plane)) < 0)
                 return ret;
         }
@@ -1223,11 +1223,13 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
     }
     emms_c();

-    *got_frame = 1;
-    *(AVFrame*)data = *c->pic;
+    if (c->version > 'b') {
+        av_frame_unref(c->last);
+        if ((ret = av_frame_ref(c->last, frame)) < 0)
+            return ret;
+    }

-    if (c->version > 'b')
-        FFSWAP(AVFrame*, c->pic, c->last);
+    *got_frame = 1;

     /* always report that the buffer was completely consumed */
     return pkt->size;
@@ -1293,13 +1295,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
     }
     c->avctx = avctx;

-    c->pic  = avcodec_alloc_frame();
-    c->last = avcodec_alloc_frame();
-    if (!c->pic || !c->last) {
-        avcodec_free_frame(&c->pic);
-        avcodec_free_frame(&c->last);
+    c->last = av_frame_alloc();
+    if (!c->last)
         return AVERROR(ENOMEM);
-    }

     if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
         return ret;
@@ -1328,12 +1326,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
 {
     BinkContext * const c = avctx->priv_data;

-    if (c->pic->data[0])
-        avctx->release_buffer(avctx, c->pic);
-    if (c->last->data[0])
-        avctx->release_buffer(avctx, c->last);
-    avcodec_free_frame(&c->pic);
-    avcodec_free_frame(&c->last);
+    av_frame_free(&c->last);

     free_bundles(c);
     return 0;
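Note: bink.c above shows the third recurring shape: a decoder that needs its previous output as a prediction reference requests the buffer with AV_GET_BUFFER_FLAG_REF and then keeps its own reference, instead of juggling two context-owned frames with FFSWAP. A hypothetical condensed sketch (MyContext is invented):

/* Hypothetical sketch of the reference-frame pattern above: request a
 * buffer libavcodec may read again later, then keep a private reference
 * to it for predicting the next frame. */
static int decode_keep_reference(AVCodecContext *avctx, MyContext *c,
                                 AVFrame *frame)
{
    int ret;

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    /* ... decode into frame, predicting from c->last where needed ... */

    av_frame_unref(c->last);             /* drop the old reference */
    return av_frame_ref(c->last, frame); /* keep the new one */
}

Since av_frame_ref() only duplicates buffer references, keeping the previous frame this way costs no pixel copy.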

libavcodec/binkaudio.c

@@ -318,7 +318,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,

     /* get output buffer */
     frame->nb_samples = s->frame_len;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

libavcodec/bintext.c

@@ -33,9 +33,10 @@
 #include "avcodec.h"
 #include "cga_data.h"
 #include "bintext.h"
+#include "internal.h"

 typedef struct XbinContext {
-    AVFrame frame;
+    AVFrame *frame;
     int palette[16];
     int flags;
     int font_height;
@@ -91,6 +92,10 @@ static av_cold int decode_init(AVCodecContext *avctx)
         }
     }

+    s->frame = av_frame_alloc();
+    if (!s->frame)
+        return AVERROR(ENOMEM);
+
     return 0;
 }

@@ -101,10 +106,10 @@ av_unused static void hscroll(AVCodecContext *avctx)
     if (s->y < avctx->height - s->font_height) {
         s->y += s->font_height;
     } else {
-        memmove(s->frame.data[0], s->frame.data[0] + s->font_height*s->frame.linesize[0],
-            (avctx->height - s->font_height)*s->frame.linesize[0]);
-        memset(s->frame.data[0] + (avctx->height - s->font_height)*s->frame.linesize[0],
-            DEFAULT_BG_COLOR, s->font_height * s->frame.linesize[0]);
+        memmove(s->frame->data[0], s->frame->data[0] + s->font_height*s->frame->linesize[0],
+            (avctx->height - s->font_height)*s->frame->linesize[0]);
+        memset(s->frame->data[0] + (avctx->height - s->font_height)*s->frame->linesize[0],
+            DEFAULT_BG_COLOR, s->font_height * s->frame->linesize[0]);
     }
 }

@@ -118,8 +123,8 @@ static void draw_char(AVCodecContext *avctx, int c, int a)
     XbinContext *s = avctx->priv_data;
     if (s->y > avctx->height - s->font_height)
         return;
-    ff_draw_pc_font(s->frame.data[0] + s->y * s->frame.linesize[0] + s->x,
-                    s->frame.linesize[0], s->font, s->font_height, c,
+    ff_draw_pc_font(s->frame->data[0] + s->y * s->frame->linesize[0] + s->x,
+                    s->frame->linesize[0], s->font, s->font_height, c,
                     a & 0x0F, a >> 4);
     s->x += FONT_WIDTH;
     if (s->x > avctx->width - FONT_WIDTH) {
@@ -136,18 +141,16 @@ static int decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     const uint8_t *buf_end = buf+buf_size;
+    int ret;

     s->x = s->y = 0;
-    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
-                            FF_BUFFER_HINTS_PRESERVE |
-                            FF_BUFFER_HINTS_REUSABLE;
-    if (avctx->reget_buffer(avctx, &s->frame)) {
+    if (ff_reget_buffer(avctx, s->frame) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return -1;
     }
-    s->frame.pict_type           = AV_PICTURE_TYPE_I;
-    s->frame.palette_has_changed = 1;
-    memcpy(s->frame.data[1], s->palette, 16 * 4);
+    s->frame->pict_type           = AV_PICTURE_TYPE_I;
+    s->frame->palette_has_changed = 1;
+    memcpy(s->frame->data[1], s->palette, 16 * 4);

     if (avctx->codec_id == AV_CODEC_ID_XBIN) {
         while (buf + 2 < buf_end) {
@@ -201,8 +204,9 @@ static int decode_frame(AVCodecContext *avctx,
         }
     }

+    if ((ret = av_frame_ref(data, s->frame)) < 0)
+        return ret;
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
     return buf_size;
 }

@@ -210,8 +214,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
 {
     XbinContext *s = avctx->priv_data;

-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+    av_frame_free(&s->frame);

     return 0;
 }

libavcodec/bmp.c

@@ -25,25 +25,13 @@
 #include "internal.h"
 #include "msrledec.h"

-static av_cold int bmp_decode_init(AVCodecContext *avctx)
-{
-    BMPContext *s = avctx->priv_data;
-
-    avcodec_get_frame_defaults(&s->picture);
-    avctx->coded_frame = &s->picture;
-
-    return 0;
-}
-
 static int bmp_decode_frame(AVCodecContext *avctx,
                             void *data, int *got_frame,
                             AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
-    BMPContext *s      = avctx->priv_data;
-    AVFrame *picture   = data;
-    AVFrame *p         = &s->picture;
+    AVFrame *p         = data;
     unsigned int fsize, hsize;
     int width, height;
     unsigned int depth;
@@ -208,11 +196,7 @@ static int bmp_decode_frame(AVCodecContext *avctx,
         return AVERROR_INVALIDDATA;
     }

-    if (p->data[0])
-        avctx->release_buffer(avctx, p);
-
-    p->reference = 0;
-    if ((ret = ff_get_buffer(avctx, p)) < 0) {
+    if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -341,29 +325,15 @@ static int bmp_decode_frame(AVCodecContext *avctx,
         }
     }

-    *picture = s->picture;
     *got_frame = 1;

     return buf_size;
 }

-static av_cold int bmp_decode_end(AVCodecContext *avctx)
-{
-    BMPContext* c = avctx->priv_data;
-
-    if (c->picture.data[0])
-        avctx->release_buffer(avctx, &c->picture);
-
-    return 0;
-}
-
 AVCodec ff_bmp_decoder = {
     .name           = "bmp",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_BMP,
-    .priv_data_size = sizeof(BMPContext),
-    .init           = bmp_decode_init,
-    .close          = bmp_decode_end,
     .decode         = bmp_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("BMP (Windows and OS/2 bitmap)"),

libavcodec/bmv.c

@@ -44,7 +44,6 @@ enum BMVFlags{

 typedef struct BMVDecContext {
     AVCodecContext *avctx;
-    AVFrame pic;

     uint8_t *frame, frame_base[SCREEN_WIDE * (SCREEN_HIGH + 1)];
     uint32_t pal[256];
@@ -200,6 +199,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                         AVPacket *pkt)
 {
     BMVDecContext * const c = avctx->priv_data;
+    AVFrame *frame = data;
     int type, scr_off;
     int i, ret;
     uint8_t *srcptr, *outptr;
@@ -242,11 +242,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         scr_off = 0;
     }

-    if (c->pic.data[0])
-        avctx->release_buffer(avctx, &c->pic);
-
-    c->pic.reference = 3;
-    if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -256,20 +252,19 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         return AVERROR_INVALIDDATA;
     }

-    memcpy(c->pic.data[1], c->pal, AVPALETTE_SIZE);
-    c->pic.palette_has_changed = type & BMV_PALETTE;
+    memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
+    frame->palette_has_changed = type & BMV_PALETTE;

-    outptr = c->pic.data[0];
+    outptr = frame->data[0];
     srcptr = c->frame;

     for (i = 0; i < avctx->height; i++) {
         memcpy(outptr, srcptr, avctx->width);
         srcptr += avctx->width;
-        outptr += c->pic.linesize[0];
+        outptr += frame->linesize[0];
     }

     *got_frame = 1;
-    *(AVFrame*)data = c->pic;

     /* always report that the buffer was completely consumed */
     return pkt->size;
@@ -292,16 +287,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
     return 0;
 }

-static av_cold int decode_end(AVCodecContext *avctx)
-{
-    BMVDecContext *c = avctx->priv_data;
-
-    if (c->pic.data[0])
-        avctx->release_buffer(avctx, &c->pic);
-
-    return 0;
-}
-
 static const int bmv_aud_mults[16] = {
     16512, 8256, 4128, 2064, 1032, 516, 258, 192, 129, 88, 64, 56, 48, 40, 36, 32
 };
@@ -335,7 +320,7 @@ static int bmv_aud_decode_frame(AVCodecContext *avctx, void *data,

     /* get output buffer */
     frame->nb_samples = total_blocks * 32;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -363,7 +348,6 @@ AVCodec ff_bmv_video_decoder = {
     .id             = AV_CODEC_ID_BMV_VIDEO,
     .priv_data_size = sizeof(BMVDecContext),
     .init           = decode_init,
-    .close          = decode_end,
     .decode         = decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("Discworld II BMV video"),

libavcodec/brender_pix.c

@@ -30,25 +30,11 @@
 #include "bytestream.h"
 #include "internal.h"

-typedef struct BRPixContext {
-    AVFrame frame;
-} BRPixContext;
-
 typedef struct BRPixHeader {
     int format;
     unsigned int width, height;
 } BRPixHeader;

-static av_cold int brpix_init(AVCodecContext *avctx)
-{
-    BRPixContext *s = avctx->priv_data;
-
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
-    return 0;
-}
-
 static int brpix_decode_header(BRPixHeader *out, GetByteContext *pgb)
 {
     unsigned int header_len = bytestream2_get_be32(pgb);
@@ -73,8 +59,7 @@ static int brpix_decode_frame(AVCodecContext *avctx,
                               void *data, int *got_frame,
                               AVPacket *avpkt)
 {
-    BRPixContext *s = avctx->priv_data;
-    AVFrame *frame_out = data;
+    AVFrame *frame = data;

     int ret;
     GetByteContext gb;
@@ -143,16 +128,13 @@ static int brpix_decode_frame(AVCodecContext *avctx,
         return AVERROR_PATCHWELCOME;
     }

-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
-
     if (av_image_check_size(hdr.width, hdr.height, 0, avctx) < 0)
         return AVERROR_INVALIDDATA;

     if (hdr.width != avctx->width || hdr.height != avctx->height)
         avcodec_set_dimensions(avctx, hdr.width, hdr.height);

-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -162,7 +144,7 @@ static int brpix_decode_frame(AVCodecContext *avctx,
     if (avctx->pix_fmt == AV_PIX_FMT_PAL8 &&
         (chunk_type == 0x3 || chunk_type == 0x3d)) {
         BRPixHeader palhdr;
-        uint32_t *pal_out = (uint32_t *)s->frame.data[1];
+        uint32_t *pal_out = (uint32_t *)frame->data[1];
         int i;

         ret = brpix_decode_header(&palhdr, &gb);
@@ -190,17 +172,17 @@ static int brpix_decode_frame(AVCodecContext *avctx,
         }
         bytestream2_skip(&gb, 8);

-        s->frame.palette_has_changed = 1;
+        frame->palette_has_changed = 1;

         chunk_type = bytestream2_get_be32(&gb);
     } else if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
-        uint32_t *pal_out = (uint32_t *)s->frame.data[1];
+        uint32_t *pal_out = (uint32_t *)frame->data[1];
         int i;

         for (i = 0; i < 256; ++i) {
             *pal_out++ = (0xFFU << 24) | (i * 0x010101);
         }
-        s->frame.palette_has_changed = 1;
+        frame->palette_has_changed = 1;
     }

     data_len = bytestream2_get_be32(&gb);
@@ -218,35 +200,21 @@ static int brpix_decode_frame(AVCodecContext *avctx,
             return AVERROR_INVALIDDATA;
         }

-        av_image_copy_plane(s->frame.data[0], s->frame.linesize[0],
+        av_image_copy_plane(frame->data[0], frame->linesize[0],
                             avpkt->data + bytestream2_tell(&gb),
                             bytes_per_scanline,
                             bytes_per_scanline, hdr.height);
     }

-    *frame_out = s->frame;
     *got_frame = 1;

     return avpkt->size;
 }

-static av_cold int brpix_end(AVCodecContext *avctx)
-{
-    BRPixContext *s = avctx->priv_data;
-
-    if(s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
-
-    return 0;
-}
-
 AVCodec ff_brender_pix_decoder = {
     .name           = "brender_pix",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_BRENDER_PIX,
-    .priv_data_size = sizeof(BRPixContext),
-    .init           = brpix_init,
-    .close          = brpix_end,
     .decode         = brpix_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("BRender PIX image"),

libavcodec/c93.c

@@ -21,6 +21,7 @@

 #include "avcodec.h"
 #include "bytestream.h"
+#include "internal.h"

 typedef struct {
     AVFrame pictures[2];
@@ -59,10 +60,9 @@ static av_cold int decode_end(AVCodecContext *avctx)
 {
     C93DecoderContext * const c93 = avctx->priv_data;

-    if (c93->pictures[0].data[0])
-        avctx->release_buffer(avctx, &c93->pictures[0]);
-    if (c93->pictures[1].data[0])
-        avctx->release_buffer(avctx, &c93->pictures[1]);
+    av_frame_unref(&c93->pictures[0]);
+    av_frame_unref(&c93->pictures[1]);
     return 0;
 }

@@ -124,17 +124,13 @@ static int decode_frame(AVCodecContext *avctx, void *data,
     C93DecoderContext * const c93 = avctx->priv_data;
     AVFrame * const newpic = &c93->pictures[c93->currentpic];
     AVFrame * const oldpic = &c93->pictures[c93->currentpic^1];
-    AVFrame *picture = data;
     GetByteContext gb;
     uint8_t *out;
     int stride, ret, i, x, y, b, bt = 0;

     c93->currentpic ^= 1;

-    newpic->reference = 3;
-    newpic->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
-                           FF_BUFFER_HINTS_REUSABLE | FF_BUFFER_HINTS_READABLE;
-    if ((ret = avctx->reget_buffer(avctx, newpic)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, newpic)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
@@ -243,7 +239,8 @@ static int decode_frame(AVCodecContext *avctx, void *data,
         memcpy(newpic->data[1], oldpic->data[1], 256 * 4);
     }

-    *picture = *newpic;
+    if ((ret = av_frame_ref(data, newpic)) < 0)
+        return ret;
     *got_frame = 1;

     return buf_size;

libavcodec/cavs.c

@@ -738,9 +738,9 @@ av_cold int ff_cavs_init(AVCodecContext *avctx) {
     h->avctx = avctx;
     avctx->pix_fmt= AV_PIX_FMT_YUV420P;

-    h->cur.f    = avcodec_alloc_frame();
-    h->DPB[0].f = avcodec_alloc_frame();
-    h->DPB[1].f = avcodec_alloc_frame();
+    h->cur.f    = av_frame_alloc();
+    h->DPB[0].f = av_frame_alloc();
+    h->DPB[1].f = av_frame_alloc();
     if (!h->cur.f || !h->DPB[0].f || !h->DPB[1].f) {
         ff_cavs_end(avctx);
         return AVERROR(ENOMEM);
@@ -771,15 +771,9 @@ av_cold int ff_cavs_init(AVCodecContext *avctx) {
 av_cold int ff_cavs_end(AVCodecContext *avctx) {
     AVSContext *h = avctx->priv_data;

-    if (h->cur.f->data[0])
-        avctx->release_buffer(avctx, h->cur.f);
-    if (h->DPB[0].f->data[0])
-        avctx->release_buffer(avctx, h->DPB[0].f);
-    if (h->DPB[1].f->data[0])
-        avctx->release_buffer(avctx, h->DPB[1].f);
-    avcodec_free_frame(&h->cur.f);
-    avcodec_free_frame(&h->DPB[0].f);
-    avcodec_free_frame(&h->DPB[1].f);
+    av_frame_free(&h->cur.f);
+    av_frame_free(&h->DPB[0].f);
+    av_frame_free(&h->DPB[1].f);

     av_free(h->top_qp);
     av_free(h->top_mv[0]);

libavcodec/cavsdec.c

@@ -948,6 +948,8 @@ static int decode_pic(AVSContext *h)
     int ret;
     enum cavs_mb mb_type;

+    av_frame_unref(h->cur.f);
+
     skip_bits(&h->gb, 16);//bbv_dwlay
     if (h->stc == PIC_PB_START_CODE) {
         h->cur.f->pict_type = get_bits(&h->gb, 2) + AV_PICTURE_TYPE_I;
@@ -973,11 +975,10 @@ static int decode_pic(AVSContext *h)
         if (h->stream_revision > 0)
             skip_bits(&h->gb, 1); //marker_bit
     }
-    /* release last B frame */
-    if (h->cur.f->data[0])
-        h->avctx->release_buffer(h->avctx, h->cur.f);

-    if ((ret = ff_get_buffer(h->avctx, h->cur.f)) < 0)
+    if ((ret = ff_get_buffer(h->avctx, h->cur.f,
+                             h->cur.f->pict_type == AV_PICTURE_TYPE_B ?
+                             0 : AV_GET_BUFFER_FLAG_REF)) < 0)
         return ret;

     if (!h->edge_emu_buffer) {
@@ -1075,8 +1076,7 @@ static int decode_pic(AVSContext *h)
         } while (ff_cavs_next_mb(h));
     }
     if (h->cur.f->pict_type != AV_PICTURE_TYPE_B) {
-        if (h->DPB[1].f->data[0])
-            h->avctx->release_buffer(h->avctx, h->DPB[1].f);
+        av_frame_unref(h->DPB[1].f);
         FFSWAP(AVSFrame, h->cur, h->DPB[1]);
         FFSWAP(AVSFrame, h->DPB[0], h->DPB[1]);
     }
@@ -1142,19 +1142,15 @@ static int cavs_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     AVSContext *h = avctx->priv_data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
-    AVFrame *picture = data;
     uint32_t stc = -1;
-    int input_size;
+    int input_size, ret;
     const uint8_t *buf_end;
     const uint8_t *buf_ptr;

     if (buf_size == 0) {
         if (!h->low_delay && h->DPB[0].f->data[0]) {
             *got_frame = 1;
-            *picture = *h->DPB[0].f;
-            if (h->cur.f->data[0])
-                avctx->release_buffer(avctx, h->cur.f);
-            FFSWAP(AVSFrame, h->cur, h->DPB[0]);
+            av_frame_move_ref(data, h->DPB[0].f);
         }
         return 0;
     }
@@ -1173,10 +1169,8 @@ static int cavs_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
             break;
         case PIC_I_START_CODE:
             if (!h->got_keyframe) {
-                if(h->DPB[0].f->data[0])
-                    avctx->release_buffer(avctx, h->DPB[0].f);
-                if(h->DPB[1].f->data[0])
-                    avctx->release_buffer(avctx, h->DPB[1].f);
+                av_frame_unref(h->DPB[0].f);
+                av_frame_unref(h->DPB[1].f);
                 h->got_keyframe = 1;
             }
         case PIC_PB_START_CODE:
@@ -1192,12 +1186,14 @@ static int cavs_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
             *got_frame = 1;
             if (h->cur.f->pict_type != AV_PICTURE_TYPE_B) {
                 if (h->DPB[1].f->data[0]) {
-                    *picture = *h->DPB[1].f;
+                    if ((ret = av_frame_ref(data, h->DPB[1].f)) < 0)
+                        return ret;
                 } else {
                     *got_frame = 0;
                 }
-            } else
-                *picture = *h->cur.f;
+            } else {
+                av_frame_move_ref(data, h->cur.f);
+            }
             break;
         case EXT_START_CODE:
             //mpeg_decode_extension(avctx, buf_ptr, input_size);

libavcodec/cdgraphics.c

@@ -64,26 +64,18 @@
 #define CDG_PALETTE_SIZE 16

 typedef struct CDGraphicsContext {
-    AVFrame frame;
+    AVFrame *frame;
     int hscroll;
     int vscroll;
 } CDGraphicsContext;

-static void cdg_init_frame(AVFrame *frame)
-{
-    avcodec_get_frame_defaults(frame);
-    frame->reference = 3;
-    frame->buffer_hints = FF_BUFFER_HINTS_VALID |
-                          FF_BUFFER_HINTS_READABLE |
-                          FF_BUFFER_HINTS_PRESERVE |
-                          FF_BUFFER_HINTS_REUSABLE;
-}
-
 static av_cold int cdg_decode_init(AVCodecContext *avctx)
 {
     CDGraphicsContext *cc = avctx->priv_data;

-    cdg_init_frame(&cc->frame);
+    cc->frame = av_frame_alloc();
+    if (!cc->frame)
+        return AVERROR(ENOMEM);

     avctx->width   = CDG_FULL_WIDTH;
     avctx->height  = CDG_FULL_HEIGHT;
@@ -95,8 +87,8 @@ static av_cold int cdg_decode_init(AVCodecContext *avctx)
 static void cdg_border_preset(CDGraphicsContext *cc, uint8_t *data)
 {
     int y;
-    int lsize    = cc->frame.linesize[0];
-    uint8_t *buf = cc->frame.data[0];
+    int lsize    = cc->frame->linesize[0];
+    uint8_t *buf = cc->frame->data[0];
     int color    = data[0] & 0x0F;

     if (!(data[1] & 0x0F)) {
@@ -120,7 +112,7 @@ static void cdg_load_palette(CDGraphicsContext *cc, uint8_t *data, int low)
     uint16_t color;
     int i;
     int array_offset  = low ? 0 : 8;
-    uint32_t *palette = (uint32_t *) cc->frame.data[1];
+    uint32_t *palette = (uint32_t *) cc->frame->data[1];

     for (i = 0; i < 8; i++) {
         color = (data[2 * i] << 6) + (data[2 * i + 1] & 0x3F);
@@ -129,7 +121,7 @@ static void cdg_load_palette(CDGraphicsContext *cc, uint8_t *data, int low)
         b = ((color     ) & 0x000F) * 17;
         palette[i + array_offset] = 0xFFU << 24 | r << 16 | g << 8 | b;
     }
-    cc->frame.palette_has_changed = 1;
+    cc->frame->palette_has_changed = 1;
 }

 static int cdg_tile_block(CDGraphicsContext *cc, uint8_t *data, int b)
@@ -138,8 +130,8 @@ static int cdg_tile_block(CDGraphicsContext *cc, uint8_t *data, int b)
     int color;
     int x, y;
     int ai;
-    int stride   = cc->frame.linesize[0];
-    uint8_t *buf = cc->frame.data[0];
+    int stride   = cc->frame->linesize[0];
+    uint8_t *buf = cc->frame->data[0];

     ri = (data[2] & 0x1F) * CDG_TILE_HEIGHT + cc->vscroll;
     ci = (data[3] & 0x3F) * CDG_TILE_WIDTH  + cc->hscroll;
@@ -210,8 +202,8 @@ static void cdg_scroll(CDGraphicsContext *cc, uint8_t *data,
     int color;
     int hscmd, h_off, hinc, vscmd, v_off, vinc;
     int y;
-    int stride   = cc->frame.linesize[0];
-    uint8_t *in  = cc->frame.data[0];
+    int stride   = cc->frame->linesize[0];
+    uint8_t *in  = cc->frame->data[0];
     uint8_t *out = new_frame->data[0];

     color =  data[0] & 0x0F;
@@ -239,7 +231,7 @@ static void cdg_scroll(CDGraphicsContext *cc, uint8_t *data,
     if (!hinc && !vinc)
         return;

-    memcpy(new_frame->data[1], cc->frame.data[1], CDG_PALETTE_SIZE * 4);
+    memcpy(new_frame->data[1], cc->frame->data[1], CDG_PALETTE_SIZE * 4);

     for (y = FFMAX(0, vinc); y < FFMIN(CDG_FULL_HEIGHT + vinc, CDG_FULL_HEIGHT); y++)
         memcpy(out + FFMAX(0, hinc) + stride * y,
@@ -274,7 +266,7 @@ static int cdg_decode_frame(AVCodecContext *avctx,
     int ret;
     uint8_t command, inst;
     uint8_t cdg_data[CDG_DATA_SIZE];
-    AVFrame new_frame;
+    AVFrame *frame = data;
     CDGraphicsContext *cc = avctx->priv_data;

     if (buf_size < CDG_MINIMUM_PKT_SIZE) {
@@ -286,14 +278,14 @@ static int cdg_decode_frame(AVCodecContext *avctx,
         return AVERROR(EINVAL);
     }

-    ret = avctx->reget_buffer(avctx, &cc->frame);
+    ret = ff_reget_buffer(avctx, cc->frame);
     if (ret) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }

     if (!avctx->frame_number) {
-        memset(cc->frame.data[0], 0, cc->frame.linesize[0] * avctx->height);
-        memset(cc->frame.data[1], 0, AVPALETTE_SIZE);
+        memset(cc->frame->data[0], 0, cc->frame->linesize[0] * avctx->height);
+        memset(cc->frame->data[1], 0, AVPALETTE_SIZE);
     }

     command = bytestream_get_byte(&buf);
@@ -306,8 +298,8 @@ static int cdg_decode_frame(AVCodecContext *avctx,
         switch (inst) {
         case CDG_INST_MEMORY_PRESET:
             if (!(cdg_data[1] & 0x0F))
-                memset(cc->frame.data[0], cdg_data[0] & 0x0F,
-                       cc->frame.linesize[0] * CDG_FULL_HEIGHT);
+                memset(cc->frame->data[0], cdg_data[0] & 0x0F,
+                       cc->frame->linesize[0] * CDG_FULL_HEIGHT);
             break;
         case CDG_INST_LOAD_PAL_LO:
         case CDG_INST_LOAD_PAL_HIGH:
@@ -341,28 +333,33 @@ static int cdg_decode_frame(AVCodecContext *avctx,
                 return AVERROR(EINVAL);
             }

-            cdg_init_frame(&new_frame);
-            ret = ff_get_buffer(avctx, &new_frame);
+            ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
             if (ret) {
                 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                 return ret;
             }

-            cdg_scroll(cc, cdg_data, &new_frame, inst == CDG_INST_SCROLL_COPY);
-            avctx->release_buffer(avctx, &cc->frame);
-            cc->frame = new_frame;
+            cdg_scroll(cc, cdg_data, frame, inst == CDG_INST_SCROLL_COPY);
+            av_frame_unref(cc->frame);
+            ret = av_frame_ref(cc->frame, frame);
+            if (ret < 0)
+                return ret;
             break;
         default:
             break;
         }

+        if (!frame->data[0]) {
+            ret = av_frame_ref(frame, cc->frame);
+            if (ret < 0)
+                return ret;
+        }
         *got_frame = 1;
     } else {
         *got_frame = 0;
         buf_size   = 0;
     }

-    *(AVFrame *) data = cc->frame;
     return buf_size;
 }

@@ -370,8 +367,7 @@ static av_cold int cdg_decode_end(AVCodecContext *avctx)
 {
     CDGraphicsContext *cc = avctx->priv_data;

-    if (cc->frame.data[0])
-        avctx->release_buffer(avctx, &cc->frame);
+    av_frame_free(&cc->frame);

     return 0;
 }

libavcodec/cdxl.c

@@ -35,7 +35,6 @@

 typedef struct {
     AVCodecContext *avctx;
-    AVFrame        frame;
     int            bpp;
     int            format;
     int            padded_bits;
@@ -51,7 +50,6 @@ static av_cold int cdxl_decode_init(AVCodecContext *avctx)
 {
     CDXLVideoContext *c = avctx->priv_data;

-    avcodec_get_frame_defaults(&c->frame);
     c->new_video_size = 0;
     c->avctx          = avctx;

@@ -115,16 +113,16 @@ static void import_format(CDXLVideoContext *c, int linesize, uint8_t *out)
     }
 }

-static void cdxl_decode_rgb(CDXLVideoContext *c)
+static void cdxl_decode_rgb(CDXLVideoContext *c, AVFrame *frame)
 {
-    uint32_t *new_palette = (uint32_t *)c->frame.data[1];
+    uint32_t *new_palette = (uint32_t *)frame->data[1];

-    memset(c->frame.data[1], 0, AVPALETTE_SIZE);
+    memset(frame->data[1], 0, AVPALETTE_SIZE);
     import_palette(c, new_palette);
-    import_format(c, c->frame.linesize[0], c->frame.data[0]);
+    import_format(c, frame->linesize[0], frame->data[0]);
 }

-static void cdxl_decode_ham6(CDXLVideoContext *c)
+static void cdxl_decode_ham6(CDXLVideoContext *c, AVFrame *frame)
 {
     AVCodecContext *avctx = c->avctx;
     uint32_t new_palette[16], r, g, b;
@@ -132,7 +130,7 @@ static void cdxl_decode_ham6(CDXLVideoContext *c)
     int x, y;

     ptr = c->new_video;
-    out = c->frame.data[0];
+    out = frame->data[0];

     import_palette(c, new_palette);
     import_format(c, avctx->width, c->new_video);
@@ -163,11 +161,11 @@ static void cdxl_decode_ham6(CDXLVideoContext *c)
             }
             AV_WL24(out + x * 3, r | g | b);
         }
-        out += c->frame.linesize[0];
+        out += frame->linesize[0];
     }
 }

-static void cdxl_decode_ham8(CDXLVideoContext *c)
+static void cdxl_decode_ham8(CDXLVideoContext *c, AVFrame *frame)
 {
     AVCodecContext *avctx = c->avctx;
     uint32_t new_palette[64], r, g, b;
@@ -175,7 +173,7 @@ static void cdxl_decode_ham8(CDXLVideoContext *c)
     int x, y;

     ptr = c->new_video;
-    out = c->frame.data[0];
+    out = frame->data[0];

     import_palette(c, new_palette);
     import_format(c, avctx->width, c->new_video);
@@ -206,7 +204,7 @@ static void cdxl_decode_ham8(CDXLVideoContext *c)
         }
         AV_WL24(out + x * 3, r | g | b);
         }
-        out += c->frame.linesize[0];
+        out += frame->linesize[0];
     }
 }

@@ -214,7 +212,7 @@ static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame, AVPacket *pkt)
 {
     CDXLVideoContext *c = avctx->priv_data;
-    AVFrame * const p = &c->frame;
+    AVFrame * const p = data;
     int ret, w, h, encoding, aligned_width, buf_size = pkt->size;
     const uint8_t *buf = pkt->data;

@@ -262,11 +260,7 @@ static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
         return AVERROR_PATCHWELCOME;
     }

-    if (p->data[0])
-        avctx->release_buffer(avctx, p);
-
-    p->reference = 0;
-    if ((ret = ff_get_buffer(avctx, p)) < 0) {
+    if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -278,14 +272,13 @@ static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
         if (!c->new_video)
             return AVERROR(ENOMEM);
         if (c->bpp == 8)
-            cdxl_decode_ham8(c);
+            cdxl_decode_ham8(c, p);
         else
-            cdxl_decode_ham6(c);
+            cdxl_decode_ham6(c, p);
     } else {
-        cdxl_decode_rgb(c);
+        cdxl_decode_rgb(c, p);
     }
     *got_frame = 1;
-    *(AVFrame*)data  = c->frame;

     return buf_size;
 }
@@ -295,8 +288,6 @@ static av_cold int cdxl_decode_end(AVCodecContext *avctx)
     CDXLVideoContext *c = avctx->priv_data;

     av_free(c->new_video);
-    if (c->frame.data[0])
-        avctx->release_buffer(avctx, &c->frame);

     return 0;
 }

libavcodec/cinepak.c

@@ -40,6 +40,7 @@
 #include "libavutil/common.h"
 #include "libavutil/intreadwrite.h"
 #include "avcodec.h"
+#include "internal.h"
 typedef uint8_t cvid_codebook[12];
@@ -57,7 +58,7 @@ typedef struct {
 typedef struct CinepakContext {
     AVCodecContext *avctx;
-    AVFrame frame;
+    AVFrame *frame;
     const unsigned char *data;
     int size;
@@ -143,14 +144,14 @@ static int cinepak_decode_vectors (CinepakContext *s, cvid_strip *strip,
     for (y=strip->y1; y < strip->y2; y+=4) {
 /* take care of y dimension not being multiple of 4, such streams exist */
-        ip0 = ip1 = ip2 = ip3 = s->frame.data[0] +
-            (s->palette_video?strip->x1:strip->x1*3) + (y * s->frame.linesize[0]);
+        ip0 = ip1 = ip2 = ip3 = s->frame->data[0] +
+            (s->palette_video?strip->x1:strip->x1*3) + (y * s->frame->linesize[0]);
         if(s->avctx->height - y > 1) {
-            ip1 = ip0 + s->frame.linesize[0];
+            ip1 = ip0 + s->frame->linesize[0];
             if(s->avctx->height - y > 2) {
-                ip2 = ip1 + s->frame.linesize[0];
+                ip2 = ip1 + s->frame->linesize[0];
                 if(s->avctx->height - y > 3) {
-                    ip3 = ip2 + s->frame.linesize[0];
+                    ip3 = ip2 + s->frame->linesize[0];
                 }
             }
         }
@@ -359,7 +360,7 @@ static int cinepak_decode (CinepakContext *s)
     num_strips = FFMIN(num_strips, MAX_STRIPS);
-    s->frame.key_frame = 0;
+    s->frame->key_frame = 0;
     for (i=0; i < num_strips; i++) {
         if ((s->data + 12) > eod)
@@ -375,7 +376,7 @@ static int cinepak_decode (CinepakContext *s)
         s->strips[i].x2 = AV_RB16 (&s->data[10]);
         if (s->strips[i].id == 0x10)
-            s->frame.key_frame = 1;
+            s->frame->key_frame = 1;
         strip_size = AV_RB24 (&s->data[1]) - 12;
         if (strip_size < 0)
@@ -420,8 +421,9 @@ static av_cold int cinepak_decode_init(AVCodecContext *avctx)
         avctx->pix_fmt = AV_PIX_FMT_PAL8;
     }
-    avcodec_get_frame_defaults(&s->frame);
-    s->frame.data[0] = NULL;
+    s->frame = av_frame_alloc();
+    if (!s->frame)
+        return AVERROR(ENOMEM);
     return 0;
 }
@@ -437,10 +439,7 @@ static int cinepak_decode_frame(AVCodecContext *avctx,
     s->data = buf;
     s->size = buf_size;
-    s->frame.reference = 3;
-    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
-                            FF_BUFFER_HINTS_REUSABLE;
-    if ((ret = avctx->reget_buffer(avctx, &s->frame))) {
+    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
@@ -448,7 +447,7 @@ static int cinepak_decode_frame(AVCodecContext *avctx,
     if (s->palette_video) {
         const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
         if (pal) {
-            s->frame.palette_has_changed = 1;
+            s->frame->palette_has_changed = 1;
             memcpy(s->pal, pal, AVPALETTE_SIZE);
         }
     }
@@ -458,10 +457,12 @@ static int cinepak_decode_frame(AVCodecContext *avctx,
     }
     if (s->palette_video)
-        memcpy (s->frame.data[1], s->pal, AVPALETTE_SIZE);
+        memcpy (s->frame->data[1], s->pal, AVPALETTE_SIZE);
+    if ((ret = av_frame_ref(data, s->frame)) < 0)
+        return ret;
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
     /* report that the buffer was completely consumed */
     return buf_size;
@@ -471,8 +472,7 @@ static av_cold int cinepak_decode_end(AVCodecContext *avctx)
 {
     CinepakContext *s = avctx->priv_data;
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+    av_frame_free(&s->frame);
     return 0;
 }
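
Cinepak is the other common shape: the codec predicts from its own previous output, so it keeps a context-owned AVFrame across packets. The lifecycle sketched below, with hypothetical names, is the one the hunks above implement: allocate once with av_frame_alloc(), refresh per packet with ff_reget_buffer() (which keeps the old contents readable), hand the caller an extra reference with av_frame_ref(), and free with av_frame_free() on close:

    typedef struct SketchContext {
        AVFrame *frame; /* persistent, context-owned */
    } SketchContext;

    static av_cold int sketch_init(AVCodecContext *avctx)
    {
        SketchContext *s = avctx->priv_data;
        s->frame = av_frame_alloc();
        return s->frame ? 0 : AVERROR(ENOMEM);
    }

    static int sketch_decode(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
    {
        SketchContext *s = avctx->priv_data;
        int ret;

        /* make s->frame writable again without losing its pixels */
        if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
            return ret;

        /* ... decode into s->frame ... */

        /* the caller gets its own reference; s->frame stays valid
         * for the next packet */
        if ((ret = av_frame_ref(data, s->frame)) < 0)
            return ret;
        *got_frame = 1;
        return avpkt->size;
    }

    static av_cold int sketch_close(AVCodecContext *avctx)
    {
        SketchContext *s = avctx->priv_data;
        av_frame_free(&s->frame);
        return 0;
    }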


@@ -30,22 +30,6 @@
 #include "internal.h"
 #include "put_bits.h"
-typedef struct CLJRContext {
-    AVClass        *avclass;
-    AVFrame        picture;
-    int            dither_type;
-} CLJRContext;
-static av_cold int common_init(AVCodecContext *avctx)
-{
-    CLJRContext * const a = avctx->priv_data;
-    avcodec_get_frame_defaults(&a->picture);
-    avctx->coded_frame = &a->picture;
-    return 0;
-}
 #if CONFIG_CLJR_DECODER
 static int decode_frame(AVCodecContext *avctx,
                         void *data, int *got_frame,
@@ -53,15 +37,10 @@ static int decode_frame(AVCodecContext *avctx,
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
-    CLJRContext * const a = avctx->priv_data;
     GetBitContext gb;
-    AVFrame *picture = data;
-    AVFrame * const p = &a->picture;
+    AVFrame * const p = data;
     int x, y, ret;
-    if (p->data[0])
-        avctx->release_buffer(avctx, p);
     if (avctx->height <= 0 || avctx->width <= 0) {
         av_log(avctx, AV_LOG_ERROR, "Invalid width or height\n");
         return AVERROR_INVALIDDATA;
@@ -73,8 +52,7 @@ static int decode_frame(AVCodecContext *avctx,
         return AVERROR_INVALIDDATA;
     }
-    p->reference = 0;
-    if ((ret = ff_get_buffer(avctx, p)) < 0) {
+    if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -84,9 +62,9 @@ static int decode_frame(AVCodecContext *avctx,
     init_get_bits(&gb, buf, buf_size * 8);
     for (y = 0; y < avctx->height; y++) {
-        uint8_t *luma = &a->picture.data[0][y * a->picture.linesize[0]];
-        uint8_t *cb   = &a->picture.data[1][y * a->picture.linesize[1]];
-        uint8_t *cr   = &a->picture.data[2][y * a->picture.linesize[2]];
+        uint8_t *luma = &p->data[0][y * p->linesize[0]];
+        uint8_t *cb   = &p->data[1][y * p->linesize[1]];
+        uint8_t *cr   = &p->data[2][y * p->linesize[2]];
         for (x = 0; x < avctx->width; x += 4) {
             luma[3] = (get_bits(&gb, 5)*33) >> 2;
             luma[2] = (get_bits(&gb, 5)*33) >> 2;
@@ -98,7 +76,6 @@ static int decode_frame(AVCodecContext *avctx,
         }
     }
-    *picture   = a->picture;
     *got_frame = 1;
     return buf_size;
@@ -107,15 +84,6 @@ static int decode_frame(AVCodecContext *avctx,
 static av_cold int decode_init(AVCodecContext *avctx)
 {
     avctx->pix_fmt = AV_PIX_FMT_YUV411P;
-    return common_init(avctx);
-}
-static av_cold int decode_end(AVCodecContext *avctx)
-{
-    CLJRContext *a = avctx->priv_data;
-    if (a->picture.data[0])
-        avctx->release_buffer(avctx, &a->picture);
     return 0;
 }
@@ -123,9 +91,7 @@ AVCodec ff_cljr_decoder = {
     .name           = "cljr",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_CLJR,
-    .priv_data_size = sizeof(CLJRContext),
     .init           = decode_init,
-    .close          = decode_end,
     .decode         = decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("Cirrus Logic AccuPak"),
@@ -133,6 +99,21 @@ AVCodec ff_cljr_decoder = {
 #endif
 #if CONFIG_CLJR_ENCODER
+typedef struct CLJRContext {
+    AVClass        *avclass;
+    AVFrame        picture;
+    int            dither_type;
+} CLJRContext;
+static av_cold int encode_init(AVCodecContext *avctx)
+{
+    CLJRContext * const a = avctx->priv_data;
+    avctx->coded_frame = &a->picture;
+    return 0;
+}
 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                         const AVFrame *p, int *got_packet)
 {
@@ -201,7 +182,7 @@ AVCodec ff_cljr_encoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_CLJR,
     .priv_data_size = sizeof(CLJRContext),
-    .init           = common_init,
+    .init           = encode_init,
     .encode2        = encode_frame,
     .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV411P,
                                                      AV_PIX_FMT_NONE },


@@ -271,18 +271,13 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_picture_ptr, AVPacket *avpkt)
 {
     CLLCContext *ctx = avctx->priv_data;
-    AVFrame *pic = avctx->coded_frame;
+    AVFrame *pic = data;
     uint8_t *src = avpkt->data;
     uint32_t info_tag, info_offset;
     int data_size;
     GetBitContext gb;
     int coding_type, ret;
-    if (pic->data[0])
-        avctx->release_buffer(avctx, pic);
-    pic->reference = 0;
     /* Skip the INFO header if present */
     info_offset = 0;
     info_tag    = AV_RL32(src);
@@ -334,7 +329,7 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
         avctx->pix_fmt             = AV_PIX_FMT_RGB24;
         avctx->bits_per_raw_sample = 8;
-        ret = ff_get_buffer(avctx, pic);
+        ret = ff_get_buffer(avctx, pic, 0);
         if (ret < 0) {
             av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
             return ret;
@@ -349,7 +344,7 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
         avctx->pix_fmt             = AV_PIX_FMT_ARGB;
         avctx->bits_per_raw_sample = 8;
-        ret = ff_get_buffer(avctx, pic);
+        ret = ff_get_buffer(avctx, pic, 0);
         if (ret < 0) {
             av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
             return ret;
@@ -369,7 +364,6 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
     pic->pict_type = AV_PICTURE_TYPE_I;
     *got_picture_ptr = 1;
-    *(AVFrame *)data = *pic;
     return avpkt->size;
 }
@@ -378,10 +372,6 @@ static av_cold int cllc_decode_close(AVCodecContext *avctx)
 {
     CLLCContext *ctx = avctx->priv_data;
-    if (avctx->coded_frame->data[0])
-        avctx->release_buffer(avctx, avctx->coded_frame);
-    av_freep(&avctx->coded_frame);
     av_freep(&ctx->swapped_buf);
     return 0;
@@ -398,12 +388,6 @@ static av_cold int cllc_decode_init(AVCodecContext *avctx)
     ff_dsputil_init(&ctx->dsp, avctx);
-    avctx->coded_frame = avcodec_alloc_frame();
-    if (!avctx->coded_frame) {
-        av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
-        return AVERROR(ENOMEM);
-    }
     return 0;
 }


@@ -142,7 +142,7 @@ static int cng_decode_frame(AVCodecContext *avctx, void *data,
                       p->excitation, avctx->frame_size, p->order);
     frame->nb_samples = avctx->frame_size;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }


@@ -970,7 +970,7 @@ static int cook_decode_frame(AVCodecContext *avctx, void *data,
     /* get output buffer */
     if (q->discarded_packets >= 2) {
         frame->nb_samples = q->samples_per_channel;
-        if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+        if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return ret;
         }


@@ -24,6 +24,7 @@
 #include "avcodec.h"
 #include "get_bits.h"
+#include "internal.h"
 #define FRAME_HEADER_SIZE 64
@@ -42,7 +43,7 @@
 typedef struct {
-    AVFrame frame;
+    AVFrame *frame;
 } CpiaContext;
@@ -58,7 +59,7 @@ static int cpia_decode_frame(AVCodecContext *avctx,
     uint16_t linelength;
     uint8_t skip;
-    AVFrame* const frame = &cpia->frame;
+    AVFrame *frame = cpia->frame;
     uint8_t *y, *u, *v, *y_end, *u_end, *v_end;
     // Check header
@@ -99,7 +100,7 @@ static int cpia_decode_frame(AVCodecContext *avctx,
     }
     // Get buffer filled with previous frame
-    if ((ret = avctx->reget_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed!\n");
         return ret;
     }
@@ -184,13 +185,16 @@ static int cpia_decode_frame(AVCodecContext *avctx,
     }
     *got_frame = 1;
-    *(AVFrame*) data = *frame;
+    if ((ret = av_frame_ref(data, cpia->frame)) < 0)
+        return ret;
     return avpkt->size;
 }
 static av_cold int cpia_decode_init(AVCodecContext *avctx)
 {
+    CpiaContext *s = avctx->priv_data;
     // output pixel format
     avctx->pix_fmt = AV_PIX_FMT_YUV420P;
@@ -202,9 +206,21 @@ static av_cold int cpia_decode_init(AVCodecContext *avctx)
         avctx->time_base.den = 60;
     }
+    s->frame = av_frame_alloc();
+    if (!s->frame)
+        return AVERROR(ENOMEM);
     return 0;
 }
+static av_cold int cpia_decode_end(AVCodecContext *avctx)
+{
+    CpiaContext *s = avctx->priv_data;
+    av_frame_free(&s->frame);
+    return 0;
+}
 AVCodec ff_cpia_decoder = {
     .name           = "cpia",
@@ -212,6 +228,7 @@ AVCodec ff_cpia_decoder = {
     .id             = AV_CODEC_ID_CPIA,
     .priv_data_size = sizeof(CpiaContext),
     .init           = cpia_decode_init,
+    .close          = cpia_decode_end,
     .decode         = cpia_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("CPiA video format"),


@@ -643,7 +643,7 @@ static inline CopyRet copy_frame(AVCodecContext *avctx,
     priv->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
                              FF_BUFFER_HINTS_REUSABLE;
     if (!priv->pic.data[0]) {
-        if (ff_get_buffer(avctx, &priv->pic) < 0) {
+        if (ff_get_buffer(avctx, &priv->pic, 0) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return RET_ERROR;
         }


@@ -31,7 +31,7 @@
 #include "libavutil/lzo.h"
 typedef struct {
-    AVFrame pic;
+    AVFrame *pic;
     int linelen, height, bpp;
     unsigned int decomp_size;
     unsigned char* decomp_buf;
@@ -67,7 +67,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     CamStudioContext *c = avctx->priv_data;
-    AVFrame *picture = data;
     int ret;
     if (buf_size < 2) {
@@ -75,10 +74,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         return AVERROR_INVALIDDATA;
     }
-    c->pic.reference = 3;
-    c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_READABLE |
-                          FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
-    if ((ret = avctx->reget_buffer(avctx, &c->pic)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, c->pic)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -109,19 +105,21 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     // flip upside down, add difference frame
     if (buf[0] & 1) { // keyframe
-        c->pic.pict_type = AV_PICTURE_TYPE_I;
-        c->pic.key_frame = 1;
-        copy_frame_default(&c->pic, c->decomp_buf,
+        c->pic->pict_type = AV_PICTURE_TYPE_I;
+        c->pic->key_frame = 1;
+        copy_frame_default(c->pic, c->decomp_buf,
                            c->linelen, c->height);
     } else {
-        c->pic.pict_type = AV_PICTURE_TYPE_P;
-        c->pic.key_frame = 0;
-        add_frame_default(&c->pic, c->decomp_buf,
+        c->pic->pict_type = AV_PICTURE_TYPE_P;
+        c->pic->key_frame = 0;
+        add_frame_default(c->pic, c->decomp_buf,
                           c->linelen, c->height);
     }
-    *picture = c->pic;
     *got_frame = 1;
+    if ((ret = av_frame_ref(data, c->pic)) < 0)
+        return ret;
     return buf_size;
 }
@@ -139,8 +137,6 @@ static av_cold int decode_init(AVCodecContext *avctx) {
         return AVERROR_INVALIDDATA;
     }
     c->bpp = avctx->bits_per_coded_sample;
-    avcodec_get_frame_defaults(&c->pic);
-    c->pic.data[0] = NULL;
     c->linelen = avctx->width * avctx->bits_per_coded_sample / 8;
     c->height = avctx->height;
     stride = FFALIGN(c->linelen, 4);
@@ -150,14 +146,16 @@ static av_cold int decode_init(AVCodecContext *avctx) {
         av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
         return AVERROR(ENOMEM);
     }
+    c->pic = av_frame_alloc();
+    if (!c->pic)
+        return AVERROR(ENOMEM);
     return 0;
 }
 static av_cold int decode_end(AVCodecContext *avctx) {
     CamStudioContext *c = avctx->priv_data;
     av_freep(&c->decomp_buf);
-    if (c->pic.data[0])
-        avctx->release_buffer(avctx, &c->pic);
+    av_frame_free(&c->pic);
     return 0;
 }


@@ -40,7 +40,6 @@
 typedef struct CyuvDecodeContext {
     AVCodecContext *avctx;
     int width, height;
-    AVFrame frame;
 } CyuvDecodeContext;
 static av_cold int cyuv_decode_init(AVCodecContext *avctx)
@@ -53,7 +52,6 @@ static av_cold int cyuv_decode_init(AVCodecContext *avctx)
     if (s->width & 0x3)
         return AVERROR_INVALIDDATA;
     s->height = avctx->height;
-    avcodec_get_frame_defaults(&s->frame);
     return 0;
 }
@@ -65,6 +63,7 @@ static int cyuv_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     CyuvDecodeContext *s=avctx->priv_data;
+    AVFrame *frame = data;
     unsigned char *y_plane;
     unsigned char *u_plane;
@@ -106,35 +105,30 @@ static int cyuv_decode_frame(AVCodecContext *avctx,
     /* pixel data starts 48 bytes in, after 3x16-byte tables */
     stream_ptr = 48;
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
-    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
-    s->frame.reference = 0;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    y_plane = s->frame.data[0];
-    u_plane = s->frame.data[1];
-    v_plane = s->frame.data[2];
+    y_plane = frame->data[0];
+    u_plane = frame->data[1];
+    v_plane = frame->data[2];
     if (buf_size == rawsize) {
         int linesize = FFALIGN(s->width,2) * 2;
-        y_plane += s->frame.linesize[0] * s->height;
+        y_plane += frame->linesize[0] * s->height;
         for (stream_ptr = 0; stream_ptr < rawsize; stream_ptr += linesize) {
-            y_plane -= s->frame.linesize[0];
+            y_plane -= frame->linesize[0];
             memcpy(y_plane, buf+stream_ptr, linesize);
         }
     } else {
     /* iterate through each line in the height */
     for (y_ptr = 0, u_ptr = 0, v_ptr = 0;
-         y_ptr < (s->height * s->frame.linesize[0]);
-         y_ptr += s->frame.linesize[0] - s->width,
-         u_ptr += s->frame.linesize[1] - s->width / 4,
-         v_ptr += s->frame.linesize[2] - s->width / 4) {
+         y_ptr < (s->height * frame->linesize[0]);
+         y_ptr += frame->linesize[0] - s->width,
+         u_ptr += frame->linesize[1] - s->width / 4,
+         v_ptr += frame->linesize[2] - s->width / 4) {
         /* reset predictors */
         cur_byte = buf[stream_ptr++];
@@ -179,21 +173,10 @@ static int cyuv_decode_frame(AVCodecContext *avctx,
     }
     *got_frame = 1;
-    *(AVFrame*)data= s->frame;
     return buf_size;
 }
-static av_cold int cyuv_decode_end(AVCodecContext *avctx)
-{
-    CyuvDecodeContext *s = avctx->priv_data;
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
-    return 0;
-}
 #if CONFIG_AURA_DECODER
 AVCodec ff_aura_decoder = {
     .name           = "aura",
@@ -201,7 +184,6 @@ AVCodec ff_aura_decoder = {
     .id             = AV_CODEC_ID_AURA,
     .priv_data_size = sizeof(CyuvDecodeContext),
     .init           = cyuv_decode_init,
-    .close          = cyuv_decode_end,
     .decode         = cyuv_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("Auravision AURA"),
@@ -215,7 +197,6 @@ AVCodec ff_cyuv_decoder = {
     .id             = AV_CODEC_ID_CYUV,
     .priv_data_size = sizeof(CyuvDecodeContext),
     .init           = cyuv_decode_init,
-    .close          = cyuv_decode_end,
     .decode         = cyuv_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("Creative YUV (CYUV)"),


@@ -2355,7 +2355,7 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
     /* get output buffer */
     frame->nb_samples = 256 * (s->sample_blocks / 8);
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }


@@ -29,8 +29,6 @@
 #include "libavutil/mem.h"
 typedef struct DfaContext {
-    AVFrame pic;
     uint32_t pal[256];
     uint8_t *frame_buf;
 } DfaContext;
@@ -317,6 +315,7 @@ static int dfa_decode_frame(AVCodecContext *avctx,
                             void *data, int *got_frame,
                             AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     DfaContext *s = avctx->priv_data;
     GetByteContext gb;
     const uint8_t *buf = avpkt->data;
@@ -325,10 +324,7 @@ static int dfa_decode_frame(AVCodecContext *avctx,
     int ret;
     int i, pal_elems;
-    if (s->pic.data[0])
-        avctx->release_buffer(avctx, &s->pic);
-    if ((ret = ff_get_buffer(avctx, &s->pic))) {
+    if ((ret = ff_get_buffer(avctx, frame, 0))) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -346,7 +342,7 @@ static int dfa_decode_frame(AVCodecContext *avctx,
                 s->pal[i] = bytestream2_get_be24(&gb) << 2;
                 s->pal[i] |= 0xFFU << 24 | (s->pal[i] >> 6) & 0x30303;
             }
-            s->pic.palette_has_changed = 1;
+            frame->palette_has_changed = 1;
         } else if (chunk_type <= 9) {
             if (decoder[chunk_type - 2](&gb, s->frame_buf, avctx->width, avctx->height)) {
                 av_log(avctx, AV_LOG_ERROR, "Error decoding %s chunk\n",
@@ -361,16 +357,15 @@ static int dfa_decode_frame(AVCodecContext *avctx,
     }
     buf = s->frame_buf;
-    dst = s->pic.data[0];
+    dst = frame->data[0];
     for (i = 0; i < avctx->height; i++) {
         memcpy(dst, buf, avctx->width);
-        dst += s->pic.linesize[0];
+        dst += frame->linesize[0];
         buf += avctx->width;
     }
-    memcpy(s->pic.data[1], s->pal, sizeof(s->pal));
+    memcpy(frame->data[1], s->pal, sizeof(s->pal));
     *got_frame = 1;
-    *(AVFrame*)data = s->pic;
     return avpkt->size;
 }
@@ -379,9 +374,6 @@ static av_cold int dfa_decode_end(AVCodecContext *avctx)
 {
     DfaContext *s = avctx->priv_data;
-    if (s->pic.data[0])
-        avctx->release_buffer(avctx, &s->pic);
     av_freep(&s->frame_buf);
     return 0;


@@ -365,7 +365,7 @@ static void free_sequence_buffers(DiracContext *s)
     for (i = 0; i < MAX_FRAMES; i++) {
         if (s->all_frames[i].avframe.data[0]) {
-            s->avctx->release_buffer(s->avctx, &s->all_frames[i].avframe);
+            av_frame_unref(&s->all_frames[i].avframe);
             memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
         }
@@ -1671,7 +1671,7 @@ static int dirac_decode_picture_header(DiracContext *s)
         for (j = 0; j < MAX_FRAMES; j++)
             if (!s->all_frames[j].avframe.data[0]) {
                 s->ref_pics[i] = &s->all_frames[j];
-                ff_get_buffer(s->avctx, &s->ref_pics[i]->avframe);
+                ff_get_buffer(s->avctx, &s->ref_pics[i]->avframe, AV_GET_BUFFER_FLAG_REF);
                 break;
             }
     }
@@ -1712,6 +1712,7 @@ static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame)
 {
     DiracFrame *out = s->delay_frames[0];
     int i, out_idx = 0;
+    int ret;
     /* find frame with lowest picture number */
     for (i = 1; s->delay_frames[i]; i++)
@@ -1726,7 +1727,8 @@ static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame)
     if (out) {
         out->avframe.reference ^= DELAYED_PIC_REF;
         *got_frame = 1;
-        *(AVFrame *)picture = out->avframe;
+        if((ret = av_frame_ref(picture, &out->avframe)) < 0)
+            return ret;
     }
     return 0;
@@ -1809,7 +1811,7 @@ static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int
         pic->avframe.key_frame = s->num_refs == 0; /* [DIRAC_STD] is_intra() */
         pic->avframe.pict_type = s->num_refs + 1;  /* Definition of AVPictureType in avutil.h */
-        if (ff_get_buffer(avctx, &pic->avframe) < 0) {
+        if (ff_get_buffer(avctx, &pic->avframe, (parse_code & 0x0C) == 0x0C ? AV_GET_BUFFER_FLAG_REF : 0) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return -1;
         }
@@ -1836,11 +1838,12 @@ static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     uint8_t *buf = pkt->data;
     int buf_size = pkt->size;
     int i, data_unit_size, buf_idx = 0;
+    int ret;
     /* release unused frames */
     for (i = 0; i < MAX_FRAMES; i++)
         if (s->all_frames[i].avframe.data[0] && !s->all_frames[i].avframe.reference) {
-            avctx->release_buffer(avctx, &s->all_frames[i].avframe);
+            av_frame_unref(&s->all_frames[i].avframe);
             memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
         }
@@ -1906,12 +1909,14 @@ static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         if (delayed_frame) {
             delayed_frame->avframe.reference ^= DELAYED_PIC_REF;
-            *(AVFrame*)data = delayed_frame->avframe;
+            if((ret=av_frame_ref(data, &delayed_frame->avframe)) < 0)
+                return ret;
             *got_frame = 1;
         }
     } else if (s->current_picture->avframe.display_picture_number == s->frame_number) {
         /* The right frame at the right time :-) */
-        *(AVFrame*)data = s->current_picture->avframe;
+        if((ret=av_frame_ref(data, &s->current_picture->avframe)) < 0)
+            return ret;
         *got_frame = 1;
     }
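
Dirac shows why the old `*(AVFrame*)data = ...` struct copy had to go: the decoder keeps the same picture in its reference pool while the caller displays it. av_frame_ref() makes both sides independent owners of the underlying buffers, and unlike the struct copy it can fail, so every call site gains an error check. In isolation, under the same assumptions:

    /* Sketch: return an internally held picture. 'held' stands for a
     * frame the decoder still needs for prediction. */
    static int sketch_output(AVFrame *out, AVFrame *held, int *got_frame)
    {
        int ret;

        /* decoder and caller each hold a reference afterwards */
        if ((ret = av_frame_ref(out, held)) < 0)
            return ret;

        *got_frame = 1;
        return 0;
    }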


@@ -35,7 +35,6 @@
 typedef struct DNXHDContext {
     AVCodecContext *avctx;
-    AVFrame picture;
     GetBitContext gb;
     int64_t cid;                        ///< compression id
     unsigned int width, height;
@@ -67,10 +66,6 @@ static av_cold int dnxhd_decode_init(AVCodecContext *avctx)
     DNXHDContext *ctx = avctx->priv_data;
     ctx->avctx = avctx;
-    avctx->coded_frame = &ctx->picture;
-    avcodec_get_frame_defaults(&ctx->picture);
-    ctx->picture.type = AV_PICTURE_TYPE_I;
-    ctx->picture.key_frame = 1;
     ctx->cid = -1;
     return 0;
 }
@@ -110,7 +105,8 @@ static int dnxhd_init_vlc(DNXHDContext *ctx, uint32_t cid)
     return 0;
 }
-static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_size, int first_field)
+static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,
+                               const uint8_t *buf, int buf_size, int first_field)
 {
     static const uint8_t header_prefix[] = { 0x00, 0x00, 0x02, 0x80, 0x01 };
     int i, cid;
@@ -124,8 +120,8 @@ static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_si
     }
     if (buf[5] & 2) { /* interlaced */
         ctx->cur_field = buf[5] & 1;
-        ctx->picture.interlaced_frame = 1;
-        ctx->picture.top_field_first = first_field ^ ctx->cur_field;
+        frame->interlaced_frame = 1;
+        frame->top_field_first = first_field ^ ctx->cur_field;
         av_log(ctx->avctx, AV_LOG_DEBUG, "interlaced %d, cur field %d\n", buf[5] & 3, ctx->cur_field);
     }
@@ -168,11 +164,11 @@ static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_si
     av_dlog(ctx->avctx, "mb width %d, mb height %d\n", ctx->mb_width, ctx->mb_height);
-    if ((ctx->height+15)>>4 == ctx->mb_height && ctx->picture.interlaced_frame)
+    if ((ctx->height+15)>>4 == ctx->mb_height && frame->interlaced_frame)
         ctx->height <<= 1;
     if (ctx->mb_height > 68 ||
-        (ctx->mb_height<<ctx->picture.interlaced_frame) > (ctx->height+15)>>4) {
+        (ctx->mb_height << frame->interlaced_frame) > (ctx->height+15)>>4) {
         av_log(ctx->avctx, AV_LOG_ERROR, "mb height too big: %d\n", ctx->mb_height);
         return -1;
     }
@@ -284,11 +280,11 @@ static void dnxhd_decode_dct_block_10(DNXHDContext *ctx, int16_t *block,
     dnxhd_decode_dct_block(ctx, block, n, qscale, 6, 8, 4);
 }
-static int dnxhd_decode_macroblock(DNXHDContext *ctx, int x, int y)
+static int dnxhd_decode_macroblock(DNXHDContext *ctx, AVFrame *frame, int x, int y)
 {
     int shift1 = ctx->bit_depth == 10;
-    int dct_linesize_luma   = ctx->picture.linesize[0];
-    int dct_linesize_chroma = ctx->picture.linesize[1];
+    int dct_linesize_luma   = frame->linesize[0];
+    int dct_linesize_chroma = frame->linesize[1];
     uint8_t *dest_y, *dest_u, *dest_v;
     int dct_y_offset, dct_x_offset;
     int qscale, i;
@@ -309,19 +305,19 @@ static int dnxhd_decode_macroblock(DNXHDContext *ctx, int x, int y)
         ctx->decode_dct_block(ctx, ctx->blocks[i], i, qscale);
     }
-    if (ctx->picture.interlaced_frame) {
+    if (frame->interlaced_frame) {
         dct_linesize_luma   <<= 1;
         dct_linesize_chroma <<= 1;
     }
-    dest_y = ctx->picture.data[0] + ((y * dct_linesize_luma)   << 4) + (x << (4 + shift1));
-    dest_u = ctx->picture.data[1] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1));
-    dest_v = ctx->picture.data[2] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1));
+    dest_y = frame->data[0] + ((y * dct_linesize_luma)   << 4) + (x << (4 + shift1));
+    dest_u = frame->data[1] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1));
+    dest_v = frame->data[2] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1));
     if (ctx->cur_field) {
-        dest_y += ctx->picture.linesize[0];
-        dest_u += ctx->picture.linesize[1];
-        dest_v += ctx->picture.linesize[2];
+        dest_y += frame->linesize[0];
+        dest_u += frame->linesize[1];
+        dest_v += frame->linesize[2];
     }
     dct_y_offset = dct_linesize_luma << 3;
@@ -342,7 +338,8 @@ static int dnxhd_decode_macroblock(DNXHDContext *ctx, int x, int y)
     return 0;
 }
-static int dnxhd_decode_macroblocks(DNXHDContext *ctx, const uint8_t *buf, int buf_size)
+static int dnxhd_decode_macroblocks(DNXHDContext *ctx, AVFrame *frame,
+                                    const uint8_t *buf, int buf_size)
 {
     int x, y;
     for (y = 0; y < ctx->mb_height; y++) {
@@ -352,7 +349,7 @@ static int dnxhd_decode_macroblocks(DNXHDContext *ctx, const uint8_t *buf, int b
         init_get_bits(&ctx->gb, buf + ctx->mb_scan_index[y], (buf_size - ctx->mb_scan_index[y]) << 3);
         for (x = 0; x < ctx->mb_width; x++) {
             //START_TIMER;
-            dnxhd_decode_macroblock(ctx, x, y);
+            dnxhd_decode_macroblock(ctx, frame, x, y);
             //STOP_TIMER("decode macroblock");
         }
     }
@@ -365,6 +362,7 @@ static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     DNXHDContext *ctx = avctx->priv_data;
+    ThreadFrame frame = { .f = data };
     AVFrame *picture = data;
     int first_field = 1;
     int ret;
@@ -372,7 +370,7 @@ static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     av_dlog(avctx, "frame size %d\n", buf_size);
 decode_coding_unit:
-    if (dnxhd_decode_header(ctx, buf, buf_size, first_field) < 0)
+    if (dnxhd_decode_header(ctx, picture, buf, buf_size, first_field) < 0)
         return -1;
     if ((avctx->width || avctx->height) &&
@@ -387,24 +385,23 @@ static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     avcodec_set_dimensions(avctx, ctx->width, ctx->height);
     if (first_field) {
-        if (ctx->picture.data[0])
-            ff_thread_release_buffer(avctx, &ctx->picture);
-        if ((ret = ff_thread_get_buffer(avctx, &ctx->picture)) < 0) {
+        if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return ret;
         }
+        picture->pict_type = AV_PICTURE_TYPE_I;
+        picture->key_frame = 1;
     }
-    dnxhd_decode_macroblocks(ctx, buf + 0x280, buf_size - 0x280);
-    if (first_field && ctx->picture.interlaced_frame) {
+    dnxhd_decode_macroblocks(ctx, picture, buf + 0x280, buf_size - 0x280);
+    if (first_field && picture->interlaced_frame) {
         buf      += ctx->cid_table->coding_unit_size;
         buf_size -= ctx->cid_table->coding_unit_size;
         first_field = 0;
         goto decode_coding_unit;
     }
-    *picture = ctx->picture;
     *got_frame = 1;
     return avpkt->size;
 }
@@ -413,8 +410,6 @@ static av_cold int dnxhd_decode_close(AVCodecContext *avctx)
 {
     DNXHDContext *ctx = avctx->priv_data;
-    if (ctx->picture.data[0])
-        ff_thread_release_buffer(avctx, &ctx->picture);
     ff_free_vlc(&ctx->ac_vlc);
     ff_free_vlc(&ctx->dc_vlc);
     ff_free_vlc(&ctx->run_vlc);
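
dnxhd is frame-threaded, so it cannot call ff_get_buffer() directly: the caller's frame is wrapped in a ThreadFrame and allocated through ff_thread_get_buffer(), exactly as in the hunk above. Reduced to its skeleton (the decode body is elided; everything else is taken from the diff):

    static int sketch_thread_decode(AVCodecContext *avctx, void *data,
                                    int *got_frame, AVPacket *avpkt)
    {
        ThreadFrame frame = { .f = data };
        int ret;

        /* the threading layer tracks buffer ownership across workers */
        if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
            return ret;

        /* ... decode into (AVFrame *)data ... */

        *got_frame = 1;
        return avpkt->size;
    }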


@@ -211,7 +211,7 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
     /* get output buffer */
     frame->nb_samples = (out + avctx->channels - 1) / avctx->channels;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }


@@ -25,11 +25,6 @@
 #include "avcodec.h"
 #include "internal.h"
-typedef struct DPXContext {
-    AVFrame picture;
-} DPXContext;
 static unsigned int read32(const uint8_t **ptr, int is_big)
 {
     unsigned int temp;
@@ -64,9 +59,7 @@ static int decode_frame(AVCodecContext *avctx,
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
-    DPXContext *const s = avctx->priv_data;
-    AVFrame *picture = data;
-    AVFrame *const p = &s->picture;
+    AVFrame *const p = data;
     uint8_t *ptr[AV_NUM_DATA_POINTERS];
     unsigned int offset;
@@ -186,9 +179,7 @@ static int decode_frame(AVCodecContext *avctx,
         return AVERROR_INVALIDDATA;
     }
-    if (s->picture.data[0])
-        avctx->release_buffer(avctx, &s->picture);
-    if ((ret = ff_get_buffer(avctx, p)) < 0) {
+    if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -262,36 +253,15 @@ static int decode_frame(AVCodecContext *avctx,
         break;
     }
-    *picture   = s->picture;
     *got_frame = 1;
     return buf_size;
 }
-static av_cold int decode_init(AVCodecContext *avctx)
-{
-    DPXContext *s = avctx->priv_data;
-    avcodec_get_frame_defaults(&s->picture);
-    avctx->coded_frame = &s->picture;
-    return 0;
-}
-static av_cold int decode_end(AVCodecContext *avctx)
-{
-    DPXContext *s = avctx->priv_data;
-    if (s->picture.data[0])
-        avctx->release_buffer(avctx, &s->picture);
-    return 0;
-}
 AVCodec ff_dpx_decoder = {
     .name           = "dpx",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_DPX,
-    .priv_data_size = sizeof(DPXContext),
-    .init           = decode_init,
-    .close          = decode_end,
     .decode         = decode_frame,
     .long_name      = NULL_IF_CONFIG_SMALL("DPX image"),
     .capabilities   = CODEC_CAP_DR1,
View File

@ -305,8 +305,7 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
break; break;
} }
cin->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if ((res = ff_reget_buffer(avctx, &cin->frame)) < 0) {
if ((res = avctx->reget_buffer(avctx, &cin->frame))) {
av_log(cin->avctx, AV_LOG_ERROR, "failed to allocate a frame\n"); av_log(cin->avctx, AV_LOG_ERROR, "failed to allocate a frame\n");
return res; return res;
} }
@ -320,8 +319,10 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
FFSWAP(uint8_t *, cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_table[CIN_PRE_BMP]); FFSWAP(uint8_t *, cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_table[CIN_PRE_BMP]);
if ((res = av_frame_ref(data, &cin->frame)) < 0)
return res;
*got_frame = 1; *got_frame = 1;
*(AVFrame *)data = cin->frame;
return buf_size; return buf_size;
} }
@ -330,8 +331,7 @@ static av_cold int cinvideo_decode_end(AVCodecContext *avctx)
{ {
CinVideoContext *cin = avctx->priv_data; CinVideoContext *cin = avctx->priv_data;
if (cin->frame.data[0]) av_frame_unref(&cin->frame);
avctx->release_buffer(avctx, &cin->frame);
destroy_buffers(cin); destroy_buffers(cin);
@ -363,7 +363,7 @@ static int cinaudio_decode_frame(AVCodecContext *avctx, void *data,
/* get output buffer */ /* get output buffer */
frame->nb_samples = avpkt->size - cin->initial_decode_frame; frame->nb_samples = avpkt->size - cin->initial_decode_frame;
if ((ret = ff_get_buffer(avctx, frame)) < 0) { if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret; return ret;
} }


@@ -327,17 +327,12 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
         return -1; /* NOTE: we only accept several full frames */
     }
-    if (s->picture.data[0])
-        avctx->release_buffer(avctx, &s->picture);
-    avcodec_get_frame_defaults(&s->picture);
-    s->picture.reference = 0;
     s->picture.key_frame = 1;
     s->picture.pict_type = AV_PICTURE_TYPE_I;
     avctx->pix_fmt   = s->sys->pix_fmt;
     avctx->time_base = s->sys->time_base;
     avcodec_set_dimensions(avctx, s->sys->width, s->sys->height);
-    if (ff_get_buffer(avctx, &s->picture) < 0) {
+    if (ff_get_buffer(avctx, &s->picture, 0) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return -1;
     }
@@ -361,7 +356,7 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
     /* return image */
     *got_frame = 1;
-    *(AVFrame*)data = s->picture;
+    av_frame_move_ref(data, &s->picture);
     return s->sys->frame_size;
 }
@@ -370,8 +365,7 @@ static int dvvideo_close(AVCodecContext *c)
 {
     DVVideoContext *s = c->priv_data;
-    if (s->picture.data[0])
-        c->release_buffer(c, &s->picture);
+    av_frame_unref(&s->picture);
     return 0;
 }
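
DV decodes into a context frame but never looks at it again, so instead of sharing it with av_frame_ref() it transfers ownership outright with av_frame_move_ref(), which returns void and cannot fail; the context frame is left empty for the next packet. Roughly, under those assumptions:

    /* Sketch: transfer, not share. After this the context frame is
     * blank and the next call must allocate it again. */
    static void sketch_return(AVFrame *out, AVFrame *ctx_frame, int *got_frame)
    {
        av_frame_move_ref(out, ctx_frame);
        *got_frame = 1;
    }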


@@ -39,7 +39,7 @@
  * Decoder context
  */
 typedef struct DxaDecContext {
-    AVFrame pic, prev;
+    AVFrame prev;
     int dsize;
     uint8_t *decomp_buf;
@@ -49,12 +49,12 @@ typedef struct DxaDecContext {
 static const int shift1[6] = { 0, 8, 8, 8, 4, 4 };
 static const int shift2[6] = { 0, 0, 8, 4, 0, 4 };
-static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst, uint8_t *src, uint8_t *ref)
+static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst,
+                     int stride, uint8_t *src, uint8_t *ref)
 {
     uint8_t *code, *data, *mv, *msk, *tmp, *tmp2;
     int i, j, k;
     int type, x, y, d, d2;
-    int stride = c->pic.linesize[0];
     uint32_t mask;
     code = src + 12;
@@ -192,6 +192,7 @@ static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst, uint
 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     DxaDecContext * const c = avctx->priv_data;
     uint8_t *outptr, *srcptr, *tmpptr;
     unsigned long dsize;
@@ -211,17 +212,17 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
         pc = 1;
     }
-    if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    memcpy(c->pic.data[1], c->pal, AVPALETTE_SIZE);
-    c->pic.palette_has_changed = pc;
+    memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
+    frame->palette_has_changed = pc;
-    outptr = c->pic.data[0];
+    outptr = frame->data[0];
     srcptr = c->decomp_buf;
     tmpptr = c->prev.data[0];
-    stride = c->pic.linesize[0];
+    stride = frame->linesize[0];
     if (bytestream2_get_le32(&gb) == MKTAG('N','U','L','L'))
         compr = -1;
@@ -239,22 +240,22 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
     }
     switch(compr){
     case -1:
-        c->pic.key_frame = 0;
-        c->pic.pict_type = AV_PICTURE_TYPE_P;
+        frame->key_frame = 0;
+        frame->pict_type = AV_PICTURE_TYPE_P;
         if(c->prev.data[0])
-            memcpy(c->pic.data[0], c->prev.data[0], c->pic.linesize[0] * avctx->height);
+            memcpy(frame->data[0], c->prev.data[0], frame->linesize[0] * avctx->height);
        else{ // Should happen only when first frame is 'NULL'
-            memset(c->pic.data[0], 0, c->pic.linesize[0] * avctx->height);
-            c->pic.key_frame = 1;
-            c->pic.pict_type = AV_PICTURE_TYPE_I;
+            memset(frame->data[0], 0, frame->linesize[0] * avctx->height);
+            frame->key_frame = 1;
+            frame->pict_type = AV_PICTURE_TYPE_I;
         }
         break;
     case 2:
     case 3:
     case 4:
     case 5:
-        c->pic.key_frame = !(compr & 1);
-        c->pic.pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
+        frame->key_frame = !(compr & 1);
+        frame->pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
         for(j = 0; j < avctx->height; j++){
             if((compr & 1) && tmpptr){
                 for(i = 0; i < avctx->width; i++)
@@ -268,25 +269,24 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
         break;
     case 12: // ScummVM coding
     case 13:
-        c->pic.key_frame = 0;
-        c->pic.pict_type = AV_PICTURE_TYPE_P;
+        frame->key_frame = 0;
+        frame->pict_type = AV_PICTURE_TYPE_P;
         if (!c->prev.data[0]) {
             av_log(avctx, AV_LOG_ERROR, "Missing reference frame\n");
             return AVERROR_INVALIDDATA;
         }
-        decode_13(avctx, c, c->pic.data[0], srcptr, c->prev.data[0]);
+        decode_13(avctx, c, frame->data[0], frame->linesize[0], srcptr, c->prev.data[0]);
         break;
     default:
         av_log(avctx, AV_LOG_ERROR, "Unknown/unsupported compression type %d\n", compr);
         return AVERROR_INVALIDDATA;
     }
-    FFSWAP(AVFrame, c->pic, c->prev);
-    if(c->pic.data[0])
-        avctx->release_buffer(avctx, &c->pic);
+    av_frame_unref(&c->prev);
+    if ((ret = av_frame_ref(&c->prev, frame)) < 0)
+        return ret;
     *got_frame = 1;
-    *(AVFrame*)data = c->prev;
     /* always report that the buffer was completely consumed */
     return avpkt->size;
@@ -298,7 +298,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
     avctx->pix_fmt = AV_PIX_FMT_PAL8;
-    avcodec_get_frame_defaults(&c->pic);
     avcodec_get_frame_defaults(&c->prev);
     c->dsize = avctx->width * avctx->height * 2;
@@ -316,10 +315,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
     DxaDecContext * const c = avctx->priv_data;
     av_freep(&c->decomp_buf);
-    if(c->prev.data[0])
-        avctx->release_buffer(avctx, &c->prev);
-    if(c->pic.data[0])
-        avctx->release_buffer(avctx, &c->pic);
+    av_frame_unref(&c->prev);
     return 0;
 }
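
DXA needs the previous frame's pixels for its delta chunks; where the old code FFSWAP-ed whole AVFrame structs, the refcounted version simply re-points c->prev at the frame it just decoded, as the tail of decode_frame() above does. The same two-call idiom, isolated as a sketch:

    /* Sketch: remember the just-decoded output frame as the prediction
     * source for the next packet. */
    static int sketch_keep_prev(AVFrame *prev, AVFrame *frame)
    {
        av_frame_unref(prev);             /* drop the old reference */
        return av_frame_ref(prev, frame); /* share the new one; may fail */
    }

The eacmv decoder later in this merge extends the same idea to two history frames, shuffling them with av_frame_move_ref() before taking the new reference.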


@@ -28,9 +28,6 @@
 static av_cold int decode_init(AVCodecContext *avctx)
 {
     avctx->pix_fmt = AV_PIX_FMT_YUV420P;
-    avctx->coded_frame = avcodec_alloc_frame();
-    if (!avctx->coded_frame)
-        return AVERROR(ENOMEM);
     return 0;
 }
@@ -39,21 +36,17 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                         AVPacket *avpkt)
 {
     int h, w;
-    AVFrame *pic = avctx->coded_frame;
+    AVFrame *pic = data;
     const uint8_t *src = avpkt->data;
     uint8_t *Y1, *Y2, *U, *V;
     int ret;
-    if (pic->data[0])
-        avctx->release_buffer(avctx, pic);
     if (avpkt->size < avctx->width * avctx->height * 3 / 2 + 16) {
         av_log(avctx, AV_LOG_ERROR, "packet too small\n");
         return AVERROR_INVALIDDATA;
     }
-    pic->reference = 0;
-    if ((ret = ff_get_buffer(avctx, pic)) < 0)
+    if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
         return ret;
     pic->pict_type = AV_PICTURE_TYPE_I;
@@ -84,28 +77,16 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     }
     *got_frame = 1;
-    *(AVFrame*)data = *pic;
     return avpkt->size;
 }
-static av_cold int decode_close(AVCodecContext *avctx)
-{
-    AVFrame *pic = avctx->coded_frame;
-    if (pic->data[0])
-        avctx->release_buffer(avctx, pic);
-    av_freep(&avctx->coded_frame);
-    return 0;
-}
 AVCodec ff_dxtory_decoder = {
     .name           = "dxtory",
     .long_name      = NULL_IF_CONFIG_SMALL("Dxtory"),
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_DXTORY,
     .init           = decode_init,
-    .close          = decode_close,
     .decode         = decode_frame,
     .capabilities   = CODEC_CAP_DR1,
 };


@@ -69,15 +69,15 @@ static void fill_picture_parameters(struct dxva_context *ctx, const H264Context
                                ff_dxva2_get_surface_index(ctx, r),
                                r->long_ref != 0);
-            if ((r->f.reference & PICT_TOP_FIELD) && r->field_poc[0] != INT_MAX)
+            if ((r->reference & PICT_TOP_FIELD) && r->field_poc[0] != INT_MAX)
                 pp->FieldOrderCntList[i][0] = r->field_poc[0];
-            if ((r->f.reference & PICT_BOTTOM_FIELD) && r->field_poc[1] != INT_MAX)
+            if ((r->reference & PICT_BOTTOM_FIELD) && r->field_poc[1] != INT_MAX)
                 pp->FieldOrderCntList[i][1] = r->field_poc[1];
             pp->FrameNumList[i] = r->long_ref ? r->pic_id : r->frame_num;
-            if (r->f.reference & PICT_TOP_FIELD)
+            if (r->reference & PICT_TOP_FIELD)
                 pp->UsedForReferenceFlags |= 1 << (2*i + 0);
-            if (r->f.reference & PICT_BOTTOM_FIELD)
+            if (r->reference & PICT_BOTTOM_FIELD)
                 pp->UsedForReferenceFlags |= 1 << (2*i + 1);
         } else {
             pp->RefFrameList[i].bPicEntry = 0xff;
@@ -230,7 +230,7 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
             unsigned plane;
             fill_picture_entry(&slice->RefPicList[list][i],
                                ff_dxva2_get_surface_index(ctx, r),
-                               r->f.reference == PICT_BOTTOM_FIELD);
+                               r->reference == PICT_BOTTOM_FIELD);
             for (plane = 0; plane < 3; plane++) {
                 int w, o;
                 if (plane == 0 && h->luma_weight_flag[list]) {


@@ -36,31 +36,38 @@
 typedef struct CmvContext {
     AVCodecContext *avctx;
-    AVFrame frame;        ///< current
-    AVFrame last_frame;   ///< last
-    AVFrame last2_frame;  ///< second-last
+    AVFrame *last_frame;   ///< last
+    AVFrame *last2_frame;  ///< second-last
     int width, height;
     unsigned int palette[AVPALETTE_COUNT];
 } CmvContext;
 static av_cold int cmv_decode_init(AVCodecContext *avctx){
     CmvContext *s = avctx->priv_data;
-    avcodec_get_frame_defaults(&s->frame);
-    avcodec_get_frame_defaults(&s->last_frame);
-    avcodec_get_frame_defaults(&s->last2_frame);
     s->avctx = avctx;
     avctx->pix_fmt = AV_PIX_FMT_PAL8;
+    s->last_frame  = av_frame_alloc();
+    s->last2_frame = av_frame_alloc();
+    if (!s->last_frame || !s->last2_frame) {
+        av_frame_free(&s->last_frame);
+        av_frame_free(&s->last2_frame);
+        return AVERROR(ENOMEM);
+    }
     return 0;
 }
-static void cmv_decode_intra(CmvContext * s, const uint8_t *buf, const uint8_t *buf_end){
-    unsigned char *dst = s->frame.data[0];
+static void cmv_decode_intra(CmvContext * s, AVFrame *frame,
+                             const uint8_t *buf, const uint8_t *buf_end)
+{
+    unsigned char *dst = frame->data[0];
     int i;
     for (i=0; i < s->avctx->height && buf_end - buf >= s->avctx->width; i++) {
         memcpy(dst, buf, s->avctx->width);
-        dst += s->frame.linesize[0];
+        dst += frame->linesize[0];
         buf += s->avctx->width;
     }
 }
@@ -84,7 +91,9 @@ static void cmv_motcomp(unsigned char *dst, int dst_stride,
     }
 }
-static void cmv_decode_inter(CmvContext * s, const uint8_t *buf, const uint8_t *buf_end){
+static void cmv_decode_inter(CmvContext *s, AVFrame *frame, const uint8_t *buf,
+                             const uint8_t *buf_end)
+{
     const uint8_t *raw = buf + (s->avctx->width*s->avctx->height/16);
     int x,y,i;
@@ -92,29 +101,29 @@ static void cmv_decode_inter(CmvContext * s, const uint8_t *buf, const uint8_t *
     for(y=0; y<s->avctx->height/4; y++)
         for(x=0; x<s->avctx->width/4 && buf_end - buf > i; x++) {
             if (buf[i]==0xFF) {
-                unsigned char *dst = s->frame.data[0] + (y*4)*s->frame.linesize[0] + x*4;
+                unsigned char *dst = frame->data[0] + (y*4)*frame->linesize[0] + x*4;
                 if (raw+16<buf_end && *raw==0xFF) { /* intra */
                     raw++;
                     memcpy(dst, raw, 4);
-                    memcpy(dst+s->frame.linesize[0], raw+4, 4);
-                    memcpy(dst+2*s->frame.linesize[0], raw+8, 4);
-                    memcpy(dst+3*s->frame.linesize[0], raw+12, 4);
+                    memcpy(dst + frame->linesize[0], raw+4, 4);
+                    memcpy(dst + 2 * frame->linesize[0], raw+8, 4);
+                    memcpy(dst + 3 * frame->linesize[0], raw+12, 4);
                     raw+=16;
                 }else if(raw<buf_end) { /* inter using second-last frame as reference */
                     int xoffset = (*raw & 0xF) - 7;
                     int yoffset = ((*raw >> 4)) - 7;
-                    if (s->last2_frame.data[0])
-                        cmv_motcomp(s->frame.data[0], s->frame.linesize[0],
-                                    s->last2_frame.data[0], s->last2_frame.linesize[0],
+                    if (s->last2_frame->data[0])
+                        cmv_motcomp(frame->data[0], frame->linesize[0],
+                                    s->last2_frame->data[0], s->last2_frame->linesize[0],
                                     x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
                     raw++;
                 }
             }else{ /* inter using last frame as reference */
                 int xoffset = (buf[i] & 0xF) - 7;
                 int yoffset = ((buf[i] >> 4)) - 7;
-                if (s->last_frame.data[0])
-                    cmv_motcomp(s->frame.data[0], s->frame.linesize[0],
-                                s->last_frame.data[0], s->last_frame.linesize[0],
+                if (s->last_frame->data[0])
+                    cmv_motcomp(frame->data[0], frame->linesize[0],
+                                s->last_frame->data[0], s->last_frame->linesize[0],
                                 x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
             }
             i++;
@@ -134,10 +143,8 @@ static void cmv_process_header(CmvContext *s, const uint8_t *buf, const uint8_t
     s->height = AV_RL16(&buf[6]);
     if (s->avctx->width!=s->width || s->avctx->height!=s->height) {
         avcodec_set_dimensions(s->avctx, s->width, s->height);
-        if (s->frame.data[0])
-            s->avctx->release_buffer(s->avctx, &s->frame);
-        if (s->last_frame.data[0])
-            s->avctx->release_buffer(s->avctx, &s->last_frame);
+        av_frame_unref(s->last_frame);
+        av_frame_unref(s->last2_frame);
     }
     s->avctx->time_base.num = 1;
@@ -164,6 +171,8 @@ static int cmv_decode_frame(AVCodecContext *avctx,
     int buf_size = avpkt->size;
     CmvContext *s = avctx->priv_data;
     const uint8_t *buf_end = buf + buf_size;
+    AVFrame *frame = data;
+    int ret;
     if (buf_end - buf < EA_PREAMBLE_SIZE)
         return AVERROR_INVALIDDATA;
@@ -179,48 +188,39 @@ static int cmv_decode_frame(AVCodecContext *avctx,
     if (av_image_check_size(s->width, s->height, 0, s->avctx))
         return -1;
-    /* shuffle */
-    if (s->last2_frame.data[0])
-        avctx->release_buffer(avctx, &s->last2_frame);
-    FFSWAP(AVFrame, s->last_frame, s->last2_frame);
-    FFSWAP(AVFrame, s->frame, s->last_frame);
-    s->frame.reference = 3;
-    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
-                            FF_BUFFER_HINTS_READABLE |
-                            FF_BUFFER_HINTS_PRESERVE;
-    if (ff_get_buffer(avctx, &s->frame)<0) {
+    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-        return -1;
+        return ret;
     }
-    memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+    memcpy(frame->data[1], s->palette, AVPALETTE_SIZE);
     buf += EA_PREAMBLE_SIZE;
     if ((buf[0]&1)) { // subtype
-        cmv_decode_inter(s, buf+2, buf_end);
-        s->frame.key_frame = 0;
-        s->frame.pict_type = AV_PICTURE_TYPE_P;
+        cmv_decode_inter(s, frame, buf+2, buf_end);
+        frame->key_frame = 0;
+        frame->pict_type = AV_PICTURE_TYPE_P;
     }else{
-        s->frame.key_frame = 1;
-        s->frame.pict_type = AV_PICTURE_TYPE_I;
-        cmv_decode_intra(s, buf+2, buf_end);
+        frame->key_frame = 1;
+        frame->pict_type = AV_PICTURE_TYPE_I;
+        cmv_decode_intra(s, frame, buf+2, buf_end);
     }
+    av_frame_unref(s->last2_frame);
+    av_frame_move_ref(s->last2_frame, s->last_frame);
+    if ((ret = av_frame_ref(s->last_frame, frame)) < 0)
+        return ret;
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
     return buf_size;
 }
 static av_cold int cmv_decode_end(AVCodecContext *avctx){
     CmvContext *s = avctx->priv_data;
-    if (s->frame.data[0])
-        s->avctx->release_buffer(avctx, &s->frame);
-    if (s->last_frame.data[0])
+    av_frame_free(&s->last_frame);
+    av_frame_free(&s->last2_frame);
s->avctx->release_buffer(avctx, &s->last_frame);
if (s->last2_frame.data[0])
s->avctx->release_buffer(avctx, &s->last2_frame);
return 0; return 0;
} }
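Note: the CMV hunks above replace the old release_buffer()/FFSWAP shuffle with refcounted frames: the two reference slots are allocated once in init and rotated after every packet with av_frame_unref()/av_frame_move_ref()/av_frame_ref(). A minimal standalone sketch of that rotation against the public libavutil API (shuffle_refs and the GRAY8 test frame are illustrative, not from the commit; build with e.g. gcc shuffle.c -lavutil):

    #include <libavutil/error.h>
    #include <libavutil/frame.h>
    #include <libavutil/pixfmt.h>

    /* Rotate the reference chain: last2 <- last <- cur. */
    static int shuffle_refs(AVFrame *last, AVFrame *last2, const AVFrame *cur)
    {
        av_frame_unref(last2);          /* drop the oldest reference           */
        av_frame_move_ref(last2, last); /* hand last's buffers over, no copy   */
        return av_frame_ref(last, cur); /* share cur's buffers, bump refcounts */
    }

    int main(void)
    {
        AVFrame *cur   = av_frame_alloc();
        AVFrame *last  = av_frame_alloc();
        AVFrame *last2 = av_frame_alloc();
        int ret = AVERROR(ENOMEM);

        if (cur && last && last2) {
            cur->format = AV_PIX_FMT_GRAY8; /* toy stand-in for a decoded picture */
            cur->width  = 16;
            cur->height = 16;
            ret = av_frame_get_buffer(cur, 32);
            if (ret >= 0)
                ret = shuffle_refs(last, last2, cur);
        }

        av_frame_free(&cur);
        av_frame_free(&last);
        av_frame_free(&last2);
        return ret < 0;
    }

av_frame_move_ref() transfers ownership without touching refcounts, which is why the middle slot only needs the single unref before it.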

libavcodec/eamad.c

@@ -45,7 +45,6 @@
 typedef struct MadContext {
     AVCodecContext *avctx;
     DSPContext dsp;
-    AVFrame frame;
     AVFrame last_frame;
     GetBitContext gb;
     void *bitstream_buf;
@@ -78,15 +77,16 @@ static inline void comp(unsigned char *dst, int dst_stride,
             dst[j*dst_stride + i] = av_clip_uint8(src[j*src_stride + i] + add);
 }
 
-static inline void comp_block(MadContext *t, int mb_x, int mb_y,
+static inline void comp_block(MadContext *t, AVFrame *frame,
+                              int mb_x, int mb_y,
                               int j, int mv_x, int mv_y, int add)
 {
     if (j < 4) {
         unsigned offset = (mb_y*16 + ((j&2)<<2) + mv_y)*t->last_frame.linesize[0] + mb_x*16 + ((j&1)<<3) + mv_x;
         if (offset >= (t->avctx->height - 7) * t->last_frame.linesize[0] - 7)
             return;
-        comp(t->frame.data[0] + (mb_y*16 + ((j&2)<<2))*t->frame.linesize[0] + mb_x*16 + ((j&1)<<3),
-             t->frame.linesize[0],
+        comp(frame->data[0] + (mb_y*16 + ((j&2)<<2))*frame->linesize[0] + mb_x*16 + ((j&1)<<3),
+             frame->linesize[0],
              t->last_frame.data[0] + offset,
              t->last_frame.linesize[0], add);
     } else if (!(t->avctx->flags & CODEC_FLAG_GRAY)) {
@@ -94,24 +94,25 @@ static inline void comp_block(MadContext *t, int mb_x, int mb_y,
         unsigned offset = (mb_y * 8 + (mv_y/2))*t->last_frame.linesize[index] + mb_x * 8 + (mv_x/2);
         if (offset >= (t->avctx->height/2 - 7) * t->last_frame.linesize[index] - 7)
             return;
-        comp(t->frame.data[index] + (mb_y*8)*t->frame.linesize[index] + mb_x * 8,
-             t->frame.linesize[index],
+        comp(frame->data[index] + (mb_y*8)*frame->linesize[index] + mb_x * 8,
+             frame->linesize[index],
              t->last_frame.data[index] + offset,
              t->last_frame.linesize[index], add);
     }
 }
 
-static inline void idct_put(MadContext *t, int16_t *block, int mb_x, int mb_y, int j)
+static inline void idct_put(MadContext *t, AVFrame *frame, int16_t *block,
+                            int mb_x, int mb_y, int j)
 {
     if (j < 4) {
         ff_ea_idct_put_c(
-            t->frame.data[0] + (mb_y*16 + ((j&2)<<2))*t->frame.linesize[0] + mb_x*16 + ((j&1)<<3),
-            t->frame.linesize[0], block);
+            frame->data[0] + (mb_y*16 + ((j&2)<<2))*frame->linesize[0] + mb_x*16 + ((j&1)<<3),
+            frame->linesize[0], block);
     } else if (!(t->avctx->flags & CODEC_FLAG_GRAY)) {
         int index = j - 3;
         ff_ea_idct_put_c(
-            t->frame.data[index] + (mb_y*8)*t->frame.linesize[index] + mb_x*8,
-            t->frame.linesize[index], block);
+            frame->data[index] + (mb_y*8)*frame->linesize[index] + mb_x*8,
+            frame->linesize[index], block);
     }
 }
@@ -186,7 +187,7 @@ static int decode_motion(GetBitContext *gb)
     return value;
 }
 
-static int decode_mb(MadContext *s, int inter)
+static int decode_mb(MadContext *s, AVFrame *frame, int inter)
 {
     int mv_map = 0;
     int mv_x, mv_y;
@@ -205,12 +206,12 @@ static int decode_mb(MadContext *s, int inter)
         if (mv_map & (1<<j)) {  // mv_x and mv_y are guarded by mv_map
             int add = 2*decode_motion(&s->gb);
             if (s->last_frame.data[0])
-                comp_block(s, s->mb_x, s->mb_y, j, mv_x, mv_y, add);
+                comp_block(s, frame, s->mb_x, s->mb_y, j, mv_x, mv_y, add);
         } else {
             s->dsp.clear_block(s->block);
             if(decode_block_intra(s, s->block) < 0)
                 return -1;
-            idct_put(s, s->block, s->mb_x, s->mb_y, j);
+            idct_put(s, frame, s->block, s->mb_x, s->mb_y, j);
         }
     }
     return 0;
@@ -233,9 +234,10 @@ static int decode_frame(AVCodecContext *avctx,
     int buf_size       = avpkt->size;
     const uint8_t *buf_end = buf+buf_size;
     MadContext *s      = avctx->priv_data;
-    int width, height, ret;
+    AVFrame *frame     = data;
+    int width, height;
     int chunk_type;
-    int inter;
+    int inter, ret;
 
     if (buf_size < 26) {
         av_log(avctx, AV_LOG_ERROR, "Input buffer too small\n");
@@ -261,18 +263,12 @@ static int decode_frame(AVCodecContext *avctx,
         if ((ret = av_image_check_size(width, height, 0, avctx)) < 0)
             return ret;
         avcodec_set_dimensions(avctx, width, height);
-        if (s->frame.data[0])
-            avctx->release_buffer(avctx, &s->frame);
-        if (s->last_frame.data[0])
-            avctx->release_buffer(avctx, &s->last_frame);
+        av_frame_unref(&s->last_frame);
     }
 
-    s->frame.reference = 3;
-    if (!s->frame.data[0]) {
-        if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
-            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-            return ret;
-        }
+    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
 
     av_fast_padded_malloc(&s->bitstream_buf, &s->bitstream_buf_size,
@@ -285,14 +281,16 @@ static int decode_frame(AVCodecContext *avctx,
     for (s->mb_y=0; s->mb_y < (avctx->height+15)/16; s->mb_y++)
         for (s->mb_x=0; s->mb_x < (avctx->width +15)/16; s->mb_x++)
-            if(decode_mb(s, inter) < 0)
+            if(decode_mb(s, frame, inter) < 0)
                 return AVERROR_INVALIDDATA;
 
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
 
-    if (chunk_type != MADe_TAG)
-        FFSWAP(AVFrame, s->frame, s->last_frame);
+    if (chunk_type != MADe_TAG) {
+        av_frame_unref(&s->last_frame);
+        if ((ret = av_frame_ref(&s->last_frame, frame)) < 0)
+            return ret;
+    }
 
     return buf_size;
 }
@@ -300,10 +298,7 @@ static int decode_frame(AVCodecContext *avctx,
 static av_cold int decode_end(AVCodecContext *avctx)
 {
     MadContext *t = avctx->priv_data;
-    if (t->frame.data[0])
-        avctx->release_buffer(avctx, &t->frame);
-    if (t->last_frame.data[0])
-        avctx->release_buffer(avctx, &t->last_frame);
+    av_frame_unref(&t->last_frame);
     av_free(t->bitstream_buf);
     return 0;
 }
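Note: unlike the CMV decoder, MadContext keeps its reference as an AVFrame embedded by value, so teardown uses av_frame_unref() where eacmv.c uses av_frame_free(). A small sketch of the difference, with a hypothetical ToyContext standing in for the two decoder contexts:

    #include <libavutil/frame.h>

    typedef struct ToyContext {
        AVFrame  last_frame; /* embedded by value, as in MadContext */
        AVFrame *last_ptr;   /* heap-allocated, as in CmvContext    */
    } ToyContext;

    static void toy_close(ToyContext *s)
    {
        av_frame_unref(&s->last_frame); /* buffers go, the struct itself stays */
        av_frame_free(&s->last_ptr);    /* buffers and the AVFrame both go     */
    }

    int main(void)
    {
        ToyContext s = { .last_ptr = av_frame_alloc() };
        toy_close(&s); /* both calls are safe on empty/NULL frames */
        return 0;
    }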

libavcodec/eatgq.c

@@ -39,7 +39,6 @@
 typedef struct TgqContext {
     AVCodecContext *avctx;
-    AVFrame frame;
     int width, height;
     ScanTable scantable;
     int qtable[64];
@@ -105,21 +104,21 @@ static void tgq_decode_block(TgqContext *s, int16_t block[64], GetBitContext *gb
         block[0] += 128 << 4;
 }
 
-static void tgq_idct_put_mb(TgqContext *s, int16_t (*block)[64],
+static void tgq_idct_put_mb(TgqContext *s, int16_t (*block)[64], AVFrame *frame,
                             int mb_x, int mb_y)
 {
-    int linesize = s->frame.linesize[0];
-    uint8_t *dest_y  = s->frame.data[0] + (mb_y * 16 * linesize)            + mb_x * 16;
-    uint8_t *dest_cb = s->frame.data[1] + (mb_y * 8 * s->frame.linesize[1]) + mb_x * 8;
-    uint8_t *dest_cr = s->frame.data[2] + (mb_y * 8 * s->frame.linesize[2]) + mb_x * 8;
+    int linesize = frame->linesize[0];
+    uint8_t *dest_y  = frame->data[0] + (mb_y * 16 * linesize)          + mb_x * 16;
+    uint8_t *dest_cb = frame->data[1] + (mb_y * 8 * frame->linesize[1]) + mb_x * 8;
+    uint8_t *dest_cr = frame->data[2] + (mb_y * 8 * frame->linesize[2]) + mb_x * 8;
 
     ff_ea_idct_put_c(dest_y                    , linesize, block[0]);
     ff_ea_idct_put_c(dest_y                 + 8, linesize, block[1]);
     ff_ea_idct_put_c(dest_y + 8 * linesize     , linesize, block[2]);
     ff_ea_idct_put_c(dest_y + 8 * linesize + 8, linesize, block[3]);
     if (!(s->avctx->flags & CODEC_FLAG_GRAY)) {
-        ff_ea_idct_put_c(dest_cb, s->frame.linesize[1], block[4]);
-        ff_ea_idct_put_c(dest_cr, s->frame.linesize[2], block[5]);
+        ff_ea_idct_put_c(dest_cb, frame->linesize[1], block[4]);
+        ff_ea_idct_put_c(dest_cr, frame->linesize[2], block[5]);
     }
 }
@@ -132,23 +131,24 @@ static inline void tgq_dconly(TgqContext *s, unsigned char *dst,
         memset(dst + j * dst_stride, level, 8);
 }
 
-static void tgq_idct_put_mb_dconly(TgqContext *s, int mb_x, int mb_y, const int8_t *dc)
+static void tgq_idct_put_mb_dconly(TgqContext *s, AVFrame *frame,
+                                   int mb_x, int mb_y, const int8_t *dc)
 {
-    int linesize = s->frame.linesize[0];
-    uint8_t *dest_y  = s->frame.data[0] + (mb_y * 16 * linesize)            + mb_x * 16;
-    uint8_t *dest_cb = s->frame.data[1] + (mb_y * 8 * s->frame.linesize[1]) + mb_x * 8;
-    uint8_t *dest_cr = s->frame.data[2] + (mb_y * 8 * s->frame.linesize[2]) + mb_x * 8;
+    int linesize = frame->linesize[0];
+    uint8_t *dest_y  = frame->data[0] + (mb_y * 16 * linesize)          + mb_x * 16;
+    uint8_t *dest_cb = frame->data[1] + (mb_y * 8 * frame->linesize[1]) + mb_x * 8;
+    uint8_t *dest_cr = frame->data[2] + (mb_y * 8 * frame->linesize[2]) + mb_x * 8;
     tgq_dconly(s, dest_y,                     linesize, dc[0]);
     tgq_dconly(s, dest_y                 + 8, linesize, dc[1]);
     tgq_dconly(s, dest_y + 8 * linesize,      linesize, dc[2]);
     tgq_dconly(s, dest_y + 8 * linesize + 8, linesize, dc[3]);
     if (!(s->avctx->flags & CODEC_FLAG_GRAY)) {
-        tgq_dconly(s, dest_cb, s->frame.linesize[1], dc[4]);
-        tgq_dconly(s, dest_cr, s->frame.linesize[2], dc[5]);
+        tgq_dconly(s, dest_cb, frame->linesize[1], dc[4]);
+        tgq_dconly(s, dest_cr, frame->linesize[2], dc[5]);
     }
 }
 
-static int tgq_decode_mb(TgqContext *s, int mb_y, int mb_x)
+static int tgq_decode_mb(TgqContext *s, AVFrame *frame, int mb_y, int mb_x)
 {
     int mode;
     int i;
@@ -160,7 +160,7 @@ static int tgq_decode_mb(TgqContext *s, int mb_y, int mb_x)
         init_get_bits(&gb, s->gb.buffer, FFMIN(bytestream2_get_bytes_left(&s->gb), mode) * 8);
         for (i = 0; i < 6; i++)
             tgq_decode_block(s, s->block[i], &gb);
-        tgq_idct_put_mb(s, s->block, mb_x, mb_y);
+        tgq_idct_put_mb(s, s->block, frame, mb_x, mb_y);
         bytestream2_skip(&s->gb, mode);
     } else {
         if (mode == 3) {
@@ -178,7 +178,7 @@ static int tgq_decode_mb(TgqContext *s, int mb_y, int mb_x)
             av_log(s->avctx, AV_LOG_ERROR, "unsupported mb mode %i\n", mode);
             return -1;
         }
-        tgq_idct_put_mb_dconly(s, mb_x, mb_y, dc);
+        tgq_idct_put_mb_dconly(s, frame, mb_x, mb_y, dc);
     }
     return 0;
 }
@@ -201,6 +201,7 @@ static int tgq_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
     TgqContext *s      = avctx->priv_data;
+    AVFrame *frame     = data;
     int x, y, ret;
     int big_endian;
 
@@ -220,48 +221,33 @@ static int tgq_decode_frame(AVCodecContext *avctx,
     if (s->avctx->width!=s->width || s->avctx->height!=s->height) {
         avcodec_set_dimensions(s->avctx, s->width, s->height);
-        if (s->frame.data[0])
-            avctx->release_buffer(avctx, &s->frame);
     }
 
     tgq_calculate_qtable(s, bytestream2_get_byteu(&s->gb));
     bytestream2_skip(&s->gb, 3);
 
-    if (!s->frame.data[0]) {
-        s->frame.key_frame    = 1;
-        s->frame.pict_type    = AV_PICTURE_TYPE_I;
-        s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
-        if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
-            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-            return ret;
-        }
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    frame->key_frame = 1;
+    frame->pict_type = AV_PICTURE_TYPE_I;
 
     for (y = 0; y < FFALIGN(avctx->height, 16) >> 4; y++)
         for (x = 0; x < FFALIGN(avctx->width, 16) >> 4; x++)
-            if (tgq_decode_mb(s, y, x) < 0)
+            if (tgq_decode_mb(s, frame, y, x) < 0)
                 return AVERROR_INVALIDDATA;
 
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
 
     return avpkt->size;
 }
 
-static av_cold int tgq_decode_end(AVCodecContext *avctx)
-{
-    TgqContext *s = avctx->priv_data;
-    if (s->frame.data[0])
-        s->avctx->release_buffer(avctx, &s->frame);
-    return 0;
-}
-
 AVCodec ff_eatgq_decoder = {
     .name           = "eatgq",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_TGQ,
     .priv_data_size = sizeof(TgqContext),
     .init           = tgq_decode_init,
-    .close          = tgq_decode_end,
     .decode         = tgq_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("Electronic Arts TGQ video"),
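Note: TGQ is intra-only, so after this change no frame state survives a call: each packet decodes into a caller-owned buffer, which is why tgq_decode_end and the .close callback disappear entirely. A sketch of such a stateless producer, assuming the public av_frame_get_buffer() in place of the internal ff_get_buffer():

    #include <libavutil/frame.h>
    #include <libavutil/pixfmt.h>

    /* Fill one freshly allocated frame per call; the caller owns and unrefs it. */
    static int produce_intra_frame(AVFrame *out, int w, int h)
    {
        int ret;

        out->format = AV_PIX_FMT_YUV420P;
        out->width  = w;
        out->height = h;
        if ((ret = av_frame_get_buffer(out, 32)) < 0)
            return ret;
        out->key_frame = 1;               /* every picture is a keyframe */
        out->pict_type = AV_PICTURE_TYPE_I;
        return 0;
    }

    int main(void)
    {
        AVFrame *f = av_frame_alloc();
        int ret = f ? produce_intra_frame(f, 32, 32) : -1;
        av_frame_free(&f);
        return ret < 0;
    }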

libavcodec/eatgv.c

@@ -31,6 +31,7 @@
 #include "avcodec.h"
 #define BITSTREAM_READER_LE
 #include "get_bits.h"
+#include "internal.h"
 #include "libavutil/imgutils.h"
 #include "libavutil/mem.h"
@@ -39,8 +40,8 @@
 typedef struct TgvContext {
     AVCodecContext *avctx;
-    AVFrame frame;
     AVFrame last_frame;
+    uint8_t *frame_buffer;
     int width,height;
     uint32_t palette[AVPALETTE_COUNT];
@@ -56,7 +57,6 @@ static av_cold int tgv_decode_init(AVCodecContext *avctx)
     s->avctx = avctx;
     avctx->time_base = (AVRational){1, 15};
     avctx->pix_fmt = AV_PIX_FMT_PAL8;
-    avcodec_get_frame_defaults(&s->frame);
     avcodec_get_frame_defaults(&s->last_frame);
     return 0;
 }
@@ -140,8 +140,8 @@ static int unpack(const uint8_t *src, const uint8_t *src_end,
  * Decode inter-frame
  * @return 0 on success, -1 on critical buffer underflow
  */
-static int tgv_decode_inter(TgvContext *s, const uint8_t *buf,
-                            const uint8_t *buf_end)
+static int tgv_decode_inter(TgvContext *s, AVFrame *frame,
                             const uint8_t *buf, const uint8_t *buf_end)
 {
     int num_mvs;
     int num_blocks_raw;
@@ -241,22 +241,13 @@ static int tgv_decode_inter(TgvContext *s, const uint8_t *buf,
         for (j = 0; j < 4; j++)
             for (i = 0; i < 4; i++)
-                s->frame.data[0][(y * 4 + j) * s->frame.linesize[0] + (x * 4 + i)] =
+                frame->data[0][(y * 4 + j) * frame->linesize[0] + (x * 4 + i)] =
                     src[j * src_stride + i];
     }
 
     return 0;
 }
 
-/** release AVFrame buffers if allocated */
-static void cond_release_buffer(AVFrame *pic)
-{
-    if (pic->data[0]) {
-        av_freep(&pic->data[0]);
-        av_free(pic->data[1]);
-    }
-}
-
 static int tgv_decode_frame(AVCodecContext *avctx,
                             void *data, int *got_frame,
                             AVPacket *avpkt)
@@ -265,6 +256,7 @@ static int tgv_decode_frame(AVCodecContext *avctx,
     int buf_size           = avpkt->size;
     TgvContext *s          = avctx->priv_data;
     const uint8_t *buf_end = buf + buf_size;
+    AVFrame *frame         = data;
     int chunk_type, ret;
 
     if (buf_end - buf < EA_PREAMBLE_SIZE)
@@ -284,8 +276,8 @@ static int tgv_decode_frame(AVCodecContext *avctx,
         s->height = AV_RL16(&buf[2]);
         if (s->avctx->width != s->width || s->avctx->height != s->height) {
             avcodec_set_dimensions(s->avctx, s->width, s->height);
-            cond_release_buffer(&s->frame);
-            cond_release_buffer(&s->last_frame);
+            av_freep(&s->frame_buffer);
+            av_frame_unref(&s->last_frame);
         }
 
         pal_count = AV_RL16(&buf[6]);
@@ -299,46 +291,46 @@ static int tgv_decode_frame(AVCodecContext *avctx,
     if ((ret = av_image_check_size(s->width, s->height, 0, avctx)) < 0)
         return ret;
 
-    /* shuffle */
-    FFSWAP(AVFrame, s->frame, s->last_frame);
-    if (!s->frame.data[0]) {
-        s->frame.reference    = 3;
-        s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
-        s->frame.linesize[0]  = s->width;
-        s->frame.data[0] = av_malloc(s->width * s->height);
-        if (!s->frame.data[0])
-            return AVERROR(ENOMEM);
-        s->frame.data[1] = av_malloc(AVPALETTE_SIZE);
-        if (!s->frame.data[1]) {
-            av_freep(&s->frame.data[0]);
-            return AVERROR(ENOMEM);
-        }
-    }
-    memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
+        return ret;
+
+    memcpy(frame->data[1], s->palette, AVPALETTE_SIZE);
 
     if (chunk_type == kVGT_TAG) {
-        s->frame.key_frame = 1;
-        s->frame.pict_type = AV_PICTURE_TYPE_I;
-        if (unpack(buf, buf_end, s->frame.data[0], s->avctx->width, s->avctx->height) < 0) {
+        int y;
+        frame->key_frame = 1;
+        frame->pict_type = AV_PICTURE_TYPE_I;
+
+        if (!s->frame_buffer &&
+            !(s->frame_buffer = av_malloc(s->width * s->height)))
+            return AVERROR(ENOMEM);
+
+        if (unpack(buf, buf_end, s->frame_buffer, s->avctx->width, s->avctx->height) < 0) {
             av_log(avctx, AV_LOG_WARNING, "truncated intra frame\n");
             return AVERROR_INVALIDDATA;
         }
+
+        for (y = 0; y < s->height; y++)
+            memcpy(frame->data[0] + y * frame->linesize[0],
+                   s->frame_buffer + y * s->width,
+                   s->width);
     } else {
         if (!s->last_frame.data[0]) {
             av_log(avctx, AV_LOG_WARNING, "inter frame without corresponding intra frame\n");
             return buf_size;
        }
-        s->frame.key_frame = 0;
-        s->frame.pict_type = AV_PICTURE_TYPE_P;
-        if (tgv_decode_inter(s, buf, buf_end) < 0) {
+        frame->key_frame = 0;
+        frame->pict_type = AV_PICTURE_TYPE_P;
+        if (tgv_decode_inter(s, frame, buf, buf_end) < 0) {
            av_log(avctx, AV_LOG_WARNING, "truncated inter frame\n");
            return AVERROR_INVALIDDATA;
        }
     }
 
+    av_frame_unref(&s->last_frame);
+    if ((ret = av_frame_ref(&s->last_frame, frame)) < 0)
+        return ret;
+
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
 
     return buf_size;
 }
@@ -346,8 +338,8 @@ static int tgv_decode_frame(AVCodecContext *avctx,
 static av_cold int tgv_decode_end(AVCodecContext *avctx)
 {
     TgvContext *s = avctx->priv_data;
-    cond_release_buffer(&s->frame);
-    cond_release_buffer(&s->last_frame);
+    av_frame_unref(&s->last_frame);
+    av_freep(&s->frame_buffer);
     av_free(s->mv_codebook);
     av_free(s->block_codebook);
     return 0;
@@ -362,4 +354,5 @@ AVCodec ff_eatgv_decoder = {
     .close          = tgv_decode_end,
     .decode         = tgv_decode_frame,
     .long_name      = NULL_IF_CONFIG_SMALL("Electronic Arts TGV video"),
+    .capabilities   = CODEC_CAP_DR1,
 };
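Note: the TGV intra path now unpacks into a plain scratch buffer (s->frame_buffer) and copies it row by row into the refcounted frame, because a frame from get_buffer may have padded rows (linesize >= width). A standalone sketch of that linesize-aware copy (the names are illustrative):

    #include <stdint.h>
    #include <string.h>
    #include <libavutil/frame.h>
    #include <libavutil/pixfmt.h>

    /* Copy a tightly packed w*h plane into a frame whose rows may be padded:
     * linesize[0] >= width, so the copy has to go row by row. */
    static void copy_plane(AVFrame *dst, const uint8_t *src, int w, int h)
    {
        int y;

        for (y = 0; y < h; y++)
            memcpy(dst->data[0] + y * dst->linesize[0], src + y * w, w);
    }

    int main(void)
    {
        uint8_t scratch[8 * 8] = { 0 }; /* stands in for s->frame_buffer */
        AVFrame *f = av_frame_alloc();
        int ret = -1;

        if (f) {
            f->format = AV_PIX_FMT_GRAY8;
            f->width  = 8;
            f->height = 8;
            if ((ret = av_frame_get_buffer(f, 32)) >= 0)
                copy_plane(f, scratch, 8, 8);
        }
        av_frame_free(&f);
        return ret < 0;
    }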

libavcodec/eatqi.c

@@ -36,7 +36,6 @@
 typedef struct TqiContext {
     MpegEncContext s;
-    AVFrame frame;
     void *bitstream_buf;
     unsigned int bitstream_buf_size;
     DECLARE_ALIGNED(16, int16_t, block)[6][64];
@@ -68,21 +67,21 @@ static int tqi_decode_mb(MpegEncContext *s, int16_t (*block)[64])
     return 0;
 }
 
-static inline void tqi_idct_put(TqiContext *t, int16_t (*block)[64])
+static inline void tqi_idct_put(TqiContext *t, AVFrame *frame, int16_t (*block)[64])
 {
     MpegEncContext *s = &t->s;
-    int linesize= t->frame.linesize[0];
-    uint8_t *dest_y  = t->frame.data[0] + (s->mb_y * 16* linesize            ) + s->mb_x * 16;
-    uint8_t *dest_cb = t->frame.data[1] + (s->mb_y * 8 * t->frame.linesize[1]) + s->mb_x * 8;
-    uint8_t *dest_cr = t->frame.data[2] + (s->mb_y * 8 * t->frame.linesize[2]) + s->mb_x * 8;
+    int linesize = frame->linesize[0];
+    uint8_t *dest_y  = frame->data[0] + (s->mb_y * 16* linesize           ) + s->mb_x * 16;
+    uint8_t *dest_cb = frame->data[1] + (s->mb_y * 8 * frame->linesize[1]) + s->mb_x * 8;
+    uint8_t *dest_cr = frame->data[2] + (s->mb_y * 8 * frame->linesize[2]) + s->mb_x * 8;
 
     ff_ea_idct_put_c(dest_y                 , linesize, block[0]);
     ff_ea_idct_put_c(dest_y              + 8, linesize, block[1]);
     ff_ea_idct_put_c(dest_y + 8*linesize    , linesize, block[2]);
     ff_ea_idct_put_c(dest_y + 8*linesize + 8, linesize, block[3]);
     if(!(s->avctx->flags&CODEC_FLAG_GRAY)) {
-        ff_ea_idct_put_c(dest_cb, t->frame.linesize[1], block[4]);
-        ff_ea_idct_put_c(dest_cr, t->frame.linesize[2], block[5]);
+        ff_ea_idct_put_c(dest_cb, frame->linesize[1], block[4]);
+        ff_ea_idct_put_c(dest_cr, frame->linesize[2], block[5]);
     }
 }
@@ -104,21 +103,20 @@ static int tqi_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf_end = buf+buf_size;
     TqiContext *t = avctx->priv_data;
     MpegEncContext *s = &t->s;
+    AVFrame *frame = data;
+    int ret;
 
     s->width  = AV_RL16(&buf[0]);
     s->height = AV_RL16(&buf[2]);
     tqi_calculate_qtable(s, buf[4]);
     buf += 8;
 
-    if (t->frame.data[0])
-        avctx->release_buffer(avctx, &t->frame);
-
     if (s->avctx->width!=s->width || s->avctx->height!=s->height)
         avcodec_set_dimensions(s->avctx, s->width, s->height);
 
-    if(ff_get_buffer(avctx, &t->frame) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-        return -1;
+        return ret;
     }
 
     av_fast_padded_malloc(&t->bitstream_buf, &t->bitstream_buf_size,
@@ -134,20 +132,17 @@ static int tqi_decode_frame(AVCodecContext *avctx,
     {
         if (tqi_decode_mb(s, t->block) < 0)
             goto end;
-        tqi_idct_put(t, t->block);
+        tqi_idct_put(t, frame, t->block);
     }
     end:
 
     *got_frame = 1;
-    *(AVFrame*)data = t->frame;
     return buf_size;
 }
 
 static av_cold int tqi_decode_end(AVCodecContext *avctx)
 {
     TqiContext *t = avctx->priv_data;
-    if(t->frame.data[0])
-        avctx->release_buffer(avctx, &t->frame);
     av_free(t->bitstream_buf);
     return 0;
 }

libavcodec/error_resilience.c

@@ -147,7 +147,7 @@ static void guess_dc(ERContext *s, int16_t *dc, int w,
         for(b_x=0; b_x<w; b_x++){
             int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
             int error_j= s->error_status_table[mb_index_j];
-            int intra_j = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]);
+            int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
             if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                 color= dc[b_x + b_y*stride];
                 distance= b_x;
@@ -160,7 +160,7 @@ static void guess_dc(ERContext *s, int16_t *dc, int w,
         for(b_x=w-1; b_x>=0; b_x--){
             int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
             int error_j= s->error_status_table[mb_index_j];
-            int intra_j = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]);
+            int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
             if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                 color= dc[b_x + b_y*stride];
                 distance= b_x;
@@ -175,7 +175,7 @@ static void guess_dc(ERContext *s, int16_t *dc, int w,
         for(b_y=0; b_y<h; b_y++){
             int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
             int error_j= s->error_status_table[mb_index_j];
-            int intra_j = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]);
+            int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
             if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                 color= dc[b_x + b_y*stride];
                 distance= b_y;
@@ -188,7 +188,7 @@ static void guess_dc(ERContext *s, int16_t *dc, int w,
         for(b_y=h-1; b_y>=0; b_y--){
             int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
             int error_j= s->error_status_table[mb_index_j];
-            int intra_j = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]);
+            int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
             if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                 color= dc[b_x + b_y*stride];
                 distance= b_y;
@@ -205,7 +205,7 @@ static void guess_dc(ERContext *s, int16_t *dc, int w,
             mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
             error    = s->error_status_table[mb_index];
 
-            if (IS_INTER(s->cur_pic->f.mb_type[mb_index]))
+            if (IS_INTER(s->cur_pic->mb_type[mb_index]))
                 continue; // inter
             if (!(error & ER_DC_ERROR))
                 continue; // dc-ok
@@ -246,13 +246,13 @@ static void h_block_filter(ERContext *s, uint8_t *dst, int w,
             int y;
             int left_status  = s->error_status_table[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride];
             int right_status = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride];
-            int left_intra   = IS_INTRA(s->cur_pic->f.mb_type[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
-            int right_intra  = IS_INTRA(s->cur_pic->f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
+            int left_intra   = IS_INTRA(s->cur_pic->mb_type[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
+            int right_intra  = IS_INTRA(s->cur_pic->mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
             int left_damage  = left_status & ER_MB_ERROR;
             int right_damage = right_status & ER_MB_ERROR;
             int offset       = b_x * 8 + b_y * stride * 8;
-            int16_t *left_mv  = s->cur_pic->f.motion_val[0][mvy_stride * b_y + mvx_stride *  b_x];
-            int16_t *right_mv = s->cur_pic->f.motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];
+            int16_t *left_mv  = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride *  b_x];
+            int16_t *right_mv = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];
             if (!(left_damage || right_damage))
                 continue; // both undamaged
             if ((!left_intra) && (!right_intra) &&
@@ -314,14 +314,14 @@ static void v_block_filter(ERContext *s, uint8_t *dst, int w, int h,
             int x;
             int top_status    = s->error_status_table[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride];
             int bottom_status = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride];
-            int top_intra     = IS_INTRA(s->cur_pic->f.mb_type[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride]);
-            int bottom_intra  = IS_INTRA(s->cur_pic->f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
+            int top_intra     = IS_INTRA(s->cur_pic->mb_type[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride]);
+            int bottom_intra  = IS_INTRA(s->cur_pic->mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
             int top_damage    = top_status & ER_MB_ERROR;
             int bottom_damage = bottom_status & ER_MB_ERROR;
             int offset        = b_x * 8 + b_y * stride * 8;
-            int16_t *top_mv    = s->cur_pic->f.motion_val[0][mvy_stride *  b_y      + mvx_stride * b_x];
-            int16_t *bottom_mv = s->cur_pic->f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
+            int16_t *top_mv    = s->cur_pic->motion_val[0][mvy_stride *  b_y      + mvx_stride * b_x];
+            int16_t *bottom_mv = s->cur_pic->motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
 
             if (!(top_damage || bottom_damage))
                 continue; // both undamaged
@@ -386,7 +386,7 @@ static void guess_mv(ERContext *s)
         int f = 0;
         int error = s->error_status_table[mb_xy];
 
-        if (IS_INTRA(s->cur_pic->f.mb_type[mb_xy]))
+        if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
             f = MV_FROZEN; // intra // FIXME check
         if (!(error & ER_MV_ERROR))
             f = MV_FROZEN; // inter with undamaged MV
@@ -394,13 +394,13 @@ static void guess_mv(ERContext *s)
         fixed[mb_xy] = f;
         if (f == MV_FROZEN)
             num_avail++;
-        else if(s->last_pic->f.data[0] && s->last_pic->f.motion_val[0]){
+        else if(s->last_pic->f.data[0] && s->last_pic->motion_val[0]){
             const int mb_y= mb_xy / s->mb_stride;
             const int mb_x= mb_xy % s->mb_stride;
             const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
-            s->cur_pic->f.motion_val[0][mot_index][0]= s->last_pic->f.motion_val[0][mot_index][0];
-            s->cur_pic->f.motion_val[0][mot_index][1]= s->last_pic->f.motion_val[0][mot_index][1];
-            s->cur_pic->f.ref_index[0][4*mb_xy]      = s->last_pic->f.ref_index[0][4*mb_xy];
+            s->cur_pic->motion_val[0][mot_index][0]= s->last_pic->motion_val[0][mot_index][0];
+            s->cur_pic->motion_val[0][mot_index][1]= s->last_pic->motion_val[0][mot_index][1];
+            s->cur_pic->ref_index[0][4*mb_xy]      = s->last_pic->ref_index[0][4*mb_xy];
         }
     }
@@ -411,7 +411,7 @@ static void guess_mv(ERContext *s)
                 const int mb_xy = mb_x + mb_y * s->mb_stride;
                 int mv_dir = (s->last_pic && s->last_pic->f.data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
 
-                if (IS_INTRA(s->cur_pic->f.mb_type[mb_xy]))
+                if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
                     continue;
                 if (!(s->error_status_table[mb_xy] & ER_MV_ERROR))
                     continue;
@@ -452,7 +452,7 @@ static void guess_mv(ERContext *s)
                 if (fixed[mb_xy] == MV_FROZEN)
                     continue;
 
-                av_assert1(!IS_INTRA(s->cur_pic->f.mb_type[mb_xy]));
+                av_assert1(!IS_INTRA(s->cur_pic->mb_type[mb_xy]));
                 av_assert1(s->last_pic && s->last_pic->f.data[0]);
 
                 j = 0;
@@ -483,38 +483,38 @@ static void guess_mv(ERContext *s)
                 if (mb_x > 0 && fixed[mb_xy - 1]) {
                     mv_predictor[pred_count][0] =
-                        s->cur_pic->f.motion_val[0][mot_index - mot_step][0];
+                        s->cur_pic->motion_val[0][mot_index - mot_step][0];
                     mv_predictor[pred_count][1] =
-                        s->cur_pic->f.motion_val[0][mot_index - mot_step][1];
+                        s->cur_pic->motion_val[0][mot_index - mot_step][1];
                     ref[pred_count] =
-                        s->cur_pic->f.ref_index[0][4 * (mb_xy - 1)];
+                        s->cur_pic->ref_index[0][4 * (mb_xy - 1)];
                     pred_count++;
                 }
                 if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                     mv_predictor[pred_count][0] =
-                        s->cur_pic->f.motion_val[0][mot_index + mot_step][0];
+                        s->cur_pic->motion_val[0][mot_index + mot_step][0];
                     mv_predictor[pred_count][1] =
-                        s->cur_pic->f.motion_val[0][mot_index + mot_step][1];
+                        s->cur_pic->motion_val[0][mot_index + mot_step][1];
                     ref[pred_count] =
-                        s->cur_pic->f.ref_index[0][4 * (mb_xy + 1)];
+                        s->cur_pic->ref_index[0][4 * (mb_xy + 1)];
                     pred_count++;
                 }
                 if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                     mv_predictor[pred_count][0] =
-                        s->cur_pic->f.motion_val[0][mot_index - mot_stride * mot_step][0];
+                        s->cur_pic->motion_val[0][mot_index - mot_stride * mot_step][0];
                     mv_predictor[pred_count][1] =
-                        s->cur_pic->f.motion_val[0][mot_index - mot_stride * mot_step][1];
+                        s->cur_pic->motion_val[0][mot_index - mot_stride * mot_step][1];
                     ref[pred_count] =
-                        s->cur_pic->f.ref_index[0][4 * (mb_xy - s->mb_stride)];
+                        s->cur_pic->ref_index[0][4 * (mb_xy - s->mb_stride)];
                     pred_count++;
                 }
                 if (mb_y + 1<mb_height && fixed[mb_xy + mb_stride]) {
                     mv_predictor[pred_count][0] =
-                        s->cur_pic->f.motion_val[0][mot_index + mot_stride * mot_step][0];
+                        s->cur_pic->motion_val[0][mot_index + mot_stride * mot_step][0];
                     mv_predictor[pred_count][1] =
-                        s->cur_pic->f.motion_val[0][mot_index + mot_stride * mot_step][1];
+                        s->cur_pic->motion_val[0][mot_index + mot_stride * mot_step][1];
                     ref[pred_count] =
-                        s->cur_pic->f.ref_index[0][4 * (mb_xy + s->mb_stride)];
+                        s->cur_pic->ref_index[0][4 * (mb_xy + s->mb_stride)];
                     pred_count++;
                 }
                 if (pred_count == 0)
@@ -572,19 +572,19 @@ skip_mean_and_median:
                     if (s->avctx->codec_id == AV_CODEC_ID_H264) {
                         // FIXME
                     } else {
-                        ff_thread_await_progress(&s->last_pic->f,
+                        ff_thread_await_progress(&s->last_pic->tf,
                                                  mb_y, 0);
                     }
-                    if (!s->last_pic->f.motion_val[0] ||
-                        !s->last_pic->f.ref_index[0])
+                    if (!s->last_pic->motion_val[0] ||
+                        !s->last_pic->ref_index[0])
                         goto skip_last_mv;
-                    prev_x   = s->last_pic->f.motion_val[0][mot_index][0];
-                    prev_y   = s->last_pic->f.motion_val[0][mot_index][1];
-                    prev_ref = s->last_pic->f.ref_index[0][4 * mb_xy];
+                    prev_x   = s->last_pic->motion_val[0][mot_index][0];
+                    prev_y   = s->last_pic->motion_val[0][mot_index][1];
+                    prev_ref = s->last_pic->ref_index[0][4 * mb_xy];
                 } else {
-                    prev_x   = s->cur_pic->f.motion_val[0][mot_index][0];
-                    prev_y   = s->cur_pic->f.motion_val[0][mot_index][1];
-                    prev_ref = s->cur_pic->f.ref_index[0][4 * mb_xy];
+                    prev_x   = s->cur_pic->motion_val[0][mot_index][0];
+                    prev_y   = s->cur_pic->motion_val[0][mot_index][1];
+                    prev_ref = s->cur_pic->ref_index[0][4 * mb_xy];
                 }
 
                 /* last MV */
@@ -601,9 +601,9 @@ skip_last_mv:
                 uint8_t *src = s->cur_pic->f.data[0] +
                                mb_x * 16 + mb_y * 16 * linesize[0];
 
-                s->cur_pic->f.motion_val[0][mot_index][0] =
+                s->cur_pic->motion_val[0][mot_index][0] =
                     s->mv[0][0][0] = mv_predictor[j][0];
-                s->cur_pic->f.motion_val[0][mot_index][1] =
+                s->cur_pic->motion_val[0][mot_index][1] =
                     s->mv[0][0][1] = mv_predictor[j][1];
 
                 // predictor intra or otherwise not available
@@ -648,8 +648,8 @@ skip_last_mv:
                 for (i = 0; i < mot_step; i++)
                     for (j = 0; j < mot_step; j++) {
-                        s->cur_pic->f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
-                        s->cur_pic->f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
+                        s->cur_pic->motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
+                        s->cur_pic->motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
                     }
 
                 s->decode_mb(s->opaque, ref[best_pred], MV_DIR_FORWARD,
@@ -731,7 +731,7 @@ static int is_intra_more_likely(ERContext *s)
             if (s->avctx->codec_id == AV_CODEC_ID_H264) {
                 // FIXME
             } else {
-                ff_thread_await_progress(&s->last_pic->f, mb_y, 0);
+                ff_thread_await_progress(&s->last_pic->tf, mb_y, 0);
             }
             is_intra_likely += s->dsp->sad[0](NULL, last_mb_ptr, mb_ptr,
                                               linesize[0], 16);
@@ -740,7 +740,7 @@ static int is_intra_more_likely(ERContext *s)
                                               last_mb_ptr + linesize[0] * 16,
                                               linesize[0], 16);
         } else {
-            if (IS_INTRA(s->cur_pic->f.mb_type[mb_xy]))
+            if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
                 is_intra_likely++;
             else
                 is_intra_likely--;
@@ -875,13 +875,25 @@ void ff_er_frame_end(ERContext *s)
         }
     }
 
-    if (s->cur_pic->f.motion_val[0] == NULL) {
+    if (s->cur_pic->motion_val[0] == NULL) {
         av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
 
         for (i = 0; i < 2; i++) {
-            s->cur_pic->f.ref_index[i]     = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
-            s->cur_pic->motion_val_base[i] = av_mallocz((size + 4) * 2 * sizeof(uint16_t));
-            s->cur_pic->f.motion_val[i]    = s->cur_pic->motion_val_base[i] + 4;
+            s->cur_pic->ref_index_buf[i]  = av_buffer_allocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
+            s->cur_pic->motion_val_buf[i] = av_buffer_allocz((size + 4) * 2 * sizeof(uint16_t));
+            if (!s->cur_pic->ref_index_buf[i] || !s->cur_pic->motion_val_buf[i])
+                break;
+            s->cur_pic->ref_index[i]  = s->cur_pic->ref_index_buf[i]->data;
+            s->cur_pic->motion_val[i] = (int16_t (*)[2])s->cur_pic->motion_val_buf[i]->data + 4;
+        }
+        if (i < 2) {
+            for (i = 0; i < 2; i++) {
+                av_buffer_unref(&s->cur_pic->ref_index_buf[i]);
+                av_buffer_unref(&s->cur_pic->motion_val_buf[i]);
+                s->cur_pic->ref_index[i]  = NULL;
+                s->cur_pic->motion_val[i] = NULL;
+            }
+            return;
         }
         s->cur_pic->f.motion_subsample_log2 = 3;
     }
@@ -1046,9 +1058,9 @@ void ff_er_frame_end(ERContext *s)
             continue;
 
         if (is_intra_likely)
-            s->cur_pic->f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
+            s->cur_pic->mb_type[mb_xy] = MB_TYPE_INTRA4x4;
         else
-            s->cur_pic->f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
+            s->cur_pic->mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
     }
 
     // change inter to intra blocks if no reference frames are available
@@ -1056,15 +1068,15 @@ void ff_er_frame_end(ERContext *s)
         !(s->next_pic && s->next_pic->f.data[0]))
         for (i = 0; i < s->mb_num; i++) {
             const int mb_xy = s->mb_index2xy[i];
-            if (!IS_INTRA(s->cur_pic->f.mb_type[mb_xy]))
-                s->cur_pic->f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
+            if (!IS_INTRA(s->cur_pic->mb_type[mb_xy]))
+                s->cur_pic->mb_type[mb_xy] = MB_TYPE_INTRA4x4;
         }
 
     /* handle inter blocks with damaged AC */
     for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
         for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
             const int mb_xy   = mb_x + mb_y * s->mb_stride;
-            const int mb_type = s->cur_pic->f.mb_type[mb_xy];
+            const int mb_type = s->cur_pic->mb_type[mb_xy];
             const int dir     = !(s->last_pic && s->last_pic->f.data[0]);
             const int mv_dir  = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
             int mv_type;
@@ -1083,13 +1095,13 @@ void ff_er_frame_end(ERContext *s)
                 int j;
                 mv_type = MV_TYPE_8X8;
                 for (j = 0; j < 4; j++) {
-                    s->mv[0][j][0] = s->cur_pic->f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
-                    s->mv[0][j][1] = s->cur_pic->f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
+                    s->mv[0][j][0] = s->cur_pic->motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
+                    s->mv[0][j][1] = s->cur_pic->motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
                 }
             } else {
                 mv_type = MV_TYPE_16X16;
-                s->mv[0][0][0] = s->cur_pic->f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
-                s->mv[0][0][1] = s->cur_pic->f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
+                s->mv[0][0][0] = s->cur_pic->motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
+                s->mv[0][0][1] = s->cur_pic->motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
             }
 
             s->decode_mb(s->opaque, 0 /* FIXME h264 partitioned slices need this set */,
@@ -1103,7 +1115,7 @@ void ff_er_frame_end(ERContext *s)
         for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
             int       xy      = mb_x * 2 + mb_y * 2 * s->b8_stride;
             const int mb_xy   = mb_x + mb_y * s->mb_stride;
-            const int mb_type = s->cur_pic->f.mb_type[mb_xy];
+            const int mb_type = s->cur_pic->mb_type[mb_xy];
             int mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
 
             error = s->error_status_table[mb_xy];
@@ -1125,12 +1137,12 @@ void ff_er_frame_end(ERContext *s)
                 int time_pb = s->pb_time;
 
                 av_assert0(s->avctx->codec_id != AV_CODEC_ID_H264);
-                ff_thread_await_progress(&s->next_pic->f, mb_y, 0);
+                ff_thread_await_progress(&s->next_pic->tf, mb_y, 0);
 
-                s->mv[0][0][0] = s->next_pic->f.motion_val[0][xy][0] *  time_pb            / time_pp;
-                s->mv[0][0][1] = s->next_pic->f.motion_val[0][xy][1] *  time_pb            / time_pp;
-                s->mv[1][0][0] = s->next_pic->f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
-                s->mv[1][0][1] = s->next_pic->f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
+                s->mv[0][0][0] = s->next_pic->motion_val[0][xy][0] *  time_pb            / time_pp;
+                s->mv[0][0][1] = s->next_pic->motion_val[0][xy][1] *  time_pb            / time_pp;
+                s->mv[1][0][0] = s->next_pic->motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
+                s->mv[1][0][1] = s->next_pic->motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
             } else {
                 s->mv[0][0][0] = 0;
                 s->mv[0][0][1] = 0;
@@ -1155,7 +1167,7 @@ void ff_er_frame_end(ERContext *s)
             int16_t *dc_ptr;
             uint8_t *dest_y, *dest_cb, *dest_cr;
             const int mb_xy   = mb_x + mb_y * s->mb_stride;
-            const int mb_type = s->cur_pic->f.mb_type[mb_xy];
+            const int mb_type = s->cur_pic->mb_type[mb_xy];
 
            error = s->error_status_table[mb_xy];
@@ -1208,7 +1220,7 @@ void ff_er_frame_end(ERContext *s)
         for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
             uint8_t *dest_y, *dest_cb, *dest_cr;
             const int mb_xy   = mb_x + mb_y * s->mb_stride;
-            const int mb_type = s->cur_pic->f.mb_type[mb_xy];
+            const int mb_type = s->cur_pic->mb_type[mb_xy];
 
            error = s->error_status_table[mb_xy];
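Note: besides dropping the `f.` indirection (the mb_type/motion_val/ref_index tables now live in the Picture itself rather than in its AVFrame), the ff_er_frame_end() hunk switches the fallback tables to AVBufferRef and unwinds cleanly when only one of the paired allocations succeeds. A compact standalone sketch of that allocate-then-roll-back pattern with the public buffer API (alloc_buffer_pair is a made-up name):

    #include <libavutil/buffer.h>
    #include <libavutil/error.h>

    /* Allocate a matched pair of zeroed, refcounted buffers; on partial
     * failure unref whatever succeeded so no half-initialized state leaks. */
    static int alloc_buffer_pair(AVBufferRef *bufs[2], int size)
    {
        int i;

        for (i = 0; i < 2; i++) {
            bufs[i] = av_buffer_allocz(size);
            if (!bufs[i])
                break;
        }
        if (i < 2) {
            for (i = 0; i < 2; i++)
                av_buffer_unref(&bufs[i]);
            return AVERROR(ENOMEM);
        }
        return 0;
    }

    int main(void)
    {
        AVBufferRef *bufs[2] = { NULL, NULL };
        int ret = alloc_buffer_pair(bufs, 64);

        av_buffer_unref(&bufs[0]);
        av_buffer_unref(&bufs[1]);
        return ret < 0;
    }

Breaking out of the loop and testing i afterwards keeps the error path in one place instead of duplicating it per allocation.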

libavcodec/escape124.c

@@ -79,8 +79,7 @@ static av_cold int escape124_decode_close(AVCodecContext *avctx)
     for (i = 0; i < 3; i++)
         av_free(s->codebooks[i].blocks);
 
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+    av_frame_unref(&s->frame);
 
     return 0;
 }
@@ -204,6 +203,7 @@ static int escape124_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     Escape124Context *s = avctx->priv_data;
+    AVFrame *frame = data;
 
     GetBitContext gb;
     unsigned frame_flags, frame_size;
@@ -216,8 +216,7 @@ static int escape124_decode_frame(AVCodecContext *avctx,
     uint16_t* old_frame_data, *new_frame_data;
     unsigned old_stride, new_stride;
 
-    AVFrame new_frame;
-    avcodec_get_frame_defaults(&new_frame);
+    int ret;
 
     init_get_bits(&gb, buf, buf_size * 8);
@@ -232,10 +231,14 @@ static int escape124_decode_frame(AVCodecContext *avctx,
     // Leave last frame unchanged
     // FIXME: Is this necessary?  I haven't seen it in any real samples
     if (!(frame_flags & 0x114) || !(frame_flags & 0x7800000)) {
+        if (!s->frame.data[0])
+            return AVERROR_INVALIDDATA;
+
         av_log(NULL, AV_LOG_DEBUG, "Skipping frame\n");
 
         *got_frame = 1;
-        *(AVFrame*)data = s->frame;
+        if ((ret = av_frame_ref(frame, &s->frame)) < 0)
+            return ret;
 
         return frame_size;
     }
@@ -268,14 +271,13 @@ static int escape124_decode_frame(AVCodecContext *avctx,
         }
     }
 
-    new_frame.reference = 3;
-    if (ff_get_buffer(avctx, &new_frame)) {
+    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-        return -1;
+        return ret;
     }
 
-    new_frame_data = (uint16_t*)new_frame.data[0];
-    new_stride = new_frame.linesize[0] / 2;
+    new_frame_data = (uint16_t*)frame->data[0];
+    new_stride = frame->linesize[0] / 2;
     old_frame_data = (uint16_t*)s->frame.data[0];
     old_stride = s->frame.linesize[0] / 2;
@@ -356,10 +358,10 @@ static int escape124_decode_frame(AVCodecContext *avctx,
            "Escape sizes: %i, %i, %i\n",
            frame_size, buf_size, get_bits_count(&gb) / 8);
 
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+    av_frame_unref(&s->frame);
+    if ((ret = av_frame_ref(&s->frame, frame)) < 0)
+        return ret;
 
-    *(AVFrame*)data = s->frame = new_frame;
     *got_frame = 1;
 
     return frame_size;
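Note: Escape 124 keeps a reference to its last output so that a "skip" packet can hand the same picture back through av_frame_ref(); only refcounts move, no pixels are copied. A standalone sketch of that re-emit step (reemit_last and the RGB565 test frame are illustrative):

    #include <libavutil/error.h>
    #include <libavutil/frame.h>
    #include <libavutil/pixfmt.h>

    /* Re-deliver the previous picture without touching pixel data: the output
     * frame simply takes additional references on the stored frame's buffers. */
    static int reemit_last(AVFrame *out, const AVFrame *last)
    {
        if (!last->data[0])
            return AVERROR_INVALIDDATA; /* nothing has been decoded yet */
        return av_frame_ref(out, last);
    }

    int main(void)
    {
        AVFrame *last = av_frame_alloc(), *out = av_frame_alloc();
        int ret = -1;

        if (last && out) {
            last->format = AV_PIX_FMT_RGB565; /* the codec outputs 16-bit pixels */
            last->width  = 16;
            last->height = 16;
            if (av_frame_get_buffer(last, 32) >= 0)
                ret = reemit_last(out, last);
        }
        av_frame_free(&last);
        av_frame_free(&out);
        return ret < 0;
    }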

libavcodec/escape130.c

@@ -40,6 +40,7 @@ static av_cold int escape130_decode_init(AVCodecContext *avctx)
 {
     Escape130Context *s = avctx->priv_data;
     avctx->pix_fmt = AV_PIX_FMT_YUV420P;
+    avcodec_get_frame_defaults(&s->frame);
 
     if((avctx->width&1) || (avctx->height&1)){
         av_log(avctx, AV_LOG_ERROR, "Dimensions are not a multiple of the block size\n");
@@ -55,8 +56,7 @@ static av_cold int escape130_decode_close(AVCodecContext *avctx)
 {
     Escape130Context *s = avctx->priv_data;
 
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+    av_frame_unref(&s->frame);
 
     av_freep(&s->bases);
@@ -108,6 +108,7 @@ static int escape130_decode_frame(AVCodecContext *avctx,
     GetBitContext gb;
     unsigned i;
+    int ret;
 
     uint8_t *old_y, *old_cb, *old_cr,
             *new_y, *new_cb, *new_cr;
@@ -120,7 +121,7 @@ static int escape130_decode_frame(AVCodecContext *avctx,
     unsigned y_base = 0;
     uint8_t *yb= s->bases;
 
-    AVFrame new_frame = { { 0 } };
+    AVFrame *frame = data;
 
     init_get_bits(&gb, buf, buf_size * 8);
@@ -130,18 +131,17 @@ static int escape130_decode_frame(AVCodecContext *avctx,
     // Header; no useful information in here
     skip_bits_long(&gb, 128);
 
-    new_frame.reference = 3;
-    if (ff_get_buffer(avctx, &new_frame)) {
+    if (ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return -1;
     }
 
-    new_y = new_frame.data[0];
-    new_cb = new_frame.data[1];
-    new_cr = new_frame.data[2];
-    new_y_stride = new_frame.linesize[0];
-    new_cb_stride = new_frame.linesize[1];
-    new_cr_stride = new_frame.linesize[2];
+    new_y = frame->data[0];
+    new_cb = frame->data[1];
+    new_cr = frame->data[2];
+    new_y_stride = frame->linesize[0];
+    new_cb_stride = frame->linesize[1];
+    new_cr_stride = frame->linesize[2];
     old_y = s->frame.data[0];
     old_cb = s->frame.data[1];
     old_cr = s->frame.data[2];
@@ -298,10 +298,10 @@ static int escape130_decode_frame(AVCodecContext *avctx,
            "Escape sizes: %i, %i\n",
            buf_size, get_bits_count(&gb) / 8);
 
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+    av_frame_unref(&s->frame);
+    if ((ret = av_frame_ref(&s->frame, frame)) < 0)
+        return ret;
 
-    *(AVFrame*)data = s->frame = new_frame;
     *got_frame = 1;
 
     return buf_size;

libavcodec/evrcdec.c

@@ -746,7 +746,7 @@ static int evrc_decode_frame(AVCodecContext *avctx, void *data,
     int i, j, ret, error_flag = 0;
 
     frame->nb_samples = 160;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0)
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
         return ret;
     samples = (float *)frame->data[0];

libavcodec/exr.c

@@ -70,7 +70,7 @@ typedef struct EXRThreadData {
 } EXRThreadData;
 typedef struct EXRContext {
-    AVFrame picture;
+    AVFrame *picture;
     int compr;
     enum ExrPixelType pixel_type;
     int channel_offsets[4]; // 0 = red, 1 = green, 2 = blue and 3 = alpha
@@ -336,7 +336,7 @@ static int decode_block(AVCodecContext *avctx, void *tdata,
                         int jobnr, int threadnr)
 {
     EXRContext *s = avctx->priv_data;
-    AVFrame *const p = &s->picture;
+    AVFrame *const p = s->picture;
     EXRThreadData *td = &s->thread_data[threadnr];
     const uint8_t *channel_buffer[4] = { 0 };
     const uint8_t *buf = s->buf;
@@ -458,8 +458,8 @@ static int decode_frame(AVCodecContext *avctx,
     const uint8_t *buf_end = buf + buf_size;
     EXRContext *const s = avctx->priv_data;
+    ThreadFrame frame = { .f = data };
     AVFrame *picture = data;
-    AVFrame *const p = &s->picture;
     uint8_t *ptr;
     int i, y, magic_number, version, flags, ret;
@@ -718,8 +718,6 @@ static int decode_frame(AVCodecContext *avctx,
         return AVERROR_PATCHWELCOME;
     }
-    if (s->picture.data[0])
-        ff_thread_release_buffer(avctx, &s->picture);
     if (av_image_check_size(w, h, 0, avctx))
         return AVERROR_INVALIDDATA;
@@ -756,7 +754,7 @@ static int decode_frame(AVCodecContext *avctx,
         memset(s->thread_data + prev_size, 0, s->thread_data_size - prev_size);
     }
-    if ((ret = ff_thread_get_buffer(avctx, p)) < 0) {
+    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -764,46 +762,33 @@ static int decode_frame(AVCodecContext *avctx,
     if (buf_end - buf < scan_line_blocks * 8)
         return AVERROR_INVALIDDATA;
     s->table = buf;
-    ptr = p->data[0];
+    ptr = picture->data[0];
     // Zero out the start if ymin is not 0
     for (y = 0; y < s->ymin; y++) {
         memset(ptr, 0, out_line_size);
-        ptr += p->linesize[0];
+        ptr += picture->linesize[0];
     }
+    s->picture = picture;
     avctx->execute2(avctx, decode_block, s->thread_data, NULL, scan_line_blocks);
     // Zero out the end if ymax+1 is not h
     for (y = s->ymax + 1; y < avctx->height; y++) {
         memset(ptr, 0, out_line_size);
-        ptr += p->linesize[0];
+        ptr += picture->linesize[0];
     }
-    *picture   = s->picture;
     *got_frame = 1;
     return buf_size;
 }
-static av_cold int decode_init(AVCodecContext *avctx)
-{
-    EXRContext *s = avctx->priv_data;
-    avcodec_get_frame_defaults(&s->picture);
-    avctx->coded_frame = &s->picture;
-    return 0;
-}
 static av_cold int decode_end(AVCodecContext *avctx)
 {
     EXRContext *s = avctx->priv_data;
     int i;
-    if (s->picture.data[0])
-        avctx->release_buffer(avctx, &s->picture);
     for (i = 0; i < s->thread_data_size / sizeof(EXRThreadData); i++) {
         EXRThreadData *td = &s->thread_data[i];
         av_free(td->uncompressed_data);
@@ -822,7 +807,6 @@ AVCodec ff_exr_decoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_EXR,
     .priv_data_size = sizeof(EXRContext),
-    .init           = decode_init,
     .close          = decode_end,
     .decode         = decode_frame,
     .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS | CODEC_CAP_SLICE_THREADS,
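
Note the EXRContext change: the embedded AVFrame becomes a plain pointer that decode_frame points at the caller's output frame before fanning out to the slice workers, so the context never owns picture data itself. A hedged sketch of the idea (simplified types, not the real callback signature):

    #include <libavutil/frame.h>

    typedef struct DemoThreadCtx {
        AVFrame *picture; /* borrowed: points at the caller's frame during decode */
    } DemoThreadCtx;

    static void demo_decode_block(DemoThreadCtx *c, int row)
    {
        /* workers reach the shared output through the context */
        c->picture->data[0][row * c->picture->linesize[0]] = 0;
    }

    static void demo_decode_frame(DemoThreadCtx *c, AVFrame *out)
    {
        c->picture = out;        /* publish before the parallel section */
        demo_decode_block(c, 0); /* imagine avctx->execute2() here */
    }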

View File

@@ -191,10 +191,7 @@ av_cold int ffv1_close(AVCodecContext *avctx)
     FFV1Context *s = avctx->priv_data;
     int i, j;
-    if (avctx->codec->decode && s->picture.data[0])
-        avctx->release_buffer(avctx, &s->picture);
-    if (avctx->codec->decode && s->last_picture.data[0])
-        avctx->release_buffer(avctx, &s->last_picture);
+    av_frame_unref(&s->last_picture);
     for (j = 0; j < s->slice_count; j++) {
         FFV1Context *fs = s->slice_context[j];

View File

@@ -89,8 +89,9 @@ typedef struct FFV1Context {
     int transparency;
     int flags;
     int picture_number;
-    AVFrame picture;
-    AVFrame last_picture;
+    AVFrame picture, last_picture;
+    AVFrame *cur;
     int plane_count;
     int ac;            ///< 1=range coder <-> 0=golomb rice
     int ac_byte_count; ///< number of bytes used for AC coding

View File

@@ -306,16 +306,16 @@ static int decode_slice_header(FFV1Context *f, FFV1Context *fs)
     ps = get_symbol(c, state, 0);
     if (ps == 1) {
-        f->picture.interlaced_frame = 1;
-        f->picture.top_field_first  = 1;
+        f->cur->interlaced_frame = 1;
+        f->cur->top_field_first  = 1;
     } else if (ps == 2) {
-        f->picture.interlaced_frame = 1;
-        f->picture.top_field_first  = 0;
+        f->cur->interlaced_frame = 1;
+        f->cur->top_field_first  = 0;
     } else if (ps == 3) {
-        f->picture.interlaced_frame = 0;
+        f->cur->interlaced_frame = 0;
     }
-    f->picture.sample_aspect_ratio.num = get_symbol(c, state, 0);
-    f->picture.sample_aspect_ratio.den = get_symbol(c, state, 0);
+    f->cur->sample_aspect_ratio.num = get_symbol(c, state, 0);
+    f->cur->sample_aspect_ratio.den = get_symbol(c, state, 0);
     return 0;
 }
@@ -326,7 +326,7 @@ static int decode_slice(AVCodecContext *c, void *arg)
     FFV1Context *f = fs->avctx->priv_data;
     int width, height, x, y, ret;
     const int ps = av_pix_fmt_desc_get(c->pix_fmt)->comp[0].step_minus1 + 1;
-    AVFrame * const p = &f->picture;
+    AVFrame * const p = f->cur;
     if (f->version > 2) {
         if (ffv1_init_slice_state(f, fs) < 0)
@@ -338,7 +338,7 @@ static int decode_slice(AVCodecContext *c, void *arg)
     }
     if ((ret = ffv1_init_slice_state(f, fs)) < 0)
         return ret;
-    if (f->picture.key_frame)
+    if (f->cur->key_frame)
         ffv1_clear_slice_state(f, fs);
     width = fs->slice_width;
@@ -732,16 +732,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
     int buf_size        = avpkt->size;
     FFV1Context *f      = avctx->priv_data;
     RangeCoder *const c = &f->slice_context[0]->c;
-    AVFrame *const p    = &f->picture;
     int i, ret;
     uint8_t keystate = 128;
     const uint8_t *buf_p;
+    AVFrame *const p = data;
+    f->cur = p;
-    AVFrame *picture = data;
-    /* release previously stored data */
-    if (p->data[0])
-        avctx->release_buffer(avctx, p);
     ff_init_range_decoder(c, buf, buf_size);
     ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
@@ -762,8 +758,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
         p->key_frame = 0;
     }
-    p->reference = 3; //for error concealment
-    if ((ret = ff_get_buffer(avctx, p)) < 0) {
+    if ((ret = ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -806,6 +801,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
             ff_init_range_decoder(&fs->c, buf_p, v);
         } else
             fs->c.bytestream_end = (uint8_t *)(buf_p + v);
+        fs->cur = p;
     }
     avctx->execute(avctx,
@@ -824,14 +821,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
         for (j = 0; j < 4; j++) {
             int sh = (j==1 || j==2) ? f->chroma_h_shift : 0;
             int sv = (j==1 || j==2) ? f->chroma_v_shift : 0;
-            dst[j] = f->picture     .data[j] + f->picture     .linesize[j]*
+            dst[j] = p->data[j] + p->linesize[j]*
                      (fs->slice_y>>sv) + (fs->slice_x>>sh);
             src[j] = f->last_picture.data[j] + f->last_picture.linesize[j]*
                      (fs->slice_y>>sv) + (fs->slice_x>>sh);
         }
-        av_image_copy(dst,
-                      f->picture.linesize,
-                      (const uint8_t **)src,
+        av_image_copy(dst, p->linesize, (const uint8_t **)src,
                       f->last_picture.linesize,
                       avctx->pix_fmt,
                       fs->slice_width,
@@ -841,10 +836,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
     f->picture_number++;
-    *picture   = *p;
-    *got_frame = 1;
-    FFSWAP(AVFrame, f->picture, f->last_picture);
+    av_frame_unref(&f->last_picture);
+    if ((ret = av_frame_ref(&f->last_picture, p)) < 0)
+        return ret;
+    f->cur = NULL;
+    *got_frame = 1;
     return buf_size;
 }
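
When a damaged FFV1 slice cannot be decoded, the code above falls back to copying the co-located area from last_picture with av_image_copy(). A whole-frame variant of the same call, assuming both frames are already allocated with matching format and size (illustrative helper, not from the tree):

    #include <libavutil/frame.h>
    #include <libavutil/imgutils.h>

    static void conceal_from_last(AVFrame *dst, const AVFrame *src,
                                  enum AVPixelFormat fmt, int w, int h)
    {
        /* copies all planes line by line, honouring each frame's linesize */
        av_image_copy(dst->data, dst->linesize,
                      (const uint8_t **)src->data, src->linesize,
                      fmt, w, h);
    }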

View File

@@ -444,7 +444,7 @@ static int wavesynth_decode(AVCodecContext *avc, void *rframe, int *rgot_frame,
     if (duration <= 0)
         return AVERROR(EINVAL);
     ws->frame.nb_samples = duration;
-    r = ff_get_buffer(avc, &ws->frame);
+    r = ff_get_buffer(avc, &ws->frame, 0);
     if (r < 0)
         return r;
     pcm = (int16_t *)ws->frame.data[0];

View File

@@ -545,7 +545,7 @@ static int flac_decode_frame(AVCodecContext *avctx, void *data,
     /* get output buffer */
     frame->nb_samples = s->blocksize;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
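
These audio hunks are all the same mechanical change: ff_get_buffer() grew a flags argument, and plain decoders pass 0 because the frame is handed straight to the user rather than kept as a codec reference. Outside a codec the equivalent allocation is public; a sketch with assumed format values:

    #include <libavutil/frame.h>
    #include <libavutil/channel_layout.h>

    static AVFrame *alloc_s16_stereo(int nb_samples)
    {
        AVFrame *frame = av_frame_alloc();
        if (!frame)
            return NULL;
        frame->nb_samples     = nb_samples;
        frame->format         = AV_SAMPLE_FMT_S16;
        frame->channel_layout = AV_CH_LAYOUT_STEREO;
        if (av_frame_get_buffer(frame, 0) < 0) /* allocates refcounted buffers */
            av_frame_free(&frame);
        return frame;
    }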

View File

@@ -41,6 +41,7 @@
 #include "avcodec.h"
 #include "bytestream.h"
 #include "get_bits.h"
+#include "internal.h"
 typedef struct BlockInfo {
     uint8_t *pos;
@@ -243,7 +244,7 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
 {
     int buf_size = avpkt->size;
     FlashSVContext *s = avctx->priv_data;
-    int h_blocks, v_blocks, h_part, v_part, i, j;
+    int h_blocks, v_blocks, h_part, v_part, i, j, ret;
     GetBitContext gb;
     int last_blockwidth = s->block_width;
     int last_blockheight= s->block_height;
@@ -337,13 +338,9 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
            s->image_width, s->image_height, s->block_width, s->block_height,
            h_blocks, v_blocks, h_part, v_part);
-    s->frame.reference    = 3;
-    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
-                            FF_BUFFER_HINTS_PRESERVE |
-                            FF_BUFFER_HINTS_REUSABLE;
-    if (avctx->reget_buffer(avctx, &s->frame) < 0) {
+    if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
-        return -1;
+        return ret;
     }
     /* loop over all block columns */
@@ -368,8 +365,7 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
                 s->diff_height = cur_blk_height;
                 if (8 * size > get_bits_left(&gb)) {
-                    avctx->release_buffer(avctx, &s->frame);
-                    s->frame.data[0] = NULL;
+                    av_frame_unref(&s->frame);
                     return AVERROR_INVALIDDATA;
                 }
@@ -451,8 +447,10 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
         memcpy(s->keyframe, s->frame.data[0], s->frame.linesize[0] * avctx->height);
     }
+    if ((ret = av_frame_ref(data, &s->frame)) < 0)
+        return ret;
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
     if ((get_bits_count(&gb) / 8) != buf_size)
         av_log(avctx, AV_LOG_ERROR, "buffer not fully consumed (%d != %d)\n",
@@ -468,8 +466,7 @@ static av_cold int flashsv_decode_end(AVCodecContext *avctx)
     FlashSVContext *s = avctx->priv_data;
     inflateEnd(&s->zstream);
     /* release the frame if needed */
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+    av_frame_unref(&s->frame);
     /* free the tmpblock */
     av_free(s->tmpblock);
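
FlashSV keeps a persistent canvas and only paints changed blocks, so it reuses the previous frame's buffer each packet; ff_reget_buffer() is the internal helper that makes that buffer safely writable again, copying it if some earlier output still references the data. The public analogue is av_frame_make_writable(); a hedged sketch:

    #include <libavutil/frame.h>

    /* 'canvas' persists across calls and may still be referenced by frames
     * we handed out earlier. Copy-on-write before drawing on it. */
    static int touch_canvas(AVFrame *canvas)
    {
        int ret = av_frame_make_writable(canvas);
        if (ret < 0)
            return ret;
        canvas->data[0][0] = 0xff; /* now safe to modify in place */
        return 0;
    }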

View File

@@ -42,6 +42,7 @@
 #include "libavutil/intreadwrite.h"
 #include "avcodec.h"
 #include "bytestream.h"
+#include "internal.h"
 #include "mathops.h"
 #define FLI_256_COLOR 4
@@ -185,9 +186,7 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
     bytestream2_init(&g2, buf, buf_size);
-    s->frame.reference = 3;
-    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
-    if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
@@ -466,8 +465,10 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
         s->new_palette = 0;
     }
+    if ((ret = av_frame_ref(data, &s->frame)) < 0)
+        return ret;
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
     return buf_size;
 }
@@ -505,9 +506,7 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
     bytestream2_init(&g2, buf, buf_size);
-    s->frame.reference = 3;
-    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
-    if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
@@ -752,9 +751,10 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
         av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
                "and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2));
+    if ((ret = av_frame_ref(data, &s->frame)) < 0)
+        return ret;
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
     return buf_size;
 }
@@ -800,8 +800,7 @@ static av_cold int flic_decode_end(AVCodecContext *avctx)
 {
     FlicDecodeContext *s = avctx->priv_data;
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+    av_frame_unref(&s->frame);
     return 0;
 }

View File

@@ -92,9 +92,9 @@ static void * attribute_align_arg worker(void *v){
         ret = avcodec_encode_video2(avctx, pkt, frame, &got_packet);
         pthread_mutex_lock(&c->buffer_mutex);
-        c->parent_avctx->release_buffer(c->parent_avctx, frame);
+        av_frame_unref(frame);
         pthread_mutex_unlock(&c->buffer_mutex);
-        av_freep(&frame);
+        av_frame_free(&frame);
         if(got_packet) {
             av_dup_packet(pkt);
         } else {
@@ -222,11 +222,11 @@ int ff_thread_video_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVF
     if(frame){
         if(!(avctx->flags & CODEC_FLAG_INPUT_PRESERVED)){
-            AVFrame *new = avcodec_alloc_frame();
+            AVFrame *new = av_frame_alloc();
             if(!new)
                 return AVERROR(ENOMEM);
             pthread_mutex_lock(&c->buffer_mutex);
-            ret = ff_get_buffer(c->parent_avctx, new);
+            ret = ff_get_buffer(c->parent_avctx, new, 0);
             pthread_mutex_unlock(&c->buffer_mutex);
             if(ret<0)
                 return ret;
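
The worker-side change is easy to miss: with refcounted frames, a plain av_freep() on an AVFrame would leak its buffer references, so the struct has to go through av_frame_free(), which unrefs first. A short sketch of the intended lifetime (an assumed simplification of the threaded encoder's flow):

    #include <libavutil/frame.h>

    static void consume_frame(AVFrame **frame)
    {
        av_frame_unref(*frame); /* done with the data; buffers may live on elsewhere */
        av_frame_free(frame);   /* frees the struct and sets *frame to NULL */
    }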

View File

@@ -36,6 +36,7 @@
 #include "huffman.h"
 #include "bytestream.h"
 #include "dsputil.h"
+#include "internal.h"
 #include "thread.h"
 #define FPS_TAG MKTAG('F', 'P', 'S', 'x')
@@ -45,7 +46,6 @@
  */
 typedef struct FrapsContext {
     AVCodecContext *avctx;
-    AVFrame frame;
     uint8_t *tmpbuf;
     int tmpbuf_size;
     DSPContext dsp;
@@ -61,9 +61,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
 {
     FrapsContext * const s = avctx->priv_data;
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
     s->avctx = avctx;
     s->tmpbuf = NULL;
@@ -134,8 +131,8 @@ static int decode_frame(AVCodecContext *avctx,
     FrapsContext * const s = avctx->priv_data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
-    AVFrame *frame = data;
-    AVFrame * const f = &s->frame;
+    ThreadFrame frame = { .f = data };
+    AVFrame * const f = data;
     uint32_t header;
     unsigned int version,header_size;
     unsigned int x, y;
@@ -145,7 +142,6 @@ static int decode_frame(AVCodecContext *avctx,
     int i, j, ret, is_chroma;
     const int planes = 3;
     uint8_t *out;
-    enum AVPixelFormat pix_fmt;
     header = AV_RL32(buf);
     version = header & 0xff;
@@ -200,20 +196,12 @@ static int decode_frame(AVCodecContext *avctx,
         }
     }
-    if (f->data[0])
-        ff_thread_release_buffer(avctx, f);
     f->pict_type = AV_PICTURE_TYPE_I;
     f->key_frame = 1;
-    f->reference = 0;
-    f->buffer_hints = FF_BUFFER_HINTS_VALID;
-    pix_fmt = version & 1 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_YUVJ420P;
-    if (avctx->pix_fmt != pix_fmt && f->data[0]) {
-        avctx->release_buffer(avctx, f);
-    }
-    avctx->pix_fmt = pix_fmt;
+    avctx->pix_fmt = version & 1 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_YUVJ420P;
-    if ((ret = ff_thread_get_buffer(avctx, f))) {
+    if ((ret = ff_thread_get_buffer(avctx, &frame, 0))) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -296,7 +284,6 @@ static int decode_frame(AVCodecContext *avctx,
         break;
     }
-    *frame = *f;
     *got_frame = 1;
     return buf_size;
@@ -312,9 +299,6 @@ static av_cold int decode_end(AVCodecContext *avctx)
 {
     FrapsContext *s = (FrapsContext*)avctx->priv_data;
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
    av_freep(&s->tmpbuf);
     return 0;
 }

View File

@@ -38,10 +38,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
     }
     avctx->pix_fmt = AV_PIX_FMT_UYVY422;
-    avctx->coded_frame = avcodec_alloc_frame();
-    if (!avctx->coded_frame)
-        return AVERROR(ENOMEM);
     return 0;
 }
@@ -50,13 +46,10 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 {
     FRWUContext *s = avctx->priv_data;
     int field, ret;
-    AVFrame *pic = avctx->coded_frame;
+    AVFrame *pic = data;
     const uint8_t *buf = avpkt->data;
     const uint8_t *buf_end = buf + avpkt->size;
-    if (pic->data[0])
-        avctx->release_buffer(avctx, pic);
     if (avpkt->size < avctx->width * 2 * avctx->height + 4 + 2*8) {
         av_log(avctx, AV_LOG_ERROR, "Packet is too small.\n");
         return AVERROR_INVALIDDATA;
@@ -66,8 +59,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         return AVERROR_INVALIDDATA;
     }
-    pic->reference = 0;
-    if ((ret = ff_get_buffer(avctx, pic)) < 0) {
+    if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -108,21 +100,10 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     }
     *got_frame = 1;
-    *(AVFrame*)data = *pic;
     return avpkt->size;
 }
-static av_cold int decode_close(AVCodecContext *avctx)
-{
-    AVFrame *pic = avctx->coded_frame;
-    if (pic->data[0])
-        avctx->release_buffer(avctx, pic);
-    av_freep(&avctx->coded_frame);
-    return 0;
-}
 static const AVOption frwu_options[] = {
     {"change_field_order", "Change field order", offsetof(FRWUContext, change_field_order), FF_OPT_TYPE_INT,
      {.i64 = 0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM},
@@ -142,7 +123,6 @@ AVCodec ff_frwu_decoder = {
     .id             = AV_CODEC_ID_FRWU,
     .priv_data_size = sizeof(FRWUContext),
     .init           = decode_init,
-    .close          = decode_close,
     .decode         = decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("Forward Uncompressed"),

View File

@@ -94,7 +94,7 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
     /* get output buffer */
     frame->nb_samples = avpkt->size * 2;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

View File

@@ -1185,9 +1185,9 @@ static int g723_1_decode_frame(AVCodecContext *avctx, void *data,
     }
     frame->nb_samples = FRAME_LEN;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
     out = (int16_t *)frame->data[0];

View File

@@ -449,7 +449,7 @@ static int g726_decode_frame(AVCodecContext *avctx, void *data,
     /* get output buffer */
     frame->nb_samples = out_samples;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

View File

@@ -420,7 +420,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
     int is_periodic = 0; // whether one of the subframes is declared as periodic or not
     ctx->frame.nb_samples = SUBFRAME_SIZE<<1;
-    if ((ret = ff_get_buffer(avctx, &ctx->frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, &ctx->frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

View File

@@ -39,7 +39,7 @@
 typedef struct GifState {
     const AVClass *class;
-    AVFrame picture;
+    AVFrame *frame;
     int screen_width;
     int screen_height;
     int has_global_palette;
@@ -130,7 +130,7 @@ static void gif_copy_img_rect(const uint32_t *src, uint32_t *dst,
     }
 }
-static int gif_read_image(GifState *s)
+static int gif_read_image(GifState *s, AVFrame *frame)
 {
     int left, top, width, height, bits_per_pixel, code_size, flags;
     int is_interleaved, has_local_palette, y, pass, y1, linesize, pal_size;
@@ -173,11 +173,11 @@ static int gif_read_image(GifState *s)
     if (s->keyframe) {
         if (s->transparent_color_index == -1 && s->has_global_palette) {
             /* transparency wasn't set before the first frame, fill with background color */
-            gif_fill(&s->picture, s->bg_color);
+            gif_fill(frame, s->bg_color);
         } else {
             /* otherwise fill with transparent color.
              * this is necessary since by default picture filled with 0x80808080. */
-            gif_fill(&s->picture, s->trans_color);
+            gif_fill(frame, s->trans_color);
         }
     }
@@ -190,10 +190,10 @@ static int gif_read_image(GifState *s)
     /* process disposal method */
     if (s->gce_prev_disposal == GCE_DISPOSAL_BACKGROUND) {
-        gif_fill_rect(&s->picture, s->stored_bg_color, s->gce_l, s->gce_t, s->gce_w, s->gce_h);
+        gif_fill_rect(frame, s->stored_bg_color, s->gce_l, s->gce_t, s->gce_w, s->gce_h);
     } else if (s->gce_prev_disposal == GCE_DISPOSAL_RESTORE) {
-        gif_copy_img_rect(s->stored_img, (uint32_t *)s->picture.data[0],
-                          s->picture.linesize[0] / sizeof(uint32_t), s->gce_l, s->gce_t, s->gce_w, s->gce_h);
+        gif_copy_img_rect(s->stored_img, (uint32_t *)frame->data[0],
+                          frame->linesize[0] / sizeof(uint32_t), s->gce_l, s->gce_t, s->gce_w, s->gce_h);
     }
     s->gce_prev_disposal = s->gce_disposal;
@@ -208,12 +208,12 @@ static int gif_read_image(GifState *s)
             else
                 s->stored_bg_color = s->bg_color;
         } else if (s->gce_disposal == GCE_DISPOSAL_RESTORE) {
-            av_fast_malloc(&s->stored_img, &s->stored_img_size, s->picture.linesize[0] * s->picture.height);
+            av_fast_malloc(&s->stored_img, &s->stored_img_size, frame->linesize[0] * frame->height);
             if (!s->stored_img)
                 return AVERROR(ENOMEM);
-            gif_copy_img_rect((uint32_t *)s->picture.data[0], s->stored_img,
-                              s->picture.linesize[0] / sizeof(uint32_t), left, top, width, height);
+            gif_copy_img_rect((uint32_t *)frame->data[0], s->stored_img,
+                              frame->linesize[0] / sizeof(uint32_t), left, top, width, height);
         }
     }
@@ -230,8 +230,8 @@ static int gif_read_image(GifState *s)
     }
     /* read all the image */
-    linesize = s->picture.linesize[0] / sizeof(uint32_t);
-    ptr1 = (uint32_t *)s->picture.data[0] + top * linesize + left;
+    linesize = frame->linesize[0] / sizeof(uint32_t);
+    ptr1 = (uint32_t *)frame->data[0] + top * linesize + left;
     ptr = ptr1;
     pass = 0;
     y1 = 0;
@@ -400,7 +400,7 @@ static int gif_read_header1(GifState *s)
     return 0;
 }
-static int gif_parse_next_image(GifState *s)
+static int gif_parse_next_image(GifState *s, AVFrame *frame)
 {
     while (bytestream2_get_bytes_left(&s->gb)) {
         int code = bytestream2_get_byte(&s->gb);
@@ -410,7 +410,7 @@ static int gif_parse_next_image(GifState *s)
         switch (code) {
         case GIF_IMAGE_SEPARATOR:
-            return gif_read_image(s);
+            return gif_read_image(s, frame);
         case GIF_EXTENSION_INTRODUCER:
             if ((ret = gif_read_extension(s)) < 0)
                 return ret;
@@ -433,9 +433,9 @@ static av_cold int gif_decode_init(AVCodecContext *avctx)
     s->avctx = avctx;
     avctx->pix_fmt = AV_PIX_FMT_RGB32;
-    avcodec_get_frame_defaults(&s->picture);
-    avctx->coded_frame= &s->picture;
-    s->picture.data[0] = NULL;
+    s->frame = av_frame_alloc();
+    if (!s->frame)
+        return AVERROR(ENOMEM);
     ff_lzw_decode_open(&s->lzw);
     return 0;
 }
@@ -443,15 +443,14 @@ static av_cold int gif_decode_init(AVCodecContext *avctx)
 static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
 {
     GifState *s = avctx->priv_data;
-    AVFrame *picture = data;
     int ret;
     bytestream2_init(&s->gb, avpkt->data, avpkt->size);
-    s->picture.pts     = avpkt->pts;
-    s->picture.pkt_pts = avpkt->pts;
-    s->picture.pkt_dts = avpkt->dts;
-    av_frame_set_pkt_duration(&s->picture, avpkt->duration);
+    s->frame->pts     = avpkt->pts;
+    s->frame->pkt_pts = avpkt->pts;
+    s->frame->pkt_dts = avpkt->dts;
+    av_frame_set_pkt_duration(s->frame, avpkt->duration);
     if (avpkt->size >= 6) {
         s->keyframe = memcmp(avpkt->data, gif87a_sig, 6) == 0 ||
@@ -469,10 +468,8 @@ static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, A
             return ret;
         avcodec_set_dimensions(avctx, s->screen_width, s->screen_height);
-        if (s->picture.data[0])
-            avctx->release_buffer(avctx, &s->picture);
-        if ((ret = ff_get_buffer(avctx, &s->picture)) < 0) {
+        av_frame_unref(s->frame);
+        if ((ret = ff_get_buffer(avctx, s->frame, 0)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }
@@ -481,8 +478,8 @@ static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, A
         if (!s->idx_line)
             return AVERROR(ENOMEM);
-        s->picture.pict_type = AV_PICTURE_TYPE_I;
-        s->picture.key_frame = 1;
+        s->frame->pict_type = AV_PICTURE_TYPE_I;
+        s->frame->key_frame = 1;
         s->keyframe_ok = 1;
     } else {
         if (!s->keyframe_ok) {
@@ -490,20 +487,21 @@ static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, A
             return AVERROR_INVALIDDATA;
         }
-        if ((ret = avctx->reget_buffer(avctx, &s->picture)) < 0) {
+        if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return ret;
        }
-        s->picture.pict_type = AV_PICTURE_TYPE_P;
-        s->picture.key_frame = 0;
+        s->frame->pict_type = AV_PICTURE_TYPE_P;
+        s->frame->key_frame = 0;
     }
-    ret = gif_parse_next_image(s);
+    ret = gif_parse_next_image(s, s->frame);
     if (ret < 0)
         return ret;
-    *picture = s->picture;
+    if ((ret = av_frame_ref(data, s->frame)) < 0)
+        return ret;
     *got_frame = 1;
     return avpkt->size;
@@ -514,9 +512,7 @@ static av_cold int gif_decode_close(AVCodecContext *avctx)
     GifState *s = avctx->priv_data;
     ff_lzw_decode_close(&s->lzw);
-    if(s->picture.data[0])
-        avctx->release_buffer(avctx, &s->picture);
+    av_frame_free(&s->frame);
     av_freep(&s->idx_line);
     av_freep(&s->stored_img);
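
gifdec forwards pts/pkt_pts/pkt_dts/duration onto its persistent canvas field by field. When the set of fields is not known in advance, av_frame_copy_props() is the generic public call for moving such metadata between frames without touching the data planes; a sketch:

    #include <libavutil/frame.h>

    /* Copies timestamps, sample aspect ratio, side data and similar
     * properties from 'src' to 'dst', leaving dst's picture data alone. */
    static int forward_metadata(AVFrame *dst, const AVFrame *src)
    {
        return av_frame_copy_props(dst, src);
    }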

View File

@@ -70,7 +70,7 @@ static int gsm_decode_frame(AVCodecContext *avctx, void *data,
     /* get output buffer */
     frame->nb_samples = avctx->frame_size;
-    if ((res = ff_get_buffer(avctx, frame)) < 0) {
+    if ((res = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return res;
     }

View File

@@ -215,7 +215,7 @@ static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2 )
         s->mv_dir  = MV_DIR_FORWARD;
         s->mv_type = MV_TYPE_16X16;
-        s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
+        s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
         s->mv[0][0][0] = 0;
         s->mv[0][0][1] = 0;
         s->mb_skipped  = 1;
@@ -330,14 +330,14 @@ static int h261_decode_mb(H261Context *h){
     }
     if(s->mb_intra){
-        s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
+        s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
         goto intra;
     }
     //set motion vectors
     s->mv_dir  = MV_DIR_FORWARD;
     s->mv_type = MV_TYPE_16X16;
-    s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
+    s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
     s->mv[0][0][0] = h->current_mv_x * 2;//gets divided by 2 in motion compensation
     s->mv[0][0][1] = h->current_mv_y * 2;
@@ -632,8 +632,9 @@ retry:
     av_assert0(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
     av_assert0(s->current_picture.f.pict_type == s->pict_type);
-    *pict = s->current_picture_ptr->f;
-    ff_print_debug_info(s, pict);
+    if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+        return ret;
+    ff_print_debug_info(s, s->current_picture_ptr);
     *got_frame = 1;

View File

@@ -51,7 +51,7 @@ void ff_h263_update_motion_val(MpegEncContext * s){
     const int wrap = s->b8_stride;
     const int xy = s->block_index[0];
-    s->current_picture.f.mbskip_table[mb_xy] = s->mb_skipped;
+    s->current_picture.mbskip_table[mb_xy] = s->mb_skipped;
     if(s->mv_type != MV_TYPE_8X8){
         int motion_x, motion_y;
@@ -70,30 +70,30 @@ void ff_h263_update_motion_val(MpegEncContext * s){
                 s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
                 s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
             }
-            s->current_picture.f.ref_index[0][4*mb_xy    ] =
-            s->current_picture.f.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
-            s->current_picture.f.ref_index[0][4*mb_xy + 2] =
-            s->current_picture.f.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
+            s->current_picture.ref_index[0][4*mb_xy    ] =
+            s->current_picture.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
+            s->current_picture.ref_index[0][4*mb_xy + 2] =
+            s->current_picture.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
         }
         /* no update if 8X8 because it has been done during parsing */
-        s->current_picture.f.motion_val[0][xy][0]            = motion_x;
-        s->current_picture.f.motion_val[0][xy][1]            = motion_y;
-        s->current_picture.f.motion_val[0][xy + 1][0]        = motion_x;
-        s->current_picture.f.motion_val[0][xy + 1][1]        = motion_y;
-        s->current_picture.f.motion_val[0][xy + wrap][0]     = motion_x;
-        s->current_picture.f.motion_val[0][xy + wrap][1]     = motion_y;
-        s->current_picture.f.motion_val[0][xy + 1 + wrap][0] = motion_x;
-        s->current_picture.f.motion_val[0][xy + 1 + wrap][1] = motion_y;
+        s->current_picture.motion_val[0][xy][0]            = motion_x;
+        s->current_picture.motion_val[0][xy][1]            = motion_y;
+        s->current_picture.motion_val[0][xy + 1][0]        = motion_x;
+        s->current_picture.motion_val[0][xy + 1][1]        = motion_y;
+        s->current_picture.motion_val[0][xy + wrap][0]     = motion_x;
+        s->current_picture.motion_val[0][xy + wrap][1]     = motion_y;
+        s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
+        s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
     }
     if(s->encoding){ //FIXME encoding MUST be cleaned up
         if (s->mv_type == MV_TYPE_8X8)
-            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
+            s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
         else if(s->mb_intra)
-            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA;
+            s->current_picture.mb_type[mb_xy] = MB_TYPE_INTRA;
         else
-            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
+            s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
     }
 }
@@ -153,7 +153,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
           Diag  Top
           Left Center
        */
-    if (!IS_SKIP(s->current_picture.f.mb_type[xy])) {
+    if (!IS_SKIP(s->current_picture.mb_type[xy])) {
         qp_c= s->qscale;
         s->dsp.h263_v_loop_filter(dest_y+8*linesize  , linesize, qp_c);
         s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
@@ -163,10 +163,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
     if(s->mb_y){
         int qp_dt, qp_tt, qp_tc;
-        if (IS_SKIP(s->current_picture.f.mb_type[xy - s->mb_stride]))
+        if (IS_SKIP(s->current_picture.mb_type[xy - s->mb_stride]))
             qp_tt=0;
         else
-            qp_tt = s->current_picture.f.qscale_table[xy - s->mb_stride];
+            qp_tt = s->current_picture.qscale_table[xy - s->mb_stride];
         if(qp_c)
             qp_tc= qp_c;
@@ -186,10 +186,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
             s->dsp.h263_h_loop_filter(dest_y-8*linesize+8 , linesize, qp_tt);
         if(s->mb_x){
-            if (qp_tt || IS_SKIP(s->current_picture.f.mb_type[xy - 1 - s->mb_stride]))
+            if (qp_tt || IS_SKIP(s->current_picture.mb_type[xy - 1 - s->mb_stride]))
                 qp_dt= qp_tt;
            else
-                qp_dt = s->current_picture.f.qscale_table[xy - 1 - s->mb_stride];
+                qp_dt = s->current_picture.qscale_table[xy - 1 - s->mb_stride];
            if(qp_dt){
                const int chroma_qp= s->chroma_qscale_table[qp_dt];
@@ -208,10 +208,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
     if(s->mb_x){
         int qp_lc;
-        if (qp_c || IS_SKIP(s->current_picture.f.mb_type[xy - 1]))
+        if (qp_c || IS_SKIP(s->current_picture.mb_type[xy - 1]))
             qp_lc= qp_c;
         else
-            qp_lc = s->current_picture.f.qscale_table[xy - 1];
+            qp_lc = s->current_picture.qscale_table[xy - 1];
         if(qp_lc){
             s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
@@ -320,7 +320,7 @@ int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
     static const int off[4]= {2, 1, 1, -1};
     wrap = s->b8_stride;
-    mot_val = s->current_picture.f.motion_val[dir] + s->block_index[block];
+    mot_val = s->current_picture.motion_val[dir] + s->block_index[block];
     A = mot_val[ - 1];
     /* special case for first (slice) line */

View File

@@ -365,7 +365,8 @@ uint64_t time= rdtsc();
     if (buf_size == 0) {
         /* special case for last picture */
         if (s->low_delay==0 && s->next_picture_ptr) {
-            *pict = s->next_picture_ptr->f;
+            if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
+                return ret;
             s->next_picture_ptr= NULL;
             *got_frame = 1;
@@ -746,14 +747,17 @@ intrax8_decoded:
     assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
     assert(s->current_picture.f.pict_type == s->pict_type);
     if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
-        *pict = s->current_picture_ptr->f;
+        if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+            return ret;
+        ff_print_debug_info(s, s->current_picture_ptr);
     } else if (s->last_picture_ptr != NULL) {
-        *pict = s->last_picture_ptr->f;
+        if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+            return ret;
+        ff_print_debug_info(s, s->last_picture_ptr);
     }
     if(s->last_picture_ptr || s->low_delay){
         *got_frame = 1;
-        ff_print_debug_info(s, pict);
     }
 #ifdef PRINT_FRAME_TIME

View File
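
The h264 hunks below do the heaviest lifting of the merge: per-Picture tables (qscale, mb_type, motion vectors, ref indices) move from bare mallocs to AVBufferPool, so DPB entries can share them by reference and recycle allocations across frames. A minimal, self-contained sketch of the pool API, with a made-up buffer size:

    #include <libavutil/buffer.h>

    static int pool_demo(void)
    {
        AVBufferPool *pool = av_buffer_pool_init(4096, av_buffer_allocz);
        AVBufferRef *buf;
        if (!pool)
            return -1;
        buf = av_buffer_pool_get(pool); /* reuses a returned buffer when possible */
        if (buf)
            av_buffer_unref(&buf);      /* hands the buffer back to the pool */
        av_buffer_pool_uninit(&pool);   /* pool is freed once all buffers return */
        return 0;
    }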

@ -130,11 +130,11 @@ static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
av_log(h->avctx, AV_LOG_DEBUG, "Reference not available for error concealing\n"); av_log(h->avctx, AV_LOG_DEBUG, "Reference not available for error concealing\n");
ref = 0; ref = 0;
} }
if ((h->ref_list[0][ref].f.reference&3) != 3) { if ((h->ref_list[0][ref].reference&3) != 3) {
av_log(h->avctx, AV_LOG_DEBUG, "Reference invalid\n"); av_log(h->avctx, AV_LOG_DEBUG, "Reference invalid\n");
return; return;
} }
fill_rectangle(&h->cur_pic.f.ref_index[0][4 * h->mb_xy], fill_rectangle(&h->cur_pic.ref_index[0][4 * h->mb_xy],
2, 2, 2, ref, 1); 2, 2, 2, ref, 1);
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1); fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8, fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8,
@ -188,29 +188,26 @@ void ff_h264_draw_horiz_band(H264Context *h, int y, int height)
} }
} }
static void free_frame_buffer(H264Context *h, Picture *pic) static void unref_picture(H264Context *h, Picture *pic)
{
pic->period_since_free = 0;
ff_thread_release_buffer(h->avctx, &pic->f);
av_freep(&pic->f.hwaccel_picture_private);
}
static void free_picture(H264Context *h, Picture *pic)
{ {
int off = offsetof(Picture, tf) + sizeof(pic->tf);
int i; int i;
if (pic->f.data[0]) if (!pic->f.data[0])
free_frame_buffer(h, pic); return;
av_freep(&pic->qscale_table_base); pic->period_since_free = 0;
pic->f.qscale_table = NULL; ff_thread_release_buffer(h->avctx, &pic->tf);
av_freep(&pic->mb_type_base); av_buffer_unref(&pic->hwaccel_priv_buf);
pic->f.mb_type = NULL;
av_buffer_unref(&pic->qscale_table_buf);
av_buffer_unref(&pic->mb_type_buf);
for (i = 0; i < 2; i++) { for (i = 0; i < 2; i++) {
av_freep(&pic->motion_val_base[i]); av_buffer_unref(&pic->motion_val_buf[i]);
av_freep(&pic->f.ref_index[i]); av_buffer_unref(&pic->ref_index_buf[i]);
pic->f.motion_val[i] = NULL;
} }
memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
} }
static void release_unused_pictures(H264Context *h, int remove_current) static void release_unused_pictures(H264Context *h, int remove_current)
@ -218,15 +215,76 @@ static void release_unused_pictures(H264Context *h, int remove_current)
int i; int i;
/* release non reference frames */ /* release non reference frames */
for (i = 0; i < h->picture_count; i++) { for (i = 0; i < MAX_PICTURE_COUNT; i++) {
if (h->DPB[i].f.data[0] && !h->DPB[i].f.reference && if (h->DPB[i].f.data[0] && !h->DPB[i].reference &&
(!h->DPB[i].owner2 || h->DPB[i].owner2 == h) &&
(remove_current || &h->DPB[i] != h->cur_pic_ptr)) { (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
free_frame_buffer(h, &h->DPB[i]); unref_picture(h, &h->DPB[i]);
} }
} }
} }
static int ref_picture(H264Context *h, Picture *dst, Picture *src)
{
int ret, i;
av_assert0(!dst->f.buf[0]);
av_assert0(src->f.buf[0]);
src->tf.f = &src->f;
dst->tf.f = &dst->f;
ret = ff_thread_ref_frame(&dst->tf, &src->tf);
if (ret < 0)
goto fail;
dst->qscale_table_buf = av_buffer_ref(src->qscale_table_buf);
dst->mb_type_buf = av_buffer_ref(src->mb_type_buf);
if (!dst->qscale_table_buf || !dst->mb_type_buf)
goto fail;
dst->qscale_table = src->qscale_table;
dst->mb_type = src->mb_type;
for (i = 0; i < 2; i ++) {
dst->motion_val_buf[i] = av_buffer_ref(src->motion_val_buf[i]);
dst->ref_index_buf[i] = av_buffer_ref(src->ref_index_buf[i]);
if (!dst->motion_val_buf[i] || !dst->ref_index_buf[i])
goto fail;
dst->motion_val[i] = src->motion_val[i];
dst->ref_index[i] = src->ref_index[i];
}
if (src->hwaccel_picture_private) {
dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
if (!dst->hwaccel_priv_buf)
goto fail;
dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
}
for (i = 0; i < 2; i++)
dst->field_poc[i] = src->field_poc[i];
memcpy(dst->ref_poc, src->ref_poc, sizeof(src->ref_poc));
memcpy(dst->ref_count, src->ref_count, sizeof(src->ref_count));
dst->poc = src->poc;
dst->frame_num = src->frame_num;
dst->mmco_reset = src->mmco_reset;
dst->pic_id = src->pic_id;
dst->long_ref = src->long_ref;
dst->mbaff = src->mbaff;
dst->field_picture = src->field_picture;
dst->needs_realloc = src->needs_realloc;
dst->reference = src->reference;
dst->sync = src->sync;
dst->period_since_free = src->period_since_free;
return 0;
fail:
unref_picture(h, dst);
return ret;
}
static int alloc_scratch_buffers(H264Context *h, int linesize) static int alloc_scratch_buffers(H264Context *h, int linesize)
{ {
int alloc_size = FFALIGN(FFABS(linesize) + 32, 32); int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
@ -252,60 +310,86 @@ static int alloc_scratch_buffers(H264Context *h, int linesize)
return 0; return 0;
} }
static int alloc_picture(H264Context *h, Picture *pic) static int init_table_pools(H264Context *h)
{ {
const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1; const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
const int mb_array_size = h->mb_stride * h->mb_height; const int mb_array_size = h->mb_stride * h->mb_height;
const int b4_stride = h->mb_width * 4 + 1; const int b4_stride = h->mb_width * 4 + 1;
const int b4_array_size = b4_stride * h->mb_height * 4; const int b4_array_size = b4_stride * h->mb_height * 4;
h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
av_buffer_allocz);
h->mb_type_pool = av_buffer_pool_init((big_mb_num + h->mb_stride) *
sizeof(uint32_t), av_buffer_allocz);
h->motion_val_pool = av_buffer_pool_init(2 * (b4_array_size + 4) *
sizeof(int16_t), av_buffer_allocz);
h->ref_index_pool = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);
if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
!h->ref_index_pool) {
av_buffer_pool_uninit(&h->qscale_table_pool);
av_buffer_pool_uninit(&h->mb_type_pool);
av_buffer_pool_uninit(&h->motion_val_pool);
av_buffer_pool_uninit(&h->ref_index_pool);
return AVERROR(ENOMEM);
}
return 0;
}
static int alloc_picture(H264Context *h, Picture *pic)
{
int i, ret = 0; int i, ret = 0;
av_assert0(!pic->f.data[0]); av_assert0(!pic->f.data[0]);
if (h->avctx->hwaccel) { if (h->avctx->hwaccel) {
const AVHWAccel *hwaccel = h->avctx->hwaccel; const AVHWAccel *hwaccel = h->avctx->hwaccel;
av_assert0(!pic->f.hwaccel_picture_private); av_assert0(!pic->hwaccel_picture_private);
if (hwaccel->priv_data_size) { if (hwaccel->priv_data_size) {
pic->f.hwaccel_picture_private = av_mallocz(hwaccel->priv_data_size); pic->hwaccel_priv_buf = av_buffer_allocz(hwaccel->priv_data_size);
if (!pic->f.hwaccel_picture_private) if (!pic->hwaccel_priv_buf)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
} }
} }
ret = ff_thread_get_buffer(h->avctx, &pic->f); pic->tf.f = &pic->f;
ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
AV_GET_BUFFER_FLAG_REF : 0);
if (ret < 0) if (ret < 0)
goto fail; goto fail;
h->linesize = pic->f.linesize[0]; h->linesize = pic->f.linesize[0];
h->uvlinesize = pic->f.linesize[1]; h->uvlinesize = pic->f.linesize[1];
if (pic->f.qscale_table == NULL) { if (!h->qscale_table_pool) {
FF_ALLOCZ_OR_GOTO(h->avctx, pic->qscale_table_base, ret = init_table_pools(h);
(big_mb_num + h->mb_stride) * sizeof(uint8_t), if (ret < 0)
fail) goto fail;
FF_ALLOCZ_OR_GOTO(h->avctx, pic->mb_type_base,
(big_mb_num + h->mb_stride) * sizeof(uint32_t),
fail)
pic->f.mb_type = pic->mb_type_base + 2 * h->mb_stride + 1;
pic->f.qscale_table = pic->qscale_table_base + 2 * h->mb_stride + 1;
for (i = 0; i < 2; i++) {
FF_ALLOCZ_OR_GOTO(h->avctx, pic->motion_val_base[i],
2 * (b4_array_size + 4) * sizeof(int16_t),
fail)
pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
FF_ALLOCZ_OR_GOTO(h->avctx, pic->f.ref_index[i],
4 * mb_array_size * sizeof(uint8_t), fail)
}
pic->f.motion_subsample_log2 = 2;
pic->f.qstride = h->mb_stride;
} }
pic->owner2 = h; pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool);
pic->mb_type_buf = av_buffer_pool_get(h->mb_type_pool);
if (!pic->qscale_table_buf || !pic->mb_type_buf)
goto fail;
pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;
for (i = 0; i < 2; i++) {
pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool);
pic->ref_index_buf[i] = av_buffer_pool_get(h->ref_index_pool);
if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
goto fail;
pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
pic->ref_index[i] = pic->ref_index_buf[i]->data;
}
pic->f.motion_subsample_log2 = 2;
return 0; return 0;
fail: fail:
free_frame_buffer(h, pic); unref_picture(h, pic);
return (ret < 0) ? ret : AVERROR(ENOMEM); return (ret < 0) ? ret : AVERROR(ENOMEM);
} }
@ -317,9 +401,8 @@ static inline int pic_is_unused(H264Context *h, Picture *pic)
return 0; return 0;
if (pic->f.data[0] == NULL) if (pic->f.data[0] == NULL)
return 1; return 1;
if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF)) if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
if (!pic->owner2 || pic->owner2 == h) return 1;
return 1;
return 0; return 0;
} }
@ -327,17 +410,16 @@ static int find_unused_picture(H264Context *h)
{ {
int i; int i;
for (i = h->picture_range_start; i < h->picture_range_end; i++) { for (i = 0; i < MAX_PICTURE_COUNT; i++) {
if (pic_is_unused(h, &h->DPB[i])) if (pic_is_unused(h, &h->DPB[i]))
break; break;
} }
if (i == h->picture_range_end) if (i == MAX_PICTURE_COUNT)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
if (h->DPB[i].needs_realloc) { if (h->DPB[i].needs_realloc) {
h->DPB[i].needs_realloc = 0; h->DPB[i].needs_realloc = 0;
free_picture(h, &h->DPB[i]); unref_picture(h, &h->DPB[i]);
avcodec_get_frame_defaults(&h->DPB[i].f);
} }
return i; return i;
@ -594,8 +676,8 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
// Error resilience puts the current picture in the ref list. // Error resilience puts the current picture in the ref list.
// Don't try to wait on these as it will cause a deadlock. // Don't try to wait on these as it will cause a deadlock.
// Fields can wait on each other, though. // Fields can wait on each other, though.
if (ref->f.thread_opaque != h->cur_pic.f.thread_opaque || if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
(ref->f.reference & 3) != h->picture_structure) { (ref->reference & 3) != h->picture_structure) {
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0); my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0);
if (refs[0][ref_n] < 0) if (refs[0][ref_n] < 0)
nrefs[0] += 1; nrefs[0] += 1;
@ -607,8 +689,8 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
int ref_n = h->ref_cache[1][scan8[n]]; int ref_n = h->ref_cache[1][scan8[n]];
Picture *ref = &h->ref_list[1][ref_n]; Picture *ref = &h->ref_list[1][ref_n];
if (ref->f.thread_opaque != h->cur_pic.f.thread_opaque || if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
(ref->f.reference & 3) != h->picture_structure) { (ref->reference & 3) != h->picture_structure) {
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1); my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1);
if (refs[1][ref_n] < 0) if (refs[1][ref_n] < 0)
nrefs[1] += 1; nrefs[1] += 1;
@ -625,7 +707,7 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
static void await_references(H264Context *h) static void await_references(H264Context *h)
{ {
const int mb_xy = h->mb_xy; const int mb_xy = h->mb_xy;
const int mb_type = h->cur_pic.f.mb_type[mb_xy]; const int mb_type = h->cur_pic.mb_type[mb_xy];
int refs[2][48]; int refs[2][48];
int nrefs[2] = { 0 }; int nrefs[2] = { 0 };
int ref, list; int ref, list;
@ -697,7 +779,7 @@ static void await_references(H264Context *h)
int row = refs[list][ref]; int row = refs[list][ref];
if (row >= 0) { if (row >= 0) {
Picture *ref_pic = &h->ref_list[list][ref]; Picture *ref_pic = &h->ref_list[list][ref];
int ref_field = ref_pic->f.reference - 1; int ref_field = ref_pic->reference - 1;
int ref_field_picture = ref_pic->field_picture; int ref_field_picture = ref_pic->field_picture;
int pic_height = 16 * h->mb_height >> ref_field_picture; int pic_height = 16 * h->mb_height >> ref_field_picture;
@ -705,24 +787,24 @@ static void await_references(H264Context *h)
nrefs[list]--; nrefs[list]--;
if (!FIELD_PICTURE && ref_field_picture) { // frame referencing two fields if (!FIELD_PICTURE && ref_field_picture) { // frame referencing two fields
ff_thread_await_progress(&ref_pic->f, ff_thread_await_progress(&ref_pic->tf,
FFMIN((row >> 1) - !(row & 1), FFMIN((row >> 1) - !(row & 1),
pic_height - 1), pic_height - 1),
1); 1);
ff_thread_await_progress(&ref_pic->f, ff_thread_await_progress(&ref_pic->tf,
FFMIN((row >> 1), pic_height - 1), FFMIN((row >> 1), pic_height - 1),
0); 0);
} else if (FIELD_PICTURE && !ref_field_picture) { // field referencing one field of a frame } else if (FIELD_PICTURE && !ref_field_picture) { // field referencing one field of a frame
ff_thread_await_progress(&ref_pic->f, ff_thread_await_progress(&ref_pic->tf,
FFMIN(row * 2 + ref_field, FFMIN(row * 2 + ref_field,
pic_height - 1), pic_height - 1),
0); 0);
} else if (FIELD_PICTURE) { } else if (FIELD_PICTURE) {
ff_thread_await_progress(&ref_pic->f, ff_thread_await_progress(&ref_pic->tf,
FFMIN(row, pic_height - 1), FFMIN(row, pic_height - 1),
ref_field); ref_field);
} else { } else {
ff_thread_await_progress(&ref_pic->f, ff_thread_await_progress(&ref_pic->tf,
FFMIN(row, pic_height - 1), FFMIN(row, pic_height - 1),
0); 0);
} }
@ -814,7 +896,7 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic,
ysh = 3 - (chroma_idc == 2 /* yuv422 */); ysh = 3 - (chroma_idc == 2 /* yuv422 */);
if (chroma_idc == 1 /* yuv420 */ && MB_FIELD) { if (chroma_idc == 1 /* yuv420 */ && MB_FIELD) {
// chroma offset when predicting from a field of opposite parity // chroma offset when predicting from a field of opposite parity
my += 2 * ((h->mb_y & 1) - (pic->f.reference - 1)); my += 2 * ((h->mb_y & 1) - (pic->reference - 1));
emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1); emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
} }
@ -1043,13 +1125,17 @@ static void free_tables(H264Context *h, int free_rbsp)
for (i = 0; i < 3; i++) for (i = 0; i < 3; i++)
av_freep(&h->visualization_buffer[i]); av_freep(&h->visualization_buffer[i]);
if (free_rbsp) { av_buffer_pool_uninit(&h->qscale_table_pool);
for (i = 0; i < h->picture_count && !h->avctx->internal->is_copy; i++) av_buffer_pool_uninit(&h->mb_type_pool);
free_picture(h, &h->DPB[i]); av_buffer_pool_uninit(&h->motion_val_pool);
av_buffer_pool_uninit(&h->ref_index_pool);
if (free_rbsp && h->DPB) {
for (i = 0; i < MAX_PICTURE_COUNT; i++)
unref_picture(h, &h->DPB[i]);
av_freep(&h->DPB); av_freep(&h->DPB);
h->picture_count = 0;
} else if (h->DPB) { } else if (h->DPB) {
for (i = 0; i < h->picture_count; i++) for (i = 0; i < MAX_PICTURE_COUNT; i++)
h->DPB[i].needs_realloc = 1; h->DPB[i].needs_realloc = 1;
} }
@ -1198,11 +1284,10 @@ int ff_h264_alloc_tables(H264Context *h)
init_dequant_tables(h); init_dequant_tables(h);
if (!h->DPB) { if (!h->DPB) {
h->picture_count = MAX_PICTURE_COUNT * FFMAX(1, h->avctx->thread_count); h->DPB = av_mallocz_array(MAX_PICTURE_COUNT, sizeof(*h->DPB));
h->DPB = av_mallocz_array(h->picture_count, sizeof(*h->DPB));
if (!h->DPB) if (!h->DPB)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
for (i = 0; i < h->picture_count; i++) for (i = 0; i < MAX_PICTURE_COUNT; i++)
avcodec_get_frame_defaults(&h->DPB[i].f); avcodec_get_frame_defaults(&h->DPB[i].f);
avcodec_get_frame_defaults(&h->cur_pic.f); avcodec_get_frame_defaults(&h->cur_pic.f);
} }
@@ -1413,8 +1498,6 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx)
     common_init(h);
 
     h->picture_structure   = PICT_FRAME;
-    h->picture_range_start = 0;
-    h->picture_range_end   = MAX_PICTURE_COUNT;
     h->slice_context_count = 1;
     h->workaround_bugs     = avctx->workaround_bugs;
     h->flags               = avctx->flags;
@@ -1462,6 +1545,7 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx)
     }
 
     ff_init_cabac_states();
+    avctx->internal->allocate_progress = 1;
 
     return 0;
 }
@@ -1470,7 +1554,7 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx)
 #undef REBASE_PICTURE
 #define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
     ((pic && pic >= old_ctx->DPB &&                       \
-      pic < old_ctx->DPB + old_ctx->picture_count) ?      \
+      pic < old_ctx->DPB + MAX_PICTURE_COUNT) ?           \
      &new_ctx->DPB[pic - old_ctx->DPB] : NULL)
 
 static void copy_picture_range(Picture **to, Picture **from, int count,
@@ -1482,7 +1566,7 @@ static void copy_picture_range(Picture **to, Picture **from, int count,
     for (i = 0; i < count; i++) {
        assert((IN_RANGE(from[i], old_base, sizeof(*old_base)) ||
                IN_RANGE(from[i], old_base->DPB,
-                        sizeof(Picture) * old_base->picture_count) ||
+                        sizeof(Picture) * MAX_PICTURE_COUNT) ||
                !from[i]));
        to[i] = REBASE_PICTURE(from[i], new_base, old_base);
     }
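The REBASE_PICTURE macro above is plain pointer arithmetic: it maps a Picture pointer out of one context's DPB array onto the slot with the same index in another context's DPB, and any pointer outside the array becomes NULL. A self-contained sketch of that arithmetic, using a hypothetical toy type in place of the real Picture and an assumed MAX_PICTURE_COUNT of 36:

    #include <stddef.h>

    typedef struct Pic { int id; } Pic;   /* toy stand-in for Picture */

    /* Map p from old_base[0..count) to the matching slot in new_base;
     * pointers outside the array become NULL, like the macro's range check. */
    static Pic *rebase(Pic *p, Pic *new_base, Pic *old_base, size_t count)
    {
        if (p && p >= old_base && p < old_base + count)
            return &new_base[p - old_base];
        return NULL;
    }

    int main(void)
    {
        Pic old_dpb[36], new_dpb[36];
        return rebase(&old_dpb[5], new_dpb, old_dpb, 36) == &new_dpb[5] ? 0 : 1;
    }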
@@ -1531,7 +1615,7 @@ static int decode_update_thread_context(AVCodecContext *dst,
     H264Context *h = dst->priv_data, *h1 = src->priv_data;
     int inited = h->context_initialized, err = 0;
     int context_reinitialized = 0;
-    int i;
+    int i, ret;
 
     if (dst == src)
         return 0;
@@ -1601,14 +1685,17 @@ static int decode_update_thread_context(AVCodecContext *dst,
         memset(&h->me, 0, sizeof(h->me));
         h->avctx = dst;
         h->DPB   = NULL;
+        h->qscale_table_pool = NULL;
+        h->mb_type_pool      = NULL;
+        h->ref_index_pool    = NULL;
+        h->motion_val_pool   = NULL;
 
         if (h1->context_initialized) {
             h->context_initialized = 0;
-            h->picture_range_start += MAX_PICTURE_COUNT;
-            h->picture_range_end   += MAX_PICTURE_COUNT;
+            memset(&h->cur_pic, 0, sizeof(h->cur_pic));
+            avcodec_get_frame_defaults(&h->cur_pic.f);
+            h->cur_pic.tf.f = &h->cur_pic.f;
+            h->cur_pic.f.extended_data = h->cur_pic.f.data;
 
             if (ff_h264_alloc_tables(h) < 0) {
                 av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n");
@@ -1640,17 +1727,18 @@ static int decode_update_thread_context(AVCodecContext *dst,
     h->data_partitioning = h1->data_partitioning;
     h->low_delay         = h1->low_delay;
 
-    memcpy(h->DPB, h1->DPB, h1->picture_count * sizeof(*h1->DPB));
-
-    // reset s->picture[].f.extended_data to s->picture[].f.data
-    for (i = 0; i < h->picture_count; i++) {
-        h->DPB[i].f.extended_data = h->DPB[i].f.data;
+    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
         h->DPB[i].period_since_free ++;
+        unref_picture(h, &h->DPB[i]);
+        if (h1->DPB[i].f.data[0] &&
+            (ret = ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
+            return ret;
     }
 
     h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
-    h->cur_pic     = h1->cur_pic;
-    h->cur_pic.f.extended_data = h->cur_pic.f.data;
+    unref_picture(h, &h->cur_pic);
+    if ((ret = ref_picture(h, &h->cur_pic, &h1->cur_pic)) < 0)
+        return ret;
 
     h->workaround_bugs = h1->workaround_bugs;
     h->low_delay       = h1->low_delay;
@@ -1741,7 +1829,7 @@ int ff_h264_frame_start(H264Context *h)
     }
     pic = &h->DPB[i];
 
-    pic->f.reference            = h->droppable ? 0 : h->picture_structure;
+    pic->reference              = h->droppable ? 0 : h->picture_structure;
     pic->f.coded_picture_number = h->coded_picture_number++;
     pic->field_picture          = h->picture_structure != PICT_FRAME;
@@ -1761,8 +1849,9 @@ int ff_h264_frame_start(H264Context *h)
         avpriv_color_frame(&pic->f, c);
 
     h->cur_pic_ptr = pic;
-    h->cur_pic     = *h->cur_pic_ptr;
-    h->cur_pic.f.extended_data = h->cur_pic.f.data;
+    unref_picture(h, &h->cur_pic);
+    if ((ret = ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
+        return ret;
 
     if (CONFIG_ERROR_RESILIENCE) {
         ff_er_frame_start(&h->er);
@@ -1789,7 +1878,7 @@ int ff_h264_frame_start(H264Context *h)
            (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
 
     // s->decode = (h->flags & CODEC_FLAG_PSNR) || !s->encoding ||
-    //             h->cur_pic.f.reference /* || h->contains_intra */ || 1;
+    //             h->cur_pic.reference /* || h->contains_intra */ || 1;
 
     /* We mark the current picture as non-reference after allocating it, so
      * that if we break out due to an error it can be released automatically
@@ -1798,7 +1887,7 @@ int ff_h264_frame_start(H264Context *h)
      * get released even with set reference, besides SVQ3 and others do not
      * mark frames as reference later "naturally". */
     if (h->avctx->codec_id != AV_CODEC_ID_SVQ3)
-        h->cur_pic_ptr->f.reference = 0;
+        h->cur_pic_ptr->reference = 0;
 
     h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
@@ -1823,7 +1912,6 @@ static void decode_postinit(H264Context *h, int setup_finished)
     Picture *cur = h->cur_pic_ptr;
     int i, pics, out_of_order, out_idx;
 
-    h->cur_pic_ptr->f.qscale_type = FF_QSCALE_TYPE_H264;
     h->cur_pic_ptr->f.pict_type   = h->pict_type;
 
     if (h->next_output_pic)
@@ -1954,8 +2042,8 @@ static void decode_postinit(H264Context *h, int setup_finished)
     av_assert0(pics <= MAX_DELAYED_PIC_COUNT);
 
     h->delayed_pic[pics++] = cur;
-    if (cur->f.reference == 0)
-        cur->f.reference = DELAYED_PIC_REF;
+    if (cur->reference == 0)
+        cur->reference = DELAYED_PIC_REF;
 
     out     = h->delayed_pic[0];
     out_idx = 0;
@@ -1973,10 +2061,9 @@ static void decode_postinit(H264Context *h, int setup_finished)
     out_of_order = out->poc < h->next_outputed_poc;
 
     if (out_of_order || pics > h->avctx->has_b_frames) {
-        out->f.reference &= ~DELAYED_PIC_REF;
+        out->reference &= ~DELAYED_PIC_REF;
         // for frame threading, the owner must be the second field's thread or
         // else the first thread can release the picture and reuse it unsafely
-        out->owner2 = h;
         for (i = out_idx; h->delayed_pic[i]; i++)
             h->delayed_pic[i] = h->delayed_pic[i + 1];
     }
@@ -2400,7 +2487,7 @@ static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type,
 void ff_h264_hl_decode_mb(H264Context *h)
 {
     const int mb_xy   = h->mb_xy;
-    const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+    const int mb_type = h->cur_pic.mb_type[mb_xy];
     int is_complex    = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || h->qscale == 0;
 
     if (CHROMA444) {
@@ -2567,9 +2654,10 @@ static void flush_change(H264Context *h)
     h->outputed_poc = h->next_outputed_poc = INT_MIN;
     h->prev_interlaced_frame = 1;
     idr(h);
 
     h->prev_frame_num = -1;
     if (h->cur_pic_ptr) {
-        h->cur_pic_ptr->f.reference = 0;
+        h->cur_pic_ptr->reference = 0;
         for (j=i=0; h->delayed_pic[i]; i++)
             if (h->delayed_pic[i] != h->cur_pic_ptr)
                 h->delayed_pic[j++] = h->delayed_pic[i];
@@ -2595,17 +2683,16 @@ static void flush_dpb(AVCodecContext *avctx)
     for (i = 0; i <= MAX_DELAYED_PIC_COUNT; i++) {
         if (h->delayed_pic[i])
-            h->delayed_pic[i]->f.reference = 0;
+            h->delayed_pic[i]->reference = 0;
         h->delayed_pic[i] = NULL;
     }
 
     flush_change(h);
 
-    for (i = 0; i < h->picture_count; i++) {
-        if (h->DPB[i].f.data[0])
-            free_frame_buffer(h, &h->DPB[i]);
-    }
+    for (i = 0; i < MAX_PICTURE_COUNT; i++)
+        unref_picture(h, &h->DPB[i]);
     h->cur_pic_ptr = NULL;
+    unref_picture(h, &h->cur_pic);
 
     h->mb_x = h->mb_y = 0;
@@ -2738,7 +2825,7 @@ static int field_end(H264Context *h, int in_setup)
     h->mb_y = 0;
 
     if (!in_setup && !h->droppable)
-        ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX,
+        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                   h->picture_structure == PICT_BOTTOM_FIELD);
 
     if (CONFIG_H264_VDPAU_DECODER &&
@@ -3132,9 +3219,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
         h0->current_slice = 0;
 
         if (!h0->first_field) {
-            if (h->cur_pic_ptr && !h->droppable &&
-                h->cur_pic_ptr->owner2 == h) {
-                ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX,
+            if (h->cur_pic_ptr && !h->droppable) {
+                ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                           h->picture_structure == PICT_BOTTOM_FIELD);
             }
             h->cur_pic_ptr = NULL;
@@ -3362,11 +3448,11 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
             if (h0->first_field) {
                 assert(h0->cur_pic_ptr);
                 assert(h0->cur_pic_ptr->f.data[0]);
-                assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF);
+                assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
 
                 /* Mark old field/frame as completed */
-                if (!last_pic_droppable && h0->cur_pic_ptr->owner2 == h0) {
-                    ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
+                if (!last_pic_droppable && h0->cur_pic_ptr->tf.owner == h0->avctx) {
+                    ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
                                               last_pic_structure == PICT_BOTTOM_FIELD);
                 }
@@ -3375,7 +3461,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
                     /* Previous field is unmatched. Don't display it, but let it
                      * remain for reference if marked as such. */
                     if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
-                        ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
+                        ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
                                                   last_pic_structure == PICT_TOP_FIELD);
                     }
                 } else {
@@ -3385,7 +3471,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
                          * pair. Throw away previous field except for reference
                          * purposes. */
                         if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
-                            ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
+                            ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
                                                       last_pic_structure == PICT_TOP_FIELD);
                         }
                     } else {
@@ -3408,14 +3494,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
                             h->droppable         = last_pic_droppable;
                             return AVERROR_PATCHWELCOME;
                         }
-
-                        /* Take ownership of this buffer. Note that if another thread owned
-                         * the first field of this buffer, we're not operating on that pointer,
-                         * so the original thread is still responsible for reporting progress
-                         * on that first field (or if that was us, we just did that above).
-                         * By taking ownership, we assign responsibility to ourselves to
-                         * report progress on the second field. */
-                        h0->cur_pic_ptr->owner2 = h0;
                     }
                 }
             }
@@ -3433,8 +3511,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
             h->prev_frame_num++;
             h->prev_frame_num        %= 1 << h->sps.log2_max_frame_num;
             h->cur_pic_ptr->frame_num = h->prev_frame_num;
-            ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX, 0);
-            ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX, 1);
+            ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
+            ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
             if ((ret = ff_generate_sliding_window_mmcos(h, 1)) < 0 &&
                 h->avctx->err_recognition & AV_EF_EXPLODE)
                 return ret;
@@ -3464,7 +3542,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
         if (h0->first_field) {
             assert(h0->cur_pic_ptr);
             assert(h0->cur_pic_ptr->f.data[0]);
-            assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF);
+            assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
 
             /* figure out if we have a complementary field pair */
             if (!FIELD_PICTURE || h->picture_structure == last_pic_structure) {
@@ -3474,7 +3552,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
                 h0->first_field = FIELD_PICTURE;
             } else {
                 if (h0->cur_pic_ptr->frame_num != h->frame_num) {
-                    ff_thread_report_progress((AVFrame*)h0->cur_pic_ptr, INT_MAX,
+                    ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
                                               h0->picture_structure==PICT_BOTTOM_FIELD);
                     /* This and the previous field had different frame_nums.
                      * Consider this field first in pair. Throw away previous
@@ -3746,16 +3824,16 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
             int *ref2frm = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][j];
             for (i = 0; i < 16; i++) {
                 id_list[i] = 60;
-                if (h->ref_list[j][i].f.data[0]) {
+                if (j < h->list_count && i < h->ref_count[j] && h->ref_list[j][i].f.buf[0]) {
                     int k;
-                    uint8_t *base = h->ref_list[j][i].f.base[0];
+                    AVBuffer *buf = h->ref_list[j][i].f.buf[0]->buffer;
                     for (k = 0; k < h->short_ref_count; k++)
-                        if (h->short_ref[k]->f.base[0] == base) {
+                        if (h->short_ref[k]->f.buf[0]->buffer == buf) {
                             id_list[i] = k;
                             break;
                         }
                     for (k = 0; k < h->long_ref_count; k++)
-                        if (h->long_ref[k] && h->long_ref[k]->f.base[0] == base) {
+                        if (h->long_ref[k] && h->long_ref[k]->f.buf[0]->buffer == buf) {
                             id_list[i] = h->short_ref_count + k;
                             break;
                         }
@@ -3766,12 +3844,12 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
             ref2frm[1]      = -1;
             for (i = 0; i < 16; i++)
                 ref2frm[i + 2] = 4 * id_list[i] +
-                                 (h->ref_list[j][i].f.reference & 3);
+                                 (h->ref_list[j][i].reference & 3);
             ref2frm[18 + 0] =
             ref2frm[18 + 1] = -1;
             for (i = 16; i < 48; i++)
                 ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
-                                 (h->ref_list[j][i].f.reference & 3);
+                                 (h->ref_list[j][i].reference & 3);
         }
 
         if (h->ref_count[0]) h->er.last_pic = &h->ref_list[0][0];
@@ -3834,11 +3912,11 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h,
             const int b_xy  = h->mb2b_xy[top_xy] + 3 * b_stride;
             const int b8_xy = 4 * top_xy + 2;
             int (*ref2frm)[64] = (void*)(h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2));
-            AV_COPY128(mv_dst - 1 * 8, h->cur_pic.f.motion_val[list][b_xy + 0]);
+            AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
             ref_cache[0 - 1 * 8] =
-            ref_cache[1 - 1 * 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 0]];
+            ref_cache[1 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 0]];
             ref_cache[2 - 1 * 8] =
-            ref_cache[3 - 1 * 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 1]];
+            ref_cache[3 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 1]];
         } else {
             AV_ZERO128(mv_dst - 1 * 8);
             AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
@@ -3849,14 +3927,14 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h,
             const int b_xy  = h->mb2b_xy[left_xy[LTOP]] + 3;
             const int b8_xy = 4 * left_xy[LTOP] + 1;
             int (*ref2frm)[64] =(void*)( h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2));
-            AV_COPY32(mv_dst - 1 + 0,  h->cur_pic.f.motion_val[list][b_xy + b_stride * 0]);
-            AV_COPY32(mv_dst - 1 + 8,  h->cur_pic.f.motion_val[list][b_xy + b_stride * 1]);
-            AV_COPY32(mv_dst - 1 + 16, h->cur_pic.f.motion_val[list][b_xy + b_stride * 2]);
-            AV_COPY32(mv_dst - 1 + 24, h->cur_pic.f.motion_val[list][b_xy + b_stride * 3]);
+            AV_COPY32(mv_dst - 1 + 0,  h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
+            AV_COPY32(mv_dst - 1 + 8,  h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
+            AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
+            AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
             ref_cache[-1 + 0] =
-            ref_cache[-1 + 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 2 * 0]];
+            ref_cache[-1 + 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
             ref_cache[-1 + 16] =
-            ref_cache[-1 + 24] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 2 * 1]];
+            ref_cache[-1 + 24] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
         } else {
             AV_ZERO32(mv_dst - 1 + 0);
             AV_ZERO32(mv_dst - 1 + 8);
@@ -3880,7 +3958,7 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h,
     }
 
     {
-        int8_t *ref = &h->cur_pic.f.ref_index[list][4 * mb_xy];
+        int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
         int (*ref2frm)[64] = (void*)(h->ref2frm[h->slice_num & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2));
         uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]], ref2frm[list][ref[1]]) & 0x00FF00FF) * 0x0101;
         uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]], ref2frm[list][ref[3]]) & 0x00FF00FF) * 0x0101;
@@ -3891,7 +3969,7 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h,
     }
 
     {
-        int16_t(*mv_src)[2] = &h->cur_pic.f.motion_val[list][4 * h->mb_x + 4 * h->mb_y * b_stride];
+        int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * h->mb_x + 4 * h->mb_y * b_stride];
         AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
         AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
         AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
@@ -3918,7 +3996,7 @@ static int fill_filter_caches(H264Context *h, int mb_type)
     left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
     if (FRAME_MBAFF) {
-        const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.f.mb_type[mb_xy - 1]);
+        const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
         const int curr_mb_field_flag = IS_INTERLACED(mb_type);
         if (h->mb_y & 1) {
             if (left_mb_field_flag != curr_mb_field_flag)
@@ -3926,7 +4004,7 @@ static int fill_filter_caches(H264Context *h, int mb_type)
         } else {
             if (curr_mb_field_flag)
                 top_xy += h->mb_stride &
-                          (((h->cur_pic.f.mb_type[top_xy] >> 7) & 1) - 1);
+                          (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
             if (left_mb_field_flag != curr_mb_field_flag)
                 left_xy[LBOT] += h->mb_stride;
         }
@@ -3940,25 +4018,25 @@ static int fill_filter_caches(H264Context *h, int mb_type)
         * This is a conservative estimate: could also check beta_offset
         * and more accurate chroma_qp. */
        int qp_thresh = h->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
-       int qp        = h->cur_pic.f.qscale_table[mb_xy];
+       int qp        = h->cur_pic.qscale_table[mb_xy];
        if (qp <= qp_thresh &&
            (left_xy[LTOP] < 0 ||
-            ((qp + h->cur_pic.f.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
+            ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
            (top_xy < 0 ||
-            ((qp + h->cur_pic.f.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
+            ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
            if (!FRAME_MBAFF)
                return 1;
            if ((left_xy[LTOP] < 0 ||
-                ((qp + h->cur_pic.f.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
+                ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
                (top_xy < h->mb_stride ||
-                ((qp + h->cur_pic.f.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
+                ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
                return 1;
        }
    }
 
-    top_type        = h->cur_pic.f.mb_type[top_xy];
-    left_type[LTOP] = h->cur_pic.f.mb_type[left_xy[LTOP]];
-    left_type[LBOT] = h->cur_pic.f.mb_type[left_xy[LBOT]];
+    top_type        = h->cur_pic.mb_type[top_xy];
+    left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
+    left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
    if (h->deblocking_filter == 2) {
        if (h->slice_table[top_xy] != h->slice_num)
            top_type = 0;
@@ -4063,7 +4141,7 @@ static void loop_filter(H264Context *h, int start_x, int end_x)
             int mb_xy, mb_type;
             mb_xy         = h->mb_xy = mb_x + mb_y * h->mb_stride;
             h->slice_num  = h->slice_table[mb_xy];
-            mb_type       = h->cur_pic.f.mb_type[mb_xy];
+            mb_type       = h->cur_pic.mb_type[mb_xy];
             h->list_count = h->list_counts[mb_xy];
 
             if (FRAME_MBAFF)
@@ -4098,8 +4176,8 @@ static void loop_filter(H264Context *h, int start_x, int end_x)
                              uvlinesize, 0);
                 if (fill_filter_caches(h, mb_type))
                     continue;
-                h->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mb_xy]);
-                h->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mb_xy]);
+                h->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.qscale_table[mb_xy]);
+                h->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.qscale_table[mb_xy]);
 
                 if (FRAME_MBAFF) {
                     ff_h264_filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr,
@@ -4121,9 +4199,9 @@ static void predict_field_decoding_flag(H264Context *h)
 {
     const int mb_xy = h->mb_x + h->mb_y * h->mb_stride;
     int mb_type     = (h->slice_table[mb_xy - 1] == h->slice_num) ?
-                      h->cur_pic.f.mb_type[mb_xy - 1] :
+                      h->cur_pic.mb_type[mb_xy - 1] :
                       (h->slice_table[mb_xy - h->mb_stride] == h->slice_num) ?
-                      h->cur_pic.f.mb_type[mb_xy - h->mb_stride] : 0;
+                      h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
     h->mb_mbaff     = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
 }
@@ -4157,7 +4235,7 @@ static void decode_finish_row(H264Context *h)
     if (h->droppable)
         return;
 
-    ff_thread_report_progress(&h->cur_pic_ptr->f, top + height - 1,
+    ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
                               h->picture_structure == PICT_BOTTOM_FIELD);
 }
@@ -4720,9 +4798,8 @@ again:
 end:
     /* clean up */
-    if (h->cur_pic_ptr && h->cur_pic_ptr->owner2 == h &&
-        !h->droppable) {
-        ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX,
+    if (h->cur_pic_ptr && !h->droppable) {
+        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                   h->picture_structure == PICT_BOTTOM_FIELD);
     }
@@ -4752,6 +4829,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
     int buf_index = 0;
     Picture *out;
     int i, out_idx;
+    int ret;
 
     h->flags = avctx->flags;
@@ -4779,9 +4857,10 @@ static int decode_frame(AVCodecContext *avctx, void *data,
             h->delayed_pic[i] = h->delayed_pic[i + 1];
 
         if (out) {
-            out->f.reference &= ~DELAYED_PIC_REF;
+            out->reference &= ~DELAYED_PIC_REF;
+            if ((ret = av_frame_ref(pict, &out->f)) < 0)
+                return ret;
             *got_frame = 1;
-            *pict      = out->f;
         }
 
         return buf_index;
@@ -4836,8 +4915,9 @@ not_extra:
         /* Wait for second field. */
         *got_frame = 0;
         if (h->next_output_pic && (h->next_output_pic->sync || h->sync>1)) {
+            if ((ret = av_frame_ref(pict, &h->next_output_pic->f)) < 0)
+                return ret;
             *got_frame = 1;
-            *pict      = h->next_output_pic->f;
         }
     }
@@ -4872,13 +4952,15 @@ static av_cold int h264_decode_end(AVCodecContext *avctx)
     ff_h264_remove_all_refs(h);
     ff_h264_free_context(h);
 
-    if (h->DPB && !h->avctx->internal->is_copy) {
-        for (i = 0; i < h->picture_count; i++) {
-            free_picture(h, &h->DPB[i]);
+    if (h->DPB) {
+        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+            unref_picture(h, &h->DPB[i]);
         }
     }
     av_freep(&h->DPB);
+    unref_picture(h, &h->cur_pic);
 
     return 0;
 }
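The decode_frame() hunks above are the user-visible end of this conversion: instead of copying the output Picture's embedded AVFrame into the caller's frame (*pict = out->f), the decoder now hands the caller its own reference via av_frame_ref(). Against the public API, the ownership pattern looks like this minimal self-contained sketch; the pixel format and dimensions are arbitrary placeholders:

    #include <libavutil/frame.h>

    int main(void)
    {
        int ret = -1;
        AVFrame *src = av_frame_alloc();
        AVFrame *dst = av_frame_alloc();

        if (!src || !dst)
            goto end;

        /* Give src refcounted data buffers. */
        src->format = AV_PIX_FMT_YUV420P;
        src->width  = 64;
        src->height = 64;
        if (av_frame_get_buffer(src, 32) < 0)
            goto end;

        /* dst becomes a second reference to the same buffers; no pixels are copied. */
        ret = av_frame_ref(dst, src);

    end:
        /* Each reference is released independently; the data is freed with the last one. */
        av_frame_free(&src);
        av_frame_free(&dst);
        return ret < 0;
    }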

@@ -270,8 +270,6 @@ typedef struct H264Context {
     Picture *DPB;
     Picture *cur_pic_ptr;
     Picture cur_pic;
-    int picture_count;
-    int picture_range_start, picture_range_end;
 
     int pixel_shift;    ///< 0 for 8-bit H264, 1 for high-bit-depth H264
     int chroma_qp[2];   // QPc
@@ -648,6 +646,11 @@ typedef struct H264Context {
     int16_t *dc_val_base;
 
     uint8_t *visualization_buffer[3]; ///< temporary buffer vor MV visualization
+
+    AVBufferPool *qscale_table_pool;
+    AVBufferPool *mb_type_pool;
+    AVBufferPool *motion_val_pool;
+    AVBufferPool *ref_index_pool;
 } H264Context;
 
 extern const uint8_t ff_h264_chroma_qp[7][QP_MAX_NUM + 1]; ///< One chroma qp table for each possible bit depth (8-14).
@@ -903,7 +906,7 @@ static av_always_inline void write_back_motion_list(H264Context *h,
                                                     int b_xy, int b8_xy,
                                                     int mb_type, int list)
 {
-    int16_t(*mv_dst)[2] = &h->cur_pic.f.motion_val[list][b_xy];
+    int16_t(*mv_dst)[2] = &h->cur_pic.motion_val[list][b_xy];
     int16_t(*mv_src)[2] = &h->mv_cache[list][scan8[0]];
     AV_COPY128(mv_dst + 0 * b_stride, mv_src + 8 * 0);
     AV_COPY128(mv_dst + 1 * b_stride, mv_src + 8 * 1);
@@ -924,7 +927,7 @@ static av_always_inline void write_back_motion_list(H264Context *h,
     }
 
     {
-        int8_t *ref_index = &h->cur_pic.f.ref_index[list][b8_xy];
+        int8_t *ref_index = &h->cur_pic.ref_index[list][b8_xy];
         int8_t *ref_cache = h->ref_cache[list];
         ref_index[0 + 0 * 2] = ref_cache[scan8[0]];
         ref_index[1 + 0 * 2] = ref_cache[scan8[4]];
@@ -942,7 +945,7 @@ static av_always_inline void write_back_motion(H264Context *h, int mb_type)
     if (USES_LIST(mb_type, 0)) {
         write_back_motion_list(h, b_stride, b_xy, b8_xy, mb_type, 0);
     } else {
-        fill_rectangle(&h->cur_pic.f.ref_index[0][b8_xy],
+        fill_rectangle(&h->cur_pic.ref_index[0][b8_xy],
                        2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
     }
 
     if (USES_LIST(mb_type, 1))
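The four AVBufferPool fields added to H264Context above back the per-picture qscale/mb_type/motion_val/ref_index tables, so those buffers can be recycled across frames instead of reallocated. A minimal self-contained sketch of the pool API they rely on (the 4096-byte size is a placeholder, not a real table size):

    #include <libavutil/buffer.h>

    int main(void)
    {
        /* Pool of fixed-size refcounted buffers; NULL selects the default allocator. */
        AVBufferPool *pool = av_buffer_pool_init(4096, NULL);
        AVBufferRef  *buf;

        if (!pool)
            return 1;

        /* Hands back an idle buffer if the pool has one, else allocates a new one. */
        buf = av_buffer_pool_get(pool);
        if (buf)
            av_buffer_unref(&buf); /* returns the buffer to the pool */

        /* The pool itself is freed once every outstanding buffer is back. */
        av_buffer_pool_uninit(&pool);
        return 0;
    }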

@@ -1282,8 +1282,8 @@ static int decode_cabac_field_decoding_flag(H264Context *h) {
     unsigned long ctx = 0;
 
-    ctx += h->mb_field_decoding_flag & !!h->mb_x; //for FMO:(s->current_picture.f.mb_type[mba_xy] >> 7) & (h->slice_table[mba_xy] == h->slice_num);
-    ctx += (h->cur_pic.f.mb_type[mbb_xy] >> 7) & (h->slice_table[mbb_xy] == h->slice_num);
+    ctx += h->mb_field_decoding_flag & !!h->mb_x; //for FMO:(s->current_picture.mb_type[mba_xy] >> 7) & (h->slice_table[mba_xy] == h->slice_num);
+    ctx += (h->cur_pic.mb_type[mbb_xy] >> 7) & (h->slice_table[mbb_xy] == h->slice_num);
 
     return get_cabac_noinline( &h->cabac, &(h->cabac_state+70)[ctx] );
 }
@@ -1327,13 +1327,13 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
         mba_xy = mb_xy - 1;
         if( (mb_y&1)
             && h->slice_table[mba_xy] == h->slice_num
-            && MB_FIELD == !!IS_INTERLACED( h->cur_pic.f.mb_type[mba_xy] ) )
+            && MB_FIELD == !!IS_INTERLACED( h->cur_pic.mb_type[mba_xy] ) )
             mba_xy += h->mb_stride;
         if( MB_FIELD ){
             mbb_xy = mb_xy - h->mb_stride;
             if( !(mb_y&1)
                 && h->slice_table[mbb_xy] == h->slice_num
-                && IS_INTERLACED( h->cur_pic.f.mb_type[mbb_xy] ) )
+                && IS_INTERLACED( h->cur_pic.mb_type[mbb_xy] ) )
                 mbb_xy -= h->mb_stride;
         }else
             mbb_xy = mb_x + (mb_y-1)*h->mb_stride;
@@ -1343,9 +1343,9 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
         mbb_xy = mb_xy - (h->mb_stride << FIELD_PICTURE);
     }
 
-    if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP(h->cur_pic.f.mb_type[mba_xy] ))
+    if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP(h->cur_pic.mb_type[mba_xy] ))
         ctx++;
-    if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP(h->cur_pic.f.mb_type[mbb_xy] ))
+    if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP(h->cur_pic.mb_type[mbb_xy] ))
         ctx++;
 
     if( h->slice_type_nos == AV_PICTURE_TYPE_B )
@@ -1893,7 +1893,7 @@ int ff_h264_decode_mb_cabac(H264Context *h) {
     /* read skip flags */
     if( skip ) {
         if( FRAME_MBAFF && (h->mb_y&1)==0 ){
-            h->cur_pic.f.mb_type[mb_xy] = MB_TYPE_SKIP;
+            h->cur_pic.mb_type[mb_xy] = MB_TYPE_SKIP;
             h->next_mb_skipped = decode_cabac_mb_skip( h, h->mb_x, h->mb_y+1 );
             if(!h->next_mb_skipped)
                 h->mb_mbaff = h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h);
@@ -2012,10 +2012,10 @@ decode_intra_mb:
         h->cbp_table[mb_xy] = 0xf7ef;
         h->chroma_pred_mode_table[mb_xy] = 0;
         // In deblocking, the quantizer is 0
-        h->cur_pic.f.qscale_table[mb_xy] = 0;
+        h->cur_pic.qscale_table[mb_xy] = 0;
         // All coeffs are present
         memset(h->non_zero_count[mb_xy], 16, 48);
-        h->cur_pic.f.mb_type[mb_xy] = mb_type;
+        h->cur_pic.mb_type[mb_xy] = mb_type;
         h->last_qscale_diff = 0;
         return 0;
     }
@@ -2316,7 +2316,7 @@ decode_intra_mb:
             AV_WN32A(&nnz_cache[4+8*10], top_empty);
         }
     }
-    h->cur_pic.f.mb_type[mb_xy] = mb_type;
+    h->cur_pic.mb_type[mb_xy] = mb_type;
 
     if( cbp || IS_INTRA16x16( mb_type ) ) {
         const uint8_t *scan, *scan8x8;
@@ -2418,7 +2418,7 @@ decode_intra_mb:
         h->last_qscale_diff = 0;
     }
 
-    h->cur_pic.f.qscale_table[mb_xy] = h->qscale;
+    h->cur_pic.qscale_table[mb_xy] = h->qscale;
     write_back_non_zero_count(h);
     return 0;

@@ -770,11 +770,11 @@ decode_intra_mb:
         skip_bits_long(&h->gb, mb_size);
 
         // In deblocking, the quantizer is 0
-        h->cur_pic.f.qscale_table[mb_xy] = 0;
+        h->cur_pic.qscale_table[mb_xy] = 0;
         // All coeffs are present
         memset(h->non_zero_count[mb_xy], 16, 48);
-        h->cur_pic.f.mb_type[mb_xy] = mb_type;
+        h->cur_pic.mb_type[mb_xy] = mb_type;
         return 0;
     }
@@ -1074,7 +1074,7 @@ decode_intra_mb:
     }
 
     h->cbp=
     h->cbp_table[mb_xy]= cbp;
-    h->cur_pic.f.mb_type[mb_xy] = mb_type;
+    h->cur_pic.mb_type[mb_xy] = mb_type;
 
     if(cbp || IS_INTRA16x16(mb_type)){
         int i4x4, i8x8, chroma_idx;
@@ -1155,7 +1155,7 @@ decode_intra_mb:
             fill_rectangle(&h->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1);
             fill_rectangle(&h->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1);
         }
-        h->cur_pic.f.qscale_table[mb_xy] = h->qscale;
+        h->cur_pic.qscale_table[mb_xy] = h->qscale;
         write_back_non_zero_count(h);
         return 0;

@@ -87,7 +87,7 @@ static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field,
                 poc= (poc&~3) + rfield + 1;
 
                 for(j=start; j<end; j++){
-                    if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].f.reference & 3) == poc) {
+                    if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].reference & 3) == poc) {
                         int cur_ref= mbafi ? (j-16)^field : j;
                         if (ref1->mbaff)
                             map[list][2 * old_ref + (rfield^field) + 16] = cur_ref;
@@ -105,12 +105,12 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
     Picture * const cur = h->cur_pic_ptr;
     int list, j, field;
     int sidx= (h->picture_structure&1)^1;
-    int ref1sidx = (ref1->f.reference&1)^1;
+    int ref1sidx = (ref1->reference&1)^1;
 
     for(list=0; list<2; list++){
         cur->ref_count[sidx][list] = h->ref_count[list];
         for(j=0; j<h->ref_count[list]; j++)
-            cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].f.reference & 3);
+            cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].reference & 3);
     }
 
     if(h->picture_structure == PICT_FRAME){
@@ -126,8 +126,8 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
             int *col_poc = h->ref_list[1]->field_poc;
             h->col_parity= (FFABS(col_poc[0] - cur_poc) >= FFABS(col_poc[1] - cur_poc));
             ref1sidx=sidx= h->col_parity;
-        } else if (!(h->picture_structure & h->ref_list[1][0].f.reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity
-            h->col_fieldoff = 2 * h->ref_list[1][0].f.reference - 3;
+        } else if (!(h->picture_structure & h->ref_list[1][0].reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity
+            h->col_fieldoff = 2 * h->ref_list[1][0].reference - 3;
         }
 
         if (h->slice_type_nos != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
@@ -143,7 +143,7 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
 static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y)
 {
-    int ref_field = ref->f.reference - 1;
+    int ref_field = ref->reference - 1;
     int ref_field_picture = ref->field_picture;
     int ref_height = 16*h->mb_height >> ref_field_picture;
@@ -153,7 +153,7 @@ static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y
     //FIXME it can be safe to access mb stuff
     //even if pixels aren't deblocked yet
 
-    ff_thread_await_progress(&ref->f,
+    ff_thread_await_progress(&ref->tf,
                              FFMIN(16 * mb_y >> ref_field_picture, ref_height - 1),
                              ref_field_picture && ref_field);
 }
@@ -172,7 +172,7 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
     int mv[2];
     int list;
 
-    assert(h->ref_list[1][0].f.reference & 3);
+    assert(h->ref_list[1][0].reference & 3);
 
     await_reference_mb_row(h, &h->ref_list[1][0], h->mb_y + !!IS_INTERLACED(*mb_type));
@@ -234,7 +234,7 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
         return;
     }
 
-    if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
+    if (IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
         if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
             mb_y = (h->mb_y&~1) + h->col_parity;
             mb_xy= h->mb_x + ((h->mb_y&~1) + h->col_parity)*h->mb_stride;
@@ -248,8 +248,8 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
         if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
             mb_y = h->mb_y&~1;
             mb_xy= h->mb_x + (h->mb_y&~1)*h->mb_stride;
-            mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
-            mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + h->mb_stride];
+            mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
+            mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + h->mb_stride];
             b8_stride = 2+4*h->mb_stride;
             b4_stride *= 6;
             if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
@@ -268,7 +268,7 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
        }else{ // AFR/FR -> AFR/FR
 single_col:
            mb_type_col[0] =
-           mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];
+           mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
 
            sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
@@ -288,10 +288,10 @@ single_col:
 
     await_reference_mb_row(h, &h->ref_list[1][0], mb_y);
 
-    l1mv0  = (void*)&h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
-    l1mv1  = (void*)&h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
-    l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
-    l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
+    l1mv0  = (void*)&h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
+    l1mv1  = (void*)&h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
+    l1ref0 = &h->ref_list[1][0].ref_index [0][4 * mb_xy];
+    l1ref1 = &h->ref_list[1][0].ref_index [1][4 * mb_xy];
     if(!b8_stride){
         if(h->mb_y&1){
             l1ref0 += 2;
@@ -419,11 +419,11 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
     unsigned int sub_mb_type;
     int i8, i4;
 
-    assert(h->ref_list[1][0].f.reference & 3);
+    assert(h->ref_list[1][0].reference & 3);
 
     await_reference_mb_row(h, &h->ref_list[1][0], h->mb_y + !!IS_INTERLACED(*mb_type));
 
-    if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
+    if (IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
         if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
             mb_y = (h->mb_y&~1) + h->col_parity;
             mb_xy= h->mb_x + ((h->mb_y&~1) + h->col_parity)*h->mb_stride;
@@ -437,8 +437,8 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
         if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
             mb_y = h->mb_y&~1;
             mb_xy= h->mb_x + (h->mb_y&~1)*h->mb_stride;
-            mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
-            mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + h->mb_stride];
+            mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
+            mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + h->mb_stride];
             b8_stride = 2+4*h->mb_stride;
             b4_stride *= 6;
             if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
@@ -458,7 +458,7 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
        }else{ // AFR/FR -> AFR/FR
 single_col:
            mb_type_col[0] =
-           mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];
+           mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
 
            sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
@@ -478,10 +478,10 @@ single_col:
 
     await_reference_mb_row(h, &h->ref_list[1][0], mb_y);
 
-    l1mv0  = (void*)&h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
-    l1mv1  = (void*)&h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
-    l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
-    l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
+    l1mv0  = (void*)&h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
+    l1mv1  = (void*)&h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
+    l1ref0 = &h->ref_list[1][0].ref_index [0][4 * mb_xy];
+    l1ref1 = &h->ref_list[1][0].ref_index [1][4 * mb_xy];
     if(!b8_stride){
         if(h->mb_y&1){
             l1ref0 += 2;

@@ -253,10 +253,10 @@ static av_always_inline void h264_filter_mb_fast_internal(H264Context *h,
     int a = h->slice_alpha_c0_offset - qp_bd_offset;
     int b = h->slice_beta_offset - qp_bd_offset;
 
-    int mb_type = h->cur_pic.f.mb_type[mb_xy];
-    int qp      = h->cur_pic.f.qscale_table[mb_xy];
-    int qp0     = h->cur_pic.f.qscale_table[mb_xy - 1];
-    int qp1     = h->cur_pic.f.qscale_table[h->top_mb_xy];
+    int mb_type = h->cur_pic.mb_type[mb_xy];
+    int qp      = h->cur_pic.qscale_table[mb_xy];
+    int qp0     = h->cur_pic.qscale_table[mb_xy - 1];
+    int qp1     = h->cur_pic.qscale_table[h->top_mb_xy];
     int qpc  = get_chroma_qp( h, 0, qp );
     int qpc0 = get_chroma_qp( h, 0, qp0 );
     int qpc1 = get_chroma_qp( h, 0, qp1 );
@@ -494,10 +494,10 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
            for(j=0; j<2; j++, mbn_xy += h->mb_stride){
                DECLARE_ALIGNED(8, int16_t, bS)[4];
                int qp;
-               if (IS_INTRA(mb_type | h->cur_pic.f.mb_type[mbn_xy])) {
+               if (IS_INTRA(mb_type | h->cur_pic.mb_type[mbn_xy])) {
                    AV_WN64A(bS, 0x0003000300030003ULL);
                } else {
-                   if (!CABAC && IS_8x8DCT(h->cur_pic.f.mb_type[mbn_xy])) {
+                   if (!CABAC && IS_8x8DCT(h->cur_pic.mb_type[mbn_xy])) {
                        bS[0]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+0]);
                        bS[1]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+1]);
                        bS[2]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+2]);
@@ -512,12 +512,12 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
                }
 
                // Do not use s->qscale as luma quantizer because it has not the same
                // value in IPCM macroblocks.
-               qp = (h->cur_pic.f.qscale_table[mb_xy] + h->cur_pic.f.qscale_table[mbn_xy] + 1) >> 1;
+               qp = (h->cur_pic.qscale_table[mb_xy] + h->cur_pic.qscale_table[mbn_xy] + 1) >> 1;
                tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
                { int i; for (i = 0; i < 4; i++) tprintf(h->avctx, " bS[%d]:%d", i, bS[i]); tprintf(h->avctx, "\n"); }
                filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, a, b, h, 0 );
-               chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mbn_xy]) + 1) >> 1;
-               chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mbn_xy]) + 1) >> 1;
+               chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
+               chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
                if (chroma) {
                    if (chroma444) {
                        filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0);
@@ -577,12 +577,12 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
            // Do not use s->qscale as luma quantizer because it has not the same
            // value in IPCM macroblocks.
            if(bS[0]+bS[1]+bS[2]+bS[3]){
-               qp = (h->cur_pic.f.qscale_table[mb_xy] + h->cur_pic.f.qscale_table[mbm_xy] + 1) >> 1;
+               qp = (h->cur_pic.qscale_table[mb_xy] + h->cur_pic.qscale_table[mbm_xy] + 1) >> 1;
                //tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], h->cur_pic.qscale_table[mbn_xy]);
                tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
                //{ int i; for (i = 0; i < 4; i++) tprintf(h->avctx, " bS[%d]:%d", i, bS[i]); tprintf(h->avctx, "\n"); }
-               chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mbm_xy]) + 1) >> 1;
-               chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mbm_xy]) + 1) >> 1;
+               chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
+               chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
                if( dir == 0 ) {
                    filter_mb_edgev( &img_y[0], linesize, bS, qp, a, b, h, 1 );
                    if (chroma) {
@@ -662,7 +662,7 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
            /* Filter edge */
            // Do not use s->qscale as luma quantizer because it has not the same
            // value in IPCM macroblocks.
-           qp = h->cur_pic.f.qscale_table[mb_xy];
+           qp = h->cur_pic.qscale_table[mb_xy];
            //tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], h->cur_pic.qscale_table[mbn_xy]);
            tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
            //{ int i; for (i = 0; i < 4; i++) tprintf(h->avctx, " bS[%d]:%d", i, bS[i]); tprintf(h->avctx, "\n"); }
@@ -703,7 +703,7 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
 void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
     const int mb_xy= mb_x + mb_y*h->mb_stride;
-    const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+    const int mb_type = h->cur_pic.mb_type[mb_xy];
     const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
     int first_vertical_edge_done = 0;
     av_unused int dir;
@@ -759,9 +759,9 @@ void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint
                }
            }
 
-           mb_qp   = h->cur_pic.f.qscale_table[mb_xy];
-           mbn0_qp = h->cur_pic.f.qscale_table[h->left_mb_xy[0]];
-           mbn1_qp = h->cur_pic.f.qscale_table[h->left_mb_xy[1]];
+           mb_qp   = h->cur_pic.qscale_table[mb_xy];
+           mbn0_qp = h->cur_pic.qscale_table[h->left_mb_xy[0]];
+           mbn1_qp = h->cur_pic.qscale_table[h->left_mb_xy[1]];
            qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1;
            bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) +
                       get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1;
@@ -43,7 +43,7 @@ static av_noinline void FUNC(hl_decode_mb)(H264Context *h)
     const int mb_x    = h->mb_x;
     const int mb_y    = h->mb_y;
     const int mb_xy   = h->mb_xy;
-    const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+    const int mb_type = h->cur_pic.mb_type[mb_xy];
     uint8_t *dest_y, *dest_cb, *dest_cr;
     int linesize, uvlinesize /*dct_offset*/;
     int i, j;
@@ -272,7 +272,7 @@ static av_noinline void FUNC(hl_decode_mb_444)(H264Context *h)
     const int mb_x    = h->mb_x;
     const int mb_y    = h->mb_y;
     const int mb_xy   = h->mb_xy;
-    const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+    const int mb_type = h->cur_pic.mb_type[mb_xy];
     uint8_t *dest[3];
     int linesize;
     int i, j, p;

@@ -68,7 +68,7 @@ static void MCFUNC(hl_motion)(H264Context *h, uint8_t *dest_y,
                               h264_biweight_func *weight_avg)
 {
     const int mb_xy   = h->mb_xy;
-    const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+    const int mb_type = h->cur_pic.mb_type[mb_xy];
 
     av_assert2(IS_INTER(mb_type));

@@ -47,15 +47,15 @@ static av_always_inline int fetch_diagonal_mv(H264Context *h, const int16_t **C,
         const int mb_type = mb_types[xy + (y4 >> 2) * h->mb_stride]; \
         if (!USES_LIST(mb_type, list)) \
             return LIST_NOT_USED; \
-        mv = h->cur_pic_ptr->f.motion_val[list][h->mb2b_xy[xy] + 3 + y4 * h->b_stride]; \
+        mv = h->cur_pic_ptr->motion_val[list][h->mb2b_xy[xy] + 3 + y4 * h->b_stride]; \
         h->mv_cache[list][scan8[0] - 2][0] = mv[0]; \
         h->mv_cache[list][scan8[0] - 2][1] = mv[1] MV_OP; \
-        return h->cur_pic_ptr->f.ref_index[list][4 * xy + 1 + (y4 & ~1)] REF_OP;
+        return h->cur_pic_ptr->ref_index[list][4 * xy + 1 + (y4 & ~1)] REF_OP;
 
     if (topright_ref == PART_NOT_AVAILABLE
         && i >= scan8[0] + 8 && (i & 7) == 4
         && h->ref_cache[list][scan8[0] - 1] != PART_NOT_AVAILABLE) {
-        const uint32_t *mb_types = h->cur_pic_ptr->f.mb_type;
+        const uint32_t *mb_types = h->cur_pic_ptr->mb_type;
         const int16_t *mv;
         AV_ZERO32(h->mv_cache[list][scan8[0] - 2]);
         *C = h->mv_cache[list][scan8[0] - 2];
@@ -252,8 +252,8 @@ static av_always_inline void pred_pskip_motion(H264Context *const h)
 {
     DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = { 0 };
     DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
-    int8_t *ref = h->cur_pic.f.ref_index[0];
-    int16_t(*mv)[2] = h->cur_pic.f.motion_val[0];
+    int8_t *ref = h->cur_pic.ref_index[0];
+    int16_t(*mv)[2] = h->cur_pic.motion_val[0];
     int top_ref, left_ref, diagonal_ref, match_count, mx, my;
     const int16_t *A, *B, *C;
     int b_stride = h->b_stride;
@@ -369,7 +369,7 @@ static void fill_decode_neighbors(H264Context *h, int mb_type)
         left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
         h->left_block = left_block_options[0];
         if (FRAME_MBAFF) {
-            const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.f.mb_type[mb_xy - 1]);
+            const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
             const int curr_mb_field_flag = IS_INTERLACED(mb_type);
             if (h->mb_y & 1) {
                 if (left_mb_field_flag != curr_mb_field_flag) {
@@ -387,9 +387,9 @@ static void fill_decode_neighbors(H264Context *h, int mb_type)
                 }
             } else {
                 if (curr_mb_field_flag) {
-                    topleft_xy += h->mb_stride & (((h->cur_pic.f.mb_type[top_xy - 1] >> 7) & 1) - 1);
-                    topright_xy += h->mb_stride & (((h->cur_pic.f.mb_type[top_xy + 1] >> 7) & 1) - 1);
-                    top_xy += h->mb_stride & (((h->cur_pic.f.mb_type[top_xy] >> 7) & 1) - 1);
+                    topleft_xy += h->mb_stride & (((h->cur_pic.mb_type[top_xy - 1] >> 7) & 1) - 1);
+                    topright_xy += h->mb_stride & (((h->cur_pic.mb_type[top_xy + 1] >> 7) & 1) - 1);
+                    top_xy += h->mb_stride & (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
                 }
                 if (left_mb_field_flag != curr_mb_field_flag) {
                     if (curr_mb_field_flag) {
@@ -409,11 +409,11 @@ static void fill_decode_neighbors(H264Context *h, int mb_type)
     h->left_mb_xy[LBOT] = left_xy[LBOT];
     //FIXME do we need all in the context?
-    h->topleft_type = h->cur_pic.f.mb_type[topleft_xy];
-    h->top_type = h->cur_pic.f.mb_type[top_xy];
-    h->topright_type = h->cur_pic.f.mb_type[topright_xy];
-    h->left_type[LTOP] = h->cur_pic.f.mb_type[left_xy[LTOP]];
-    h->left_type[LBOT] = h->cur_pic.f.mb_type[left_xy[LBOT]];
+    h->topleft_type = h->cur_pic.mb_type[topleft_xy];
+    h->top_type = h->cur_pic.mb_type[top_xy];
+    h->topright_type = h->cur_pic.mb_type[topright_xy];
+    h->left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
+    h->left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
 
     if (FMO) {
         if (h->slice_table[topleft_xy] != h->slice_num)
@@ -479,7 +479,7 @@ static void fill_decode_caches(H264Context *h, int mb_type)
                     h->left_samples_available &= 0xFF5F;
                 }
             } else {
-                int left_typei = h->cur_pic.f.mb_type[left_xy[LTOP] + h->mb_stride];
+                int left_typei = h->cur_pic.mb_type[left_xy[LTOP] + h->mb_stride];
 
                 av_assert2(left_xy[LTOP] == left_xy[LBOT]);
                 if (!((left_typei & type_mask) && (left_type[LTOP] & type_mask))) {
@@ -601,9 +601,9 @@ static void fill_decode_caches(H264Context *h, int mb_type)
         int b_stride = h->b_stride;
         for (list = 0; list < h->list_count; list++) {
             int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
-            int8_t *ref = h->cur_pic.f.ref_index[list];
+            int8_t *ref = h->cur_pic.ref_index[list];
             int16_t(*mv_cache)[2] = &h->mv_cache[list][scan8[0]];
-            int16_t(*mv)[2] = h->cur_pic.f.motion_val[list];
+            int16_t(*mv)[2] = h->cur_pic.motion_val[list];
             if (!USES_LIST(mb_type, list))
                 continue;
             av_assert2(!(IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred));
@@ -820,8 +820,8 @@ static void av_unused decode_mb_skip(H264Context *h)
     }
 
     write_back_motion(h, mb_type);
-    h->cur_pic.f.mb_type[mb_xy] = mb_type;
-    h->cur_pic.f.qscale_table[mb_xy] = h->qscale;
+    h->cur_pic.mb_type[mb_xy] = mb_type;
+    h->cur_pic.qscale_table[mb_xy] = h->qscale;
     h->slice_table[mb_xy] = h->slice_num;
     h->prev_mb_skipped = 1;
 }
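Editorial note: every h264 hunk above makes the same mechanical change: per-macroblock tables (mb_type, qscale_table, motion_val, ref_index) and the reference flag are now read from the Picture itself rather than from its embedded AVFrame (`.f`). A toy sketch of the layout this implies follows; the struct and field names below are invented for illustration, and only the access pattern mirrors the diff.

#include <stdint.h>

/* Hypothetical stand-ins, not the real lavc structs. */
typedef struct ToyFrame {
    uint8_t *data[4];       /* user-visible planes stay in the frame */
} ToyFrame;

typedef struct ToyPicture {
    ToyFrame f;             /* embedded frame, may be handed out refcounted */
    uint32_t *mb_type;      /* codec-private tables now live beside .f ... */
    int8_t   *qscale_table; /* ... so giving f away drags no decoder state */
} ToyPicture;

static uint32_t mb_type_at(const ToyPicture *pic, int mb_xy)
{
    return pic->mb_type[mb_xy];   /* was: pic->f.mb_type[mb_xy] */
}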


@@ -34,13 +34,20 @@
 //#undef NDEBUG
 #include <assert.h>
 
+#define COPY_PICTURE(dst, src) \
+do {\
+    *(dst) = *(src);\
+    (dst)->f.extended_data = (dst)->f.data;\
+    (dst)->tf.f = &(dst)->f;\
+} while (0)
+
 static void pic_as_field(Picture *pic, const int parity){
     int i;
     for (i = 0; i < 4; ++i) {
         if (parity == PICT_BOTTOM_FIELD)
             pic->f.data[i] += pic->f.linesize[i];
-        pic->f.reference = parity;
+        pic->reference = parity;
         pic->f.linesize[i] *= 2;
     }
     pic->poc= pic->field_poc[parity == PICT_BOTTOM_FIELD];
@@ -48,10 +55,10 @@ static void pic_as_field(Picture *pic, const int parity){
 static int split_field_copy(Picture *dest, Picture *src,
                             int parity, int id_add){
-    int match = !!(src->f.reference & parity);
+    int match = !!(src->reference & parity);
 
     if (match) {
-        *dest = *src;
+        COPY_PICTURE(dest, src);
         if(parity != PICT_FRAME){
             pic_as_field(dest, parity);
             dest->pic_id *= 2;
@@ -67,9 +74,9 @@ static int build_def_list(Picture *def, Picture **in, int len, int is_long, int
     int index=0;
 
     while(i[0]<len || i[1]<len){
-        while (i[0] < len && !(in[ i[0] ] && (in[ i[0] ]->f.reference & sel)))
+        while (i[0] < len && !(in[ i[0] ] && (in[ i[0] ]->reference & sel)))
             i[0]++;
-        while (i[1] < len && !(in[ i[1] ] && (in[ i[1] ]->f.reference & (sel^3))))
+        while (i[1] < len && !(in[ i[1] ] && (in[ i[1] ]->reference & (sel^3))))
             i[1]++;
         if(i[0] < len){
             in[ i[0] ]->pic_id= is_long ? i[0] : in[ i[0] ]->frame_num;
@@ -133,8 +140,12 @@ int ff_h264_fill_default_ref_list(H264Context *h){
         if(lens[0] == lens[1] && lens[1] > 1){
             for (i = 0; h->default_ref_list[0][i].f.data[0] == h->default_ref_list[1][i].f.data[0] && i < lens[0]; i++);
-            if(i == lens[0])
-                FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]);
+            if (i == lens[0]) {
+                Picture tmp;
+                COPY_PICTURE(&tmp, &h->default_ref_list[1][0]);
+                COPY_PICTURE(&h->default_ref_list[1][0], &h->default_ref_list[1][1]);
+                COPY_PICTURE(&h->default_ref_list[1][1], &tmp);
+            }
         }
     }else{
         len = build_def_list(h->default_ref_list[0] , h->short_ref, h->short_ref_count, 0, h->picture_structure);
@@ -182,13 +193,14 @@ static int pic_num_extract(H264Context *h, int pic_num, int *structure){
 }
 
 int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
-    int list, index, pic_structure;
+    int list, index, pic_structure, i;
 
     print_short_term(h);
     print_long_term(h);
 
     for(list=0; list<h->list_count; list++){
-        memcpy(h->ref_list[list], h->default_ref_list[list], sizeof(Picture)*h->ref_count[list]);
+        for (i = 0; i < h->ref_count[list]; i++)
+            COPY_PICTURE(&h->ref_list[list][i], &h->default_ref_list[list][i]);
 
         if(get_bits1(&h->gb)){
             int pred= h->curr_pic_num;
@@ -225,11 +237,11 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
                     for(i= h->short_ref_count-1; i>=0; i--){
                         ref = h->short_ref[i];
-                        assert(ref->f.reference);
+                        assert(ref->reference);
                         assert(!ref->long_ref);
                         if(
                             ref->frame_num == frame_num &&
-                            (ref->f.reference & pic_structure)
+                            (ref->reference & pic_structure)
                           )
                             break;
                     }
@@ -246,8 +258,8 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
                         return -1;
                     }
                     ref = h->long_ref[long_idx];
-                    assert(!(ref && !ref->f.reference));
-                    if (ref && (ref->f.reference & pic_structure)) {
+                    assert(!(ref && !ref->reference));
+                    if (ref && (ref->reference & pic_structure)) {
                         ref->pic_id= pic_id;
                         assert(ref->long_ref);
                         i=0;
@@ -265,9 +277,9 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
                         break;
                     }
                     for(; i > index; i--){
-                        h->ref_list[list][i]= h->ref_list[list][i-1];
+                        COPY_PICTURE(&h->ref_list[list][i], &h->ref_list[list][i - 1]);
                     }
-                    h->ref_list[list][index]= *ref;
+                    COPY_PICTURE(&h->ref_list[list][index], ref);
                     if (FIELD_PICTURE){
                         pic_as_field(&h->ref_list[list][index], pic_structure);
                     }
@@ -287,7 +299,7 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
             for (i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
                 h->last_pocs[i] = INT_MIN;
             if (h->default_ref_list[list][0].f.data[0])
-                h->ref_list[list][index]= h->default_ref_list[list][0];
+                COPY_PICTURE(&h->ref_list[list][index], &h->default_ref_list[list][0]);
             else
                 return -1;
         }
@@ -303,15 +315,15 @@ void ff_h264_fill_mbaff_ref_list(H264Context *h){
         for(i=0; i<h->ref_count[list]; i++){
             Picture *frame = &h->ref_list[list][i];
             Picture *field = &h->ref_list[list][16+2*i];
-            field[0] = *frame;
+            COPY_PICTURE(field, frame);
             for(j=0; j<3; j++)
                 field[0].f.linesize[j] <<= 1;
-            field[0].f.reference = PICT_TOP_FIELD;
+            field[0].reference = PICT_TOP_FIELD;
             field[0].poc= field[0].field_poc[0];
-            field[1] = field[0];
+            COPY_PICTURE(field + 1, field);
             for(j=0; j<3; j++)
                 field[1].f.data[j] += frame->f.linesize[j];
-            field[1].f.reference = PICT_BOTTOM_FIELD;
+            field[1].reference = PICT_BOTTOM_FIELD;
             field[1].poc= field[1].field_poc[1];
 
             h->luma_weight[16+2*i][list][0] = h->luma_weight[16+2*i+1][list][0] = h->luma_weight[i][list][0];
@@ -337,12 +349,12 @@ void ff_h264_fill_mbaff_ref_list(H264Context *h){
  */
 static inline int unreference_pic(H264Context *h, Picture *pic, int refmask){
     int i;
-    if (pic->f.reference &= refmask) {
+    if (pic->reference &= refmask) {
         return 0;
     } else {
         for(i = 0; h->delayed_pic[i]; i++)
             if(pic == h->delayed_pic[i]){
-                pic->f.reference = DELAYED_PIC_REF;
+                pic->reference = DELAYED_PIC_REF;
                 break;
             }
         return 1;
@@ -498,7 +510,7 @@ int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice)
     if (h->short_ref_count &&
         h->long_ref_count + h->short_ref_count >= h->sps.ref_frame_count &&
-        !(FIELD_PICTURE && !h->first_field && h->cur_pic_ptr->f.reference)) {
+        !(FIELD_PICTURE && !h->first_field && h->cur_pic_ptr->reference)) {
         mmco[0].opcode = MMCO_SHORT2UNUSED;
         mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num;
         mmco_index = 1;
@@ -592,7 +604,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
                 h->long_ref_count++;
             }
 
-            h->cur_pic_ptr->f.reference |= h->picture_structure;
+            h->cur_pic_ptr->reference |= h->picture_structure;
             current_ref_assigned=1;
             break;
         case MMCO_SET_MAX_LONG:
@@ -629,7 +641,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
          */
         if (h->short_ref_count && h->short_ref[0] == h->cur_pic_ptr) {
             /* Just mark the second field valid */
-            h->cur_pic_ptr->f.reference = PICT_FRAME;
+            h->cur_pic_ptr->reference = PICT_FRAME;
         } else if (h->cur_pic_ptr->long_ref) {
             av_log(h->avctx, AV_LOG_ERROR, "illegal short term reference "
                                            "assignment for second field "
@@ -648,7 +660,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
             h->short_ref[0]= h->cur_pic_ptr;
             h->short_ref_count++;
-            h->cur_pic_ptr->f.reference |= h->picture_structure;
+            h->cur_pic_ptr->reference |= h->picture_structure;
         }
     }
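Editorial note: the new COPY_PICTURE macro exists because Picture became self-referential after this merge: `tf.f` must point at the copy's own embedded frame and `extended_data` at its own `data`, so a plain struct assignment (or the FFSWAP and memcpy it replaces above) would leave the copy pointing back into the source. A standalone sketch of that hazard, with invented struct names:

#include <assert.h>

typedef struct Inner { int payload; } Inner;
typedef struct Outer {
    Inner  inner;
    Inner *self_ref;   /* plays the role of Picture.tf.f aimed at Picture.f */
} Outer;

int main(void)
{
    Outer src = { .inner = { 42 } };
    src.self_ref = &src.inner;

    /* Plain assignment copies the pointer verbatim: */
    Outer dst = src;
    assert(dst.self_ref == &src.inner);   /* still aimed at the source */

    /* COPY_PICTURE-style copy: assign, then re-point into the copy. */
    dst = src;
    dst.self_ref = &dst.inner;
    assert(dst.self_ref == &dst.inner);
    return 0;
}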


@@ -256,7 +256,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
     ff_huffyuv_common_init(avctx);
     memset(s->vlc, 0, 3 * sizeof(VLC));
 
-    avctx->coded_frame = &s->picture;
     avcodec_get_frame_defaults(&s->picture);
     s->interlaced = s->height > 288;
@@ -364,7 +363,6 @@ static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
     HYuvContext *s = avctx->priv_data;
     int i;
 
-    avctx->coded_frame= &s->picture;
     if (ff_huffyuv_alloc_temp(s)) {
         ff_huffyuv_common_end(s);
         return AVERROR(ENOMEM);
@@ -473,7 +471,7 @@ static void decode_bgr_bitstream(HYuvContext *s, int count)
     }
 }
 
-static void draw_slice(HYuvContext *s, int y)
+static void draw_slice(HYuvContext *s, AVFrame *frame, int y)
 {
     int h, cy, i;
     int offset[AV_NUM_DATA_POINTERS];
@@ -490,14 +488,14 @@ static void draw_slice(HYuvContext *s, int y)
         cy = y;
     }
 
-    offset[0] = s->picture.linesize[0]*y;
-    offset[1] = s->picture.linesize[1]*cy;
-    offset[2] = s->picture.linesize[2]*cy;
+    offset[0] = frame->linesize[0] * y;
+    offset[1] = frame->linesize[1] * cy;
+    offset[2] = frame->linesize[2] * cy;
     for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
         offset[i] = 0;
     emms_c();
 
-    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
+    s->avctx->draw_horiz_band(s->avctx, frame, offset, y, 3, h);
 
     s->last_slice_end = y + h;
 }
@@ -512,11 +510,10 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     const int width2 = s->width>>1;
     const int height = s->height;
     int fake_ystride, fake_ustride, fake_vstride;
-    AVFrame * const p = &s->picture;
+    ThreadFrame frame = { .f = data };
+    AVFrame * const p = data;
     int table_size = 0, ret;
-    AVFrame *picture = data;
 
     av_fast_padded_malloc(&s->bitstream_buffer,
                           &s->bitstream_buffer_size,
                           buf_size);
@@ -526,11 +523,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer,
                      (const uint32_t*)buf, buf_size / 4);
 
-    if (p->data[0])
-        ff_thread_release_buffer(avctx, p);
-    p->reference = 0;
-    if ((ret = ff_thread_get_buffer(avctx, p)) < 0) {
+    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -601,7 +594,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                         if (y >= s->height) break;
                     }
 
-                    draw_slice(s, y);
+                    draw_slice(s, p, y);
 
                     ydst = p->data[0] + p->linesize[0]*y;
                     udst = p->data[1] + p->linesize[1]*cy;
@@ -623,7 +616,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                     }
                 }
             }
-            draw_slice(s, height);
+            draw_slice(s, p, height);
 
             break;
         case MEDIAN:
@@ -680,7 +673,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                     }
                     if (y >= height) break;
                 }
-                draw_slice(s, y);
+                draw_slice(s, p, y);
 
                 decode_422_bitstream(s, width);
@@ -695,7 +688,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                 }
             }
 
-            draw_slice(s, height);
+            draw_slice(s, p, height);
             break;
         }
     }
@@ -739,7 +732,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                 }
             }
             // just 1 large slice as this is not possible in reverse order
-            draw_slice(s, height);
+            draw_slice(s, p, height);
             break;
         default:
             av_log(avctx, AV_LOG_ERROR,
@@ -753,7 +746,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     }
     emms_c();
 
-    *picture = *p;
     *got_frame = 1;
 
     return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size;
@@ -764,9 +756,6 @@ static av_cold int decode_end(AVCodecContext *avctx)
     HYuvContext *s = avctx->priv_data;
     int i;
 
-    if (s->picture.data[0])
-        avctx->release_buffer(avctx, &s->picture);
-
     ff_huffyuv_common_end(s);
     av_freep(&s->bitstream_buffer);
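Editorial note: the huffyuv hunks show the pattern for frame-threaded decoders after this merge: wrap the caller-provided AVFrame in a ThreadFrame, request the buffer once, and drop the old release/re-reference/copy-out steps. A minimal sketch of the converted callback, assuming the lavc-internal ThreadFrame API of this era (it will not build outside libavcodec, and decode_payload() is an invented placeholder):

static int decode_frame_sketch(AVCodecContext *avctx, void *data,
                               int *got_frame, AVPacket *avpkt)
{
    ThreadFrame frame = { .f = data }; /* wrap the caller's AVFrame */
    AVFrame *const p  = data;
    int ret;

    /* One call replaces the old release_buffer()/get_buffer() pair;
     * the buffer arrives refcounted, owned by the caller's frame. */
    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    decode_payload(avctx->priv_data, p);  /* invented placeholder */

    *got_frame = 1;  /* no trailing "*picture = *p;" copy-out anymore */
    return avpkt->size;
}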


@@ -66,7 +66,6 @@ typedef struct
 typedef struct IdcinContext {
 
     AVCodecContext *avctx;
-    AVFrame frame;
 
     const unsigned char *buf;
     int size;
@@ -168,12 +167,10 @@ static av_cold int idcin_decode_init(AVCodecContext *avctx)
         huff_build_tree(s, i);
     }
 
-    avcodec_get_frame_defaults(&s->frame);
-
     return 0;
 }
 
-static int idcin_decode_vlcs(IdcinContext *s)
+static int idcin_decode_vlcs(IdcinContext *s, AVFrame *frame)
 {
     hnode *hnodes;
     long x, y;
@@ -182,8 +179,8 @@ static int idcin_decode_vlcs(IdcinContext *s)
     int bit_pos, node_num, dat_pos;
 
     prev = bit_pos = dat_pos = 0;
-    for (y = 0; y < (s->frame.linesize[0] * s->avctx->height);
-         y += s->frame.linesize[0]) {
+    for (y = 0; y < (frame->linesize[0] * s->avctx->height);
+         y += frame->linesize[0]) {
         for (x = y; x < y + s->avctx->width; x++) {
             node_num = s->num_huff_nodes[prev];
             hnodes = s->huff_nodes[prev];
@@ -203,7 +200,7 @@ static int idcin_decode_vlcs(IdcinContext *s)
                 bit_pos--;
             }
 
-            s->frame.data[0][x] = node_num;
+            frame->data[0][x] = node_num;
             prev = node_num;
         }
     }
@@ -219,53 +216,39 @@ static int idcin_decode_frame(AVCodecContext *avctx,
     int buf_size = avpkt->size;
     IdcinContext *s = avctx->priv_data;
     const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
+    AVFrame *frame = data;
     int ret;
 
     s->buf = buf;
     s->size = buf_size;
 
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
-
-    if ((ret = ff_get_buffer(avctx, &s->frame))) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
 
-    if (idcin_decode_vlcs(s))
+    if (idcin_decode_vlcs(s, frame))
        return AVERROR_INVALIDDATA;
 
    if (pal) {
-        s->frame.palette_has_changed = 1;
+        frame->palette_has_changed = 1;
        memcpy(s->pal, pal, AVPALETTE_SIZE);
    }
 
    /* make the palette available on the way out */
-    memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
+    memcpy(frame->data[1], s->pal, AVPALETTE_SIZE);
 
    *got_frame = 1;
-    *(AVFrame*)data = s->frame;
 
    /* report that the buffer was completely consumed */
    return buf_size;
 }
 
-static av_cold int idcin_decode_end(AVCodecContext *avctx)
-{
-    IdcinContext *s = avctx->priv_data;
-
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
-
-    return 0;
-}
-
 AVCodec ff_idcin_decoder = {
     .name           = "idcinvideo",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_IDCIN,
     .priv_data_size = sizeof(IdcinContext),
     .init           = idcin_decode_init,
-    .close          = idcin_decode_end,
     .decode         = idcin_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("id Quake II CIN video"),
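Editorial note: idcin illustrates the simplest conversion: the decoder no longer keeps an AVFrame in its private context, so it decodes straight into the frame the caller passes in, and the `.close` callback disappears because there is nothing left to release. A sketch of the shape, with render_payload() as an invented placeholder:

static int decode_frame_sketch(AVCodecContext *avctx, void *data,
                               int *got_frame, AVPacket *avpkt)
{
    AVFrame *frame = data;   /* decode directly into the caller's frame */
    int ret;

    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;

    if (render_payload(avctx->priv_data, frame) < 0)  /* invented */
        return AVERROR_INVALIDDATA;

    *got_frame = 1;          /* no "*(AVFrame*)data = s->frame;" copy */
    return avpkt->size;      /* whole packet consumed */
}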


@@ -40,7 +40,7 @@ typedef enum {
 } mask_type;
 
 typedef struct {
-    AVFrame frame;
+    AVFrame *frame;
     int planesize;
     uint8_t * planebuf;
     uint8_t * ham_buf;      ///< temporary buffer for planar to chunky conversation
@@ -361,11 +361,12 @@ static av_cold int decode_init(AVCodecContext *avctx)
         return AVERROR(ENOMEM);
     s->bpp = avctx->bits_per_coded_sample;
 
-    avcodec_get_frame_defaults(&s->frame);
+    s->frame = av_frame_alloc();
+    if (!s->frame)
+        return AVERROR(ENOMEM);
+
     if ((err = extract_header(avctx, NULL)) < 0)
         return err;
-    s->frame.reference = 3;
 
     return 0;
 }
@@ -662,18 +663,16 @@ static int decode_frame(AVCodecContext *avctx,
     if ((res = extract_header(avctx, avpkt)) < 0)
         return res;
 
-    if (s->init) {
-        if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
-            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
-            return res;
-        }
-    } else if ((res = ff_get_buffer(avctx, &s->frame)) < 0) {
-        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+    if ((res = ff_reget_buffer(avctx, s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return res;
-    } else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt == AV_PIX_FMT_PAL8) {
-        if ((res = cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
+    }
+    if (!s->init && avctx->bits_per_coded_sample <= 8 &&
+        avctx->pix_fmt == AV_PIX_FMT_PAL8) {
+        if ((res = cmap_read_palette(avctx, (uint32_t*)s->frame->data[1])) < 0)
             return res;
-    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB32 && avctx->bits_per_coded_sample <= 8) {
+    } else if (!s->init && avctx->bits_per_coded_sample <= 8 &&
+               avctx->pix_fmt == AV_PIX_FMT_RGB32) {
         if ((res = cmap_read_palette(avctx, s->mask_palbuf)) < 0)
             return res;
     }
@@ -683,18 +682,18 @@ static int decode_frame(AVCodecContext *avctx,
     case 0:
         if (avctx->codec_tag == MKTAG('A','C','B','M')) {
             if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
-                memset(s->frame.data[0], 0, avctx->height * s->frame.linesize[0]);
+                memset(s->frame->data[0], 0, avctx->height * s->frame->linesize[0]);
                 for (plane = 0; plane < s->bpp; plane++) {
                     for(y = 0; y < avctx->height && buf < buf_end; y++ ) {
-                        uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
+                        uint8_t *row = &s->frame->data[0][ y*s->frame->linesize[0] ];
                         decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                         buf += s->planesize;
                     }
                 }
             } else if (s->ham) { // HAM to AV_PIX_FMT_BGR32
-                memset(s->frame.data[0], 0, avctx->height * s->frame.linesize[0]);
+                memset(s->frame->data[0], 0, avctx->height * s->frame->linesize[0]);
                 for(y = 0; y < avctx->height; y++) {
-                    uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
+                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                     memset(s->ham_buf, 0, s->planesize * 8);
                     for (plane = 0; plane < s->bpp; plane++) {
                         const uint8_t * start = buf + (plane * avctx->height + y) * s->planesize;
@@ -711,7 +710,7 @@ static int decode_frame(AVCodecContext *avctx,
                 int raw_width = avctx->width * (av_get_bits_per_pixel(desc) >> 3);
                 int x;
                 for(y = 0; y < avctx->height && buf < buf_end; y++ ) {
-                    uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
+                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                     memcpy(row, buf, FFMIN(raw_width, buf_end - buf));
                     buf += raw_width;
                     if (avctx->pix_fmt == AV_PIX_FMT_BGR32) {
@@ -722,7 +721,7 @@ static int decode_frame(AVCodecContext *avctx,
         } else if (avctx->codec_tag == MKTAG('I','L','B','M')) { // interleaved
             if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                 for(y = 0; y < avctx->height; y++ ) {
-                    uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
+                    uint8_t *row = &s->frame->data[0][ y*s->frame->linesize[0] ];
                     memset(row, 0, avctx->width);
                     for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                         decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
@@ -731,7 +730,7 @@ static int decode_frame(AVCodecContext *avctx,
                 }
             } else if (s->ham) { // HAM to AV_PIX_FMT_BGR32
                 for (y = 0; y < avctx->height; y++) {
-                    uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
+                    uint8_t *row = &s->frame->data[0][ y*s->frame->linesize[0] ];
                     memset(s->ham_buf, 0, s->planesize * 8);
                     for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                         decodeplane8(s->ham_buf, buf, FFMIN(s->planesize, buf_end - buf), plane);
@@ -741,7 +740,7 @@ static int decode_frame(AVCodecContext *avctx,
                 }
             } else { // AV_PIX_FMT_BGR32
                 for(y = 0; y < avctx->height; y++ ) {
-                    uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
+                    uint8_t *row = &s->frame->data[0][y*s->frame->linesize[0]];
                     memset(row, 0, avctx->width << 2);
                     for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                         decodeplane32((uint32_t *) row, buf, FFMIN(s->planesize, buf_end - buf), plane);
@@ -752,13 +751,13 @@ static int decode_frame(AVCodecContext *avctx,
         } else if (avctx->codec_tag == MKTAG('P','B','M',' ')) { // IFF-PBM
             if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                 for(y = 0; y < avctx->height && buf_end > buf; y++ ) {
-                    uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
+                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                     memcpy(row, buf, FFMIN(avctx->width, buf_end - buf));
                     buf += avctx->width + (avctx->width % 2); // padding if odd
                 }
             } else if (s->ham) { // IFF-PBM: HAM to AV_PIX_FMT_BGR32
                 for (y = 0; y < avctx->height && buf_end > buf; y++) {
-                    uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
+                    uint8_t *row = &s->frame->data[0][ y*s->frame->linesize[0] ];
                     memcpy(s->ham_buf, buf, FFMIN(avctx->width, buf_end - buf));
                     buf += avctx->width + (avctx->width & 1); // padding if odd
                     decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize);
@@ -771,7 +770,7 @@ static int decode_frame(AVCodecContext *avctx,
         if (avctx->codec_tag == MKTAG('I','L','B','M')) { //interleaved
             if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                 for(y = 0; y < avctx->height ; y++ ) {
-                    uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
+                    uint8_t *row = &s->frame->data[0][ y*s->frame->linesize[0] ];
                     memset(row, 0, avctx->width);
                     for (plane = 0; plane < s->bpp; plane++) {
                         buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
@@ -780,7 +779,7 @@ static int decode_frame(AVCodecContext *avctx,
                 }
             } else if (avctx->bits_per_coded_sample <= 8) { //8-bit (+ mask) to AV_PIX_FMT_BGR32
                 for (y = 0; y < avctx->height ; y++ ) {
-                    uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
+                    uint8_t *row = &s->frame->data[0][y*s->frame->linesize[0]];
                     memset(s->mask_buf, 0, avctx->width * sizeof(uint32_t));
                     for (plane = 0; plane < s->bpp; plane++) {
                         buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
@@ -790,7 +789,7 @@ static int decode_frame(AVCodecContext *avctx,
                 }
             } else if (s->ham) { // HAM to AV_PIX_FMT_BGR32
                 for (y = 0; y < avctx->height ; y++) {
-                    uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
+                    uint8_t *row = &s->frame->data[0][y*s->frame->linesize[0]];
                     memset(s->ham_buf, 0, s->planesize * 8);
                     for (plane = 0; plane < s->bpp; plane++) {
                         buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
@@ -800,7 +799,7 @@ static int decode_frame(AVCodecContext *avctx,
                 }
             } else { //AV_PIX_FMT_BGR32
                 for(y = 0; y < avctx->height ; y++ ) {
-                    uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
+                    uint8_t *row = &s->frame->data[0][y*s->frame->linesize[0]];
                     memset(row, 0, avctx->width << 2);
                     for (plane = 0; plane < s->bpp; plane++) {
                         buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
@@ -811,12 +810,12 @@ static int decode_frame(AVCodecContext *avctx,
         } else if (avctx->codec_tag == MKTAG('P','B','M',' ')) { // IFF-PBM
             if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                 for(y = 0; y < avctx->height ; y++ ) {
-                    uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
+                    uint8_t *row = &s->frame->data[0][y*s->frame->linesize[0]];
                     buf += decode_byterun(row, avctx->width, buf, buf_end);
                 }
             } else if (s->ham) { // IFF-PBM: HAM to AV_PIX_FMT_BGR32
                 for (y = 0; y < avctx->height ; y++) {
-                    uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
+                    uint8_t *row = &s->frame->data[0][y*s->frame->linesize[0]];
                     buf += decode_byterun(s->ham_buf, avctx->width, buf, buf_end);
                     decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize);
                 }
@@ -825,7 +824,7 @@ static int decode_frame(AVCodecContext *avctx,
     } else if (avctx->codec_tag == MKTAG('D','E','E','P')) { // IFF-DEEP
         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
         if (av_get_bits_per_pixel(desc) == 32)
-            decode_deep_rle32(s->frame.data[0], buf, buf_size, avctx->width, avctx->height, s->frame.linesize[0]);
+            decode_deep_rle32(s->frame->data[0], buf, buf_size, avctx->width, avctx->height, s->frame->linesize[0]);
         else
             return unsupported(avctx);
     }
@@ -833,9 +832,9 @@ static int decode_frame(AVCodecContext *avctx,
     case 4:
         bytestream2_init(&gb, buf, buf_size);
         if (avctx->codec_tag == MKTAG('R','G','B','8'))
-            decode_rgb8(&gb, s->frame.data[0], avctx->width, avctx->height, s->frame.linesize[0]);
+            decode_rgb8(&gb, s->frame->data[0], avctx->width, avctx->height, s->frame->linesize[0]);
         else if (avctx->codec_tag == MKTAG('R','G','B','N'))
-            decode_rgbn(&gb, s->frame.data[0], avctx->width, avctx->height, s->frame.linesize[0]);
+            decode_rgbn(&gb, s->frame->data[0], avctx->width, avctx->height, s->frame->linesize[0]);
         else
             return unsupported(avctx);
         break;
@@ -843,7 +842,7 @@ static int decode_frame(AVCodecContext *avctx,
         if (avctx->codec_tag == MKTAG('D','E','E','P')) {
             const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
             if (av_get_bits_per_pixel(desc) == 32)
-                decode_deep_tvdc32(s->frame.data[0], buf, buf_size, avctx->width, avctx->height, s->frame.linesize[0], s->tvdc);
+                decode_deep_tvdc32(s->frame->data[0], buf, buf_size, avctx->width, avctx->height, s->frame->linesize[0], s->tvdc);
             else
                 return unsupported(avctx);
         } else
@@ -853,16 +852,18 @@ static int decode_frame(AVCodecContext *avctx,
         return unsupported(avctx);
     }
 
+    if ((res = av_frame_ref(data, s->frame)) < 0)
+        return res;
+
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
 
     return buf_size;
 }
 
 static av_cold int decode_end(AVCodecContext *avctx)
 {
     IffContext *s = avctx->priv_data;
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+    av_frame_free(&s->frame);
     av_freep(&s->planebuf);
     av_freep(&s->ham_buf);
     av_freep(&s->ham_palbuf);
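Editorial note: iff shows the other conversion pattern, for decoders that reuse the previous picture: keep one long-lived AVFrame in the context, make it writable each call with ff_reget_buffer(), and hand the caller a new reference via av_frame_ref(). A condensed sketch of that lifecycle, using only calls visible in the hunks above (error logging and the actual rendering are elided):

static av_cold int init_sketch(AVCodecContext *avctx)
{
    IffContext *s = avctx->priv_data;
    s->frame = av_frame_alloc();          /* long-lived, context-owned */
    return s->frame ? 0 : AVERROR(ENOMEM);
}

static int decode_sketch(AVCodecContext *avctx, void *data,
                         int *got_frame, AVPacket *avpkt)
{
    IffContext *s = avctx->priv_data;
    int res;

    if ((res = ff_reget_buffer(avctx, s->frame)) < 0)
        return res;                       /* make prior contents writable */

    /* ... render avpkt into s->frame ... */

    if ((res = av_frame_ref(data, s->frame)) < 0)
        return res;                       /* caller gets its own reference */
    *got_frame = 1;
    return avpkt->size;
}

static av_cold int close_sketch(AVCodecContext *avctx)
{
    IffContext *s = avctx->priv_data;
    av_frame_free(&s->frame);             /* drop the context's reference */
    return 0;
}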

Some files were not shown because too many files have changed in this diff.