Mirror of https://github.com/FFmpeg/FFmpeg.git (synced 2024-12-23 12:43:46 +02:00)
avcodec/codec_internal: Make FFCodec.decode use AVFrame*
This increases type-safety by avoiding conversions from/through void*.
It also avoids the boilerplate "AVFrame *frame = data;" line for
non-subtitle decoders.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
parent fb59a42ef9
commit ce7dbd0481
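The pattern applied throughout the diff below, sketched as a minimal before/after pair (the foo_ decoder name is illustrative only, not one of the files touched by this commit):

/* Before: FFCodec.decode received the output frame as an untyped pointer,
 * so every audio/video decoder started with a cast from void*. */
static int foo_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
{
    AVFrame *frame = data;   /* boilerplate line removed by this commit */
    /* ... decode avpkt into frame ... */
    *got_frame = 1;
    return avpkt->size;
}

/* After: the callback receives the AVFrame* directly. */
static int foo_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                            int *got_frame, AVPacket *avpkt)
{
    /* ... decode avpkt into frame ... */
    *got_frame = 1;
    return avpkt->size;
}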
@@ -36,12 +36,11 @@ static av_cold int zero12v_decode_init(AVCodecContext *avctx)
 return 0;
 }
 
-static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
+static int zero12v_decode_frame(AVCodecContext *avctx, AVFrame *pic,
 int *got_frame, AVPacket *avpkt)
 {
 int line, ret;
 const int width = avctx->width;
-AVFrame *pic = data;
 uint16_t *y, *u, *v;
 const uint8_t *line_end, *src = avpkt->data;
 int stride = avctx->width * 8 / 3;
@@ -834,13 +834,12 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
 return 0;
 }
 
-static int decode_frame(AVCodecContext *avctx, void *data,
+static int decode_frame(AVCodecContext *avctx, AVFrame *picture,
 int *got_frame, AVPacket *avpkt)
 {
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
 FourXContext *const f = avctx->priv_data;
-AVFrame *picture = data;
 int i, frame_4cc, frame_size, ret;
 
 if (buf_size < 20)
@@ -54,10 +54,9 @@ typedef struct EightBpsContext {
 uint32_t pal[256];
 } EightBpsContext;
 
-static int decode_frame(AVCodecContext *avctx, void *data,
+static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame, AVPacket *avpkt)
 {
-AVFrame *frame = data;
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
 EightBpsContext * const c = avctx->priv_data;
@@ -86,11 +86,10 @@ static void delta_decode(uint8_t *dst, const uint8_t *src, int src_size,
 }
 
 /** decode a frame */
-static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
+static int eightsvx_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
 EightSvxContext *esc = avctx->priv_data;
-AVFrame *frame = data;
 int channels = avctx->ch_layout.nb_channels;
 int buf_size;
 int ch, ret;
@@ -480,7 +480,7 @@ static int read_audio_mux_element(struct LATMContext *latmctx,
 }
 
 
-static int latm_decode_frame(AVCodecContext *avctx, void *out,
+static int latm_decode_frame(AVCodecContext *avctx, AVFrame *out,
 int *got_frame_ptr, AVPacket *avpkt)
 {
 struct LATMContext *latmctx = avctx->priv_data;
@@ -3235,7 +3235,7 @@ static int aac_decode_er_frame(AVCodecContext *avctx, void *data,
 return 0;
 }
 
-static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
+static int aac_decode_frame_int(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, GetBitContext *gb,
 const AVPacket *avpkt)
 {
@@ -3248,7 +3248,7 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
 int payload_alignment;
 uint8_t che_presence[4][MAX_ELEM_ID] = {{0}};
 
-ac->frame = data;
+ac->frame = frame;
 
 if (show_bits(gb, 12) == 0xfff) {
 if ((err = parse_adts_frame_header(ac, gb)) < 0) {
@@ -3437,9 +3437,9 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
 &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO);
 if (is_dmono) {
 if (ac->dmono_mode == 1)
-((AVFrame *)data)->data[1] =((AVFrame *)data)->data[0];
+frame->data[1] = frame->data[0];
 else if (ac->dmono_mode == 2)
-((AVFrame *)data)->data[0] =((AVFrame *)data)->data[1];
+frame->data[0] = frame->data[1];
 }
 
 return 0;
@@ -3448,7 +3448,7 @@ fail:
 return err;
 }
 
-static int aac_decode_frame(AVCodecContext *avctx, void *data,
+static int aac_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
 AACContext *ac = avctx->priv_data;
@@ -3495,10 +3495,10 @@ static int aac_decode_frame(AVCodecContext *avctx, void *data,
 case AOT_ER_AAC_LTP:
 case AOT_ER_AAC_LD:
 case AOT_ER_AAC_ELD:
-err = aac_decode_er_frame(avctx, data, got_frame_ptr, &gb);
+err = aac_decode_er_frame(avctx, frame, got_frame_ptr, &gb);
 break;
 default:
-err = aac_decode_frame_int(avctx, data, got_frame_ptr, &gb, avpkt);
+err = aac_decode_frame_int(avctx, frame, got_frame_ptr, &gb, avpkt);
 }
 if (err < 0)
 return err;
@@ -78,9 +78,8 @@ static av_cold int aasc_decode_init(AVCodecContext *avctx)
 return 0;
 }
 
-static int aasc_decode_frame(AVCodecContext *avctx,
-void *data, int *got_frame,
-AVPacket *avpkt)
+static int aasc_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
+int *got_frame, AVPacket *avpkt)
 {
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
@@ -134,7 +133,7 @@ static int aasc_decode_frame(AVCodecContext *avctx,
 memcpy(s->frame->data[1], s->palette, s->palette_size);
 
 *got_frame = 1;
-if ((ret = av_frame_ref(data, s->frame)) < 0)
+if ((ret = av_frame_ref(rframe, s->frame)) < 0)
 return ret;
 
 /* report that the buffer was completely consumed */
@@ -1481,10 +1481,9 @@ static int decode_audio_block(AC3DecodeContext *s, int blk, int offset)
 /**
 * Decode a single AC-3 frame.
 */
-static int ac3_decode_frame(AVCodecContext * avctx, void *data,
+static int ac3_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
-AVFrame *frame = data;
 const uint8_t *buf = avpkt->data;
 int buf_size, full_buf_size = avpkt->size;
 AC3DecodeContext *s = avctx->priv_data;
@@ -1062,10 +1062,9 @@ static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
 return nb_samples;
 }
 
-static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
+static int adpcm_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
-AVFrame *frame = data;
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
 ADPCMDecodeContext *c = avctx->priv_data;
@@ -93,10 +93,9 @@ static int adx_decode(ADXContext *c, int16_t *out, int offset,
 return 0;
 }
 
-static int adx_decode_frame(AVCodecContext *avctx, void *data,
+static int adx_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
-AVFrame *frame = data;
 int buf_size = avpkt->size;
 ADXContext *c = avctx->priv_data;
 int16_t **samples;
@@ -1094,13 +1094,12 @@ static int decode_huffman2(AVCodecContext *avctx, int header, int size)
 return 0;
 }
 
-static int decode_frame(AVCodecContext *avctx, void *data,
+static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame, AVPacket *avpkt)
 {
 AGMContext *s = avctx->priv_data;
 GetBitContext *gb = &s->gb;
 GetByteContext *gbyte = &s->gbyte;
-AVFrame *frame = data;
 int w, h, width, height, header;
 unsigned compressed_size;
 long skip;
@@ -381,8 +381,8 @@ static int aic_decode_slice(AICContext *ctx, int mb_x, int mb_y,
 return 0;
 }
 
-static int aic_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
-AVPacket *avpkt)
+static int aic_decode_frame(AVCodecContext *avctx, AVFrame *frame,
+int *got_frame, AVPacket *avpkt)
 {
 AICContext *ctx = avctx->priv_data;
 const uint8_t *buf = avpkt->data;
@@ -392,7 +392,7 @@ static int aic_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 int x, y, ret;
 int slice_size;
 
-ctx->frame = data;
+ctx->frame = frame;
 ctx->frame->pict_type = AV_PICTURE_TYPE_I;
 ctx->frame->key_frame = 1;
 
@@ -413,11 +413,10 @@ static int decode_element(AVCodecContext *avctx, AVFrame *frame, int ch_index,
 return 0;
 }
 
-static int alac_decode_frame(AVCodecContext *avctx, void *data,
+static int alac_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
 ALACContext *alac = avctx->priv_data;
-AVFrame *frame = data;
 enum AlacRawDataBlockType element;
 int channels;
 int ch, ret, got_end;
@@ -28,10 +28,9 @@
 
 #define ALIAS_HEADER_SIZE 10
 
-static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
-AVPacket *avpkt)
+static int decode_frame(AVCodecContext *avctx, AVFrame *f,
+int *got_frame, AVPacket *avpkt)
 {
-AVFrame *f = data;
 GetByteContext gb;
 int width, height, ret, bits_pixel, pixel;
 uint8_t *out_buf;
@@ -1791,11 +1791,10 @@ static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
 
 /** Decode an ALS frame.
 */
-static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
-AVPacket *avpkt)
+static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
+int *got_frame_ptr, AVPacket *avpkt)
 {
 ALSDecContext *ctx = avctx->priv_data;
-AVFrame *frame = data;
 ALSSpecificConfig *sconf = &ctx->sconf;
 const uint8_t *buffer = avpkt->data;
 int buffer_size = avpkt->size;
@@ -955,12 +955,11 @@ static void postfilter(AMRContext *p, float *lpc, float *buf_out)
 
 /// @}
 
-static int amrnb_decode_frame(AVCodecContext *avctx, void *data,
+static int amrnb_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
 
 AMRChannelsContext *s = avctx->priv_data; // pointer to private data
-AVFrame *frame = data;
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
 int ret;
@@ -1102,11 +1102,10 @@ static void update_sub_state(AMRWBContext *ctx)
 LP_ORDER_16k * sizeof(float));
 }
 
-static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
+static int amrwb_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
 AMRWBChannelsContext *s = avctx->priv_data;
-AVFrame *frame = data;
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
 int sub, i, ret;
@@ -108,9 +108,8 @@ exhausted:
 return 1;
 }
 
-static int decode_frame(AVCodecContext *avctx,
-void *data, int *got_frame,
-AVPacket *avpkt)
+static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
+int *got_frame, AVPacket *avpkt)
 {
 AnmContext *s = avctx->priv_data;
 const int buf_size = avpkt->size;
@@ -176,7 +175,7 @@ static int decode_frame(AVCodecContext *avctx,
 memcpy(s->frame->data[1], s->palette, AVPALETTE_SIZE);
 
 *got_frame = 1;
-if ((ret = av_frame_ref(data, s->frame)) < 0)
+if ((ret = av_frame_ref(rframe, s->frame)) < 0)
 return ret;
 
 return buf_size;
@@ -354,9 +354,8 @@ static int execute_code(AVCodecContext * avctx, int c)
 return 0;
 }
 
-static int decode_frame(AVCodecContext *avctx,
-void *data, int *got_frame,
-AVPacket *avpkt)
+static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
+int *got_frame, AVPacket *avpkt)
 {
 AnsiContext *s = avctx->priv_data;
 uint8_t *buf = avpkt->data;
@@ -463,7 +462,7 @@ static int decode_frame(AVCodecContext *avctx,
 }
 
 *got_frame = 1;
-if ((ret = av_frame_ref(data, s->frame)) < 0)
+if ((ret = av_frame_ref(rframe, s->frame)) < 0)
 return ret;
 return buf_size;
 }
@@ -1461,10 +1461,9 @@ static void ape_unpack_stereo(APEContext *ctx, int count)
 }
 }
 
-static int ape_decode_frame(AVCodecContext *avctx, void *data,
+static int ape_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
-AVFrame *frame = data;
 const uint8_t *buf = avpkt->data;
 APEContext *s = avctx->priv_data;
 uint8_t *sample8;
@@ -137,11 +137,10 @@ static int aptx_decode_samples(AptXContext *ctx,
 return ret;
 }
 
-static int aptx_decode_frame(AVCodecContext *avctx, void *data,
+static int aptx_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
 AptXContext *s = avctx->priv_data;
-AVFrame *frame = data;
 int pos, opos, channel, sample, ret;
 
 if (avpkt->size < s->block_size) {
@@ -116,11 +116,10 @@ static int fill_tileX(AVCodecContext *avctx, int tile_width, int tile_height,
 return pixels_overwritten;
 }
 
-static int decode_frame(AVCodecContext *avctx, void *data,
+static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame, AVPacket *avpkt)
 {
 ARBCContext *s = avctx->priv_data;
-AVFrame *frame = data;
 int ret, nb_segments;
 int prev_pixels = avctx->width * avctx->height;
 
@@ -599,7 +599,7 @@ static int decode_rle(AVCodecContext *avctx, AVFrame *frame)
 return 0;
 }
 
-static int decode_frame(AVCodecContext *avctx, void *data,
+static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
 int *got_frame, AVPacket *avpkt)
 {
 ArgoContext *s = avctx->priv_data;
@@ -665,7 +665,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
 if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
 memcpy(frame->data[1], s->pal, AVPALETTE_SIZE);
 
-if ((ret = av_frame_ref(data, s->frame)) < 0)
+if ((ret = av_frame_ref(rframe, s->frame)) < 0)
 return ret;
 
 frame->pict_type = s->key ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
@@ -213,13 +213,12 @@ static inline void idct_put(ASV1Context *a, AVFrame *frame, int mb_x, int mb_y)
 }
 }
 
-static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
-AVPacket *avpkt)
+static int decode_frame(AVCodecContext *avctx, AVFrame *p,
+int *got_frame, AVPacket *avpkt)
 {
 ASV1Context *const a = avctx->priv_data;
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
-AVFrame *const p = data;
 int mb_x, mb_y, ret;
 
 if (buf_size * 8LL < a->mb_height * a->mb_width * 13LL)
@@ -273,10 +273,9 @@ static void at1_subband_synthesis(AT1Ctx *q, AT1SUCtx* su, float *pOut)
 }
 
 
-static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
+static int atrac1_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
-AVFrame *frame = data;
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
 AT1Ctx *q = avctx->priv_data;
@@ -790,10 +790,9 @@ static int al_decode_frame(AVCodecContext *avctx, const uint8_t *databuf,
 return 0;
 }
 
-static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
+static int atrac3_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
-AVFrame *frame = data;
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
 ATRAC3Context *q = avctx->priv_data;
@@ -830,10 +829,9 @@ static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
 return avctx->block_align;
 }
 
-static int atrac3al_decode_frame(AVCodecContext *avctx, void *data,
+static int atrac3al_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
-AVFrame *frame = data;
 int ret;
 
 frame->nb_samples = SAMPLES_PER_FRAME;
@@ -331,11 +331,10 @@ static void reconstruct_frame(ATRAC3PContext *ctx, Atrac3pChanUnitCtx *ch_unit,
 FFSWAP(Atrac3pWaveSynthParams *, ch_unit->waves_info, ch_unit->waves_info_prev);
 }
 
-static int atrac3p_decode_frame(AVCodecContext *avctx, void *data,
+static int atrac3p_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
 ATRAC3PContext *ctx = avctx->priv_data;
-AVFrame *frame = data;
 int i, ret, ch_unit_id, ch_block = 0, out_ch_index = 0, channels_to_process;
 float **samples_p = (float **)frame->extended_data;
 
@@ -787,12 +787,11 @@ imdct:
 return 0;
 }
 
-static int atrac9_decode_frame(AVCodecContext *avctx, void *data,
+static int atrac9_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
 int ret;
 GetBitContext gb;
-AVFrame *frame = data;
 ATRAC9Context *s = avctx->priv_data;
 const int frames = FFMIN(avpkt->size / s->avg_frame_size, s->frame_count);
 
@@ -478,11 +478,10 @@ static void ffat_copy_samples(AVCodecContext *avctx, AVFrame *frame)
 }
 }
 
-static int ffat_decode(AVCodecContext *avctx, void *data,
+static int ffat_decode(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
 ATDecodeContext *at = avctx->priv_data;
-AVFrame *frame = data;
 int pkt_size = avpkt->size;
 OSStatus ret;
 AudioBufferList out_buffers;
@@ -38,11 +38,9 @@ static av_cold int aura_decode_init(AVCodecContext *avctx)
 return 0;
 }
 
-static int aura_decode_frame(AVCodecContext *avctx,
-void *data, int *got_frame,
-AVPacket *pkt)
+static int aura_decode_frame(AVCodecContext *avctx, AVFrame *frame,
+int *got_frame, AVPacket *pkt)
 {
-AVFrame *frame = data;
 uint8_t *Y, *U, *V;
 uint8_t val;
 int x, y, ret;
@@ -999,7 +999,7 @@ static int get_current_frame(AVCodecContext *avctx)
 return ret;
 }
 
-static int av1_decode_frame(AVCodecContext *avctx, void *frame,
+static int av1_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame, AVPacket *pkt)
 {
 AV1DecContext *s = avctx->priv_data;
@@ -50,11 +50,10 @@ static av_cold int init(AVCodecContext *avctx)
 return 0;
 }
 
-static int decode_frame(AVCodecContext *avctx, void *data,
+static int decode_frame(AVCodecContext *avctx, AVFrame *p,
 int *got_frame, AVPacket *avpkt)
 {
 AVRnContext *a = avctx->priv_data;
-AVFrame *p = data;
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
 int y, ret, true_height;
@@ -43,15 +43,13 @@ typedef enum {
 } AvsVideoSubType;
 
 
-static int
-avs_decode_frame(AVCodecContext * avctx,
-void *data, int *got_frame, AVPacket *avpkt)
+static int avs_decode_frame(AVCodecContext * avctx, AVFrame *picture,
+int *got_frame, AVPacket *avpkt)
 {
 const uint8_t *buf = avpkt->data;
 const uint8_t *buf_end = avpkt->data + avpkt->size;
 int buf_size = avpkt->size;
 AvsContext *const avs = avctx->priv_data;
-AVFrame *picture = data;
 AVFrame *const p = avs->frame;
 const uint8_t *table, *vect;
 uint8_t *out;
@@ -31,11 +31,10 @@ static av_cold int avui_decode_init(AVCodecContext *avctx)
 return 0;
 }
 
-static int avui_decode_frame(AVCodecContext *avctx, void *data,
+static int avui_decode_frame(AVCodecContext *avctx, AVFrame *pic,
 int *got_frame, AVPacket *avpkt)
 {
 int ret;
-AVFrame *pic = data;
 const uint8_t *src = avpkt->data, *extradata = avctx->extradata;
 const uint8_t *srca;
 uint8_t *y, *u, *v, *a;
@@ -67,9 +67,8 @@ static int set_palette(BethsoftvidContext *ctx)
 return 0;
 }
 
-static int bethsoftvid_decode_frame(AVCodecContext *avctx,
-void *data, int *got_frame,
-AVPacket *avpkt)
+static int bethsoftvid_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
+int *got_frame, AVPacket *avpkt)
 {
 BethsoftvidContext * vid = avctx->priv_data;
 char block_type;
@@ -144,7 +143,7 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx,
 }
 end:
 
-if ((ret = av_frame_ref(data, vid->frame)) < 0)
+if ((ret = av_frame_ref(rframe, vid->frame)) < 0)
 return ret;
 
 *got_frame = 1;
@@ -48,10 +48,9 @@ static av_cold int bfi_decode_init(AVCodecContext *avctx)
 return 0;
 }
 
-static int bfi_decode_frame(AVCodecContext *avctx, void *data,
+static int bfi_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame, AVPacket *avpkt)
 {
-AVFrame *frame = data;
 GetByteContext g;
 int buf_size = avpkt->size;
 BFIContext *bfi = avctx->priv_data;
@@ -1253,10 +1253,10 @@ end:
 return 0;
 }
 
-static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt)
+static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
+int *got_frame, AVPacket *pkt)
 {
 BinkContext * const c = avctx->priv_data;
-AVFrame *frame = data;
 GetBitContext gb;
 int plane, plane_idx, ret;
 int bits_count = pkt->size << 3;
@@ -140,9 +140,8 @@ static void draw_char(AVCodecContext *avctx, int c, int a)
 }
 }
 
-static int decode_frame(AVCodecContext *avctx,
-void *data, int *got_frame,
-AVPacket *avpkt)
+static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
+int *got_frame, AVPacket *avpkt)
 {
 XbinContext *s = avctx->priv_data;
 const uint8_t *buf = avpkt->data;
@@ -153,7 +152,7 @@ static int decode_frame(AVCodecContext *avctx,
 if ((avctx->width / FONT_WIDTH) * (avctx->height / s->font_height) / 256 > buf_size)
 return AVERROR_INVALIDDATA;
 
-s->frame = data;
+s->frame = frame;
 s->x = s->y = 0;
 if ((ret = ff_get_buffer(avctx, s->frame, 0)) < 0)
 return ret;
@@ -122,12 +122,11 @@ static av_cold int bitpacked_init_decoder(AVCodecContext *avctx)
 return 0;
 }
 
-static int bitpacked_decode(AVCodecContext *avctx, void *data, int *got_frame,
-AVPacket *avpkt)
+static int bitpacked_decode(AVCodecContext *avctx, AVFrame *frame,
+int *got_frame, AVPacket *avpkt)
 {
 struct BitpackedContext *bc = avctx->priv_data;
 int buf_size = avpkt->size;
-AVFrame *frame = data;
 int res;
 
 res = bc->decode(avctx, frame, avpkt);
@@ -28,13 +28,11 @@
 #include "internal.h"
 #include "msrledec.h"
 
-static int bmp_decode_frame(AVCodecContext *avctx,
-void *data, int *got_frame,
-AVPacket *avpkt)
+static int bmp_decode_frame(AVCodecContext *avctx, AVFrame *p,
+int *got_frame, AVPacket *avpkt)
 {
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
-AVFrame *p = data;
 unsigned int fsize, hsize;
 int width, height;
 unsigned int depth;
@@ -39,10 +39,9 @@ static av_cold int bmv_aud_decode_init(AVCodecContext *avctx)
 return 0;
 }
 
-static int bmv_aud_decode_frame(AVCodecContext *avctx, void *data,
+static int bmv_aud_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
-AVFrame *frame = data;
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
 int blocks = 0, total_blocks, i;
@@ -196,11 +196,10 @@ static int decode_bmv_frame(const uint8_t *source, int src_len, uint8_t *frame,
 }
 }
 
-static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
-AVPacket *pkt)
+static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
+int *got_frame, AVPacket *pkt)
 {
 BMVDecContext * const c = avctx->priv_data;
-AVFrame *frame = data;
 int type, scr_off;
 int i, ret;
 uint8_t *srcptr, *outptr;
@@ -130,11 +130,9 @@ static int pix_decode_header(PixHeader *out, GetByteContext *pgb)
 return 0;
 }
 
-static int pix_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
-AVPacket *avpkt)
+static int pix_decode_frame(AVCodecContext *avctx, AVFrame *frame,
+int *got_frame, AVPacket *avpkt)
 {
-AVFrame *frame = data;
-
 int ret, i;
 GetByteContext gb;
 
@@ -120,7 +120,7 @@ static inline void draw_n_color(uint8_t *out, int stride, int width,
 }
 }
 
-static int decode_frame(AVCodecContext *avctx, void *data,
+static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
 int *got_frame, AVPacket *avpkt)
 {
 const uint8_t *buf = avpkt->data;
@@ -251,7 +251,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
 memcpy(newpic->data[1], oldpic->data[1], 256 * 4);
 }
 
-if ((ret = av_frame_ref(data, newpic)) < 0)
+if ((ret = av_frame_ref(rframe, newpic)) < 0)
 return ret;
 *got_frame = 1;
 
@@ -1226,8 +1226,8 @@ static void cavs_flush(AVCodecContext * avctx)
 h->got_keyframe = 0;
 }
 
-static int cavs_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
-AVPacket *avpkt)
+static int cavs_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
+int *got_frame, AVPacket *avpkt)
 {
 AVSContext *h = avctx->priv_data;
 const uint8_t *buf = avpkt->data;
@@ -1241,7 +1241,7 @@ static int cavs_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 if (buf_size == 0) {
 if (!h->low_delay && h->DPB[0].f->data[0]) {
 *got_frame = 1;
-av_frame_move_ref(data, h->DPB[0].f);
+av_frame_move_ref(rframe, h->DPB[0].f);
 }
 return 0;
 }
@@ -1274,7 +1274,7 @@ static int cavs_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 return AVERROR_INVALIDDATA;
 frame_start ++;
 if (*got_frame)
-av_frame_unref(data);
+av_frame_unref(rframe);
 *got_frame = 0;
 if (!h->got_keyframe)
 break;
@@ -1285,13 +1285,13 @@ static int cavs_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 *got_frame = 1;
 if (h->cur.f->pict_type != AV_PICTURE_TYPE_B) {
 if (h->DPB[!h->low_delay].f->data[0]) {
-if ((ret = av_frame_ref(data, h->DPB[!h->low_delay].f)) < 0)
+if ((ret = av_frame_ref(rframe, h->DPB[!h->low_delay].f)) < 0)
 return ret;
 } else {
 *got_frame = 0;
 }
 } else {
-av_frame_move_ref(data, h->cur.f);
+av_frame_move_ref(rframe, h->cur.f);
 }
 break;
 case EXT_START_CODE:
@@ -262,15 +262,14 @@ static void cdg_scroll(CDGraphicsContext *cc, uint8_t *data,
 
 }
 
-static int cdg_decode_frame(AVCodecContext *avctx,
-void *data, int *got_frame, AVPacket *avpkt)
+static int cdg_decode_frame(AVCodecContext *avctx, AVFrame *frame,
+int *got_frame, AVPacket *avpkt)
 {
 GetByteContext gb;
 int buf_size = avpkt->size;
 int ret;
 uint8_t command, inst;
 uint8_t cdg_data[CDG_DATA_SIZE] = {0};
-AVFrame *frame = data;
 CDGraphicsContext *cc = avctx->priv_data;
 
 if (buf_size < CDG_MINIMUM_PKT_SIZE) {
@@ -151,7 +151,7 @@ static int cdtoons_render_sprite(AVCodecContext *avctx, const uint8_t *data,
 return 0;
 }
 
-static int cdtoons_decode_frame(AVCodecContext *avctx, void *data,
+static int cdtoons_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
 int *got_frame, AVPacket *avpkt)
 {
 CDToonsContext *c = avctx->priv_data;
@@ -398,7 +398,7 @@ done:
 
 memcpy(c->frame->data[1], c->pal, AVPALETTE_SIZE);
 
-if ((ret = av_frame_ref(data, c->frame)) < 0)
+if ((ret = av_frame_ref(rframe, c->frame)) < 0)
 return ret;
 
 *got_frame = 1;
@@ -243,11 +243,10 @@ static void cdxl_decode_ham8(CDXLVideoContext *c, AVFrame *frame)
 }
 }
 
-static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
+static int cdxl_decode_frame(AVCodecContext *avctx, AVFrame *p,
 int *got_frame, AVPacket *pkt)
 {
 CDXLVideoContext *c = avctx->priv_data;
-AVFrame * const p = data;
 int ret, w, h, encoding, aligned_width, buf_size = pkt->size;
 const uint8_t *buf = pkt->data;
 
@@ -373,13 +373,12 @@ static int alloc_buffers(AVCodecContext *avctx)
 return 0;
 }
 
-static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
-AVPacket *avpkt)
+static int cfhd_decode(AVCodecContext *avctx, AVFrame *pic,
+int *got_frame, AVPacket *avpkt)
 {
 CFHDContext *s = avctx->priv_data;
 CFHDDSPContext *dsp = &s->dsp;
 GetByteContext gb;
-AVFrame *const pic = data;
 int ret = 0, i, j, plane, got_buffer = 0;
 int16_t *coeff_data;
 
@@ -449,9 +449,8 @@ static av_cold int cinepak_decode_init(AVCodecContext *avctx)
 return 0;
 }
 
-static int cinepak_decode_frame(AVCodecContext *avctx,
-void *data, int *got_frame,
-AVPacket *avpkt)
+static int cinepak_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
+int *got_frame, AVPacket *avpkt)
 {
 const uint8_t *buf = avpkt->data;
 int ret = 0, buf_size = avpkt->size;
@@ -489,7 +488,7 @@ static int cinepak_decode_frame(AVCodecContext *avctx,
 if (s->palette_video)
 memcpy (s->frame->data[1], s->pal, AVPALETTE_SIZE);
 
-if ((ret = av_frame_ref(data, s->frame)) < 0)
+if ((ret = av_frame_ref(rframe, s->frame)) < 0)
 return ret;
 
 *got_frame = 1;
@@ -499,7 +499,7 @@ static void extend_edges(AVFrame *buf, int tile_size)
 }
 }
 
-static int clv_decode_frame(AVCodecContext *avctx, void *data,
+static int clv_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
 int *got_frame, AVPacket *avpkt)
 {
 const uint8_t *buf = avpkt->data;
@@ -638,7 +638,7 @@ static int clv_decode_frame(AVCodecContext *avctx, void *data,
 c->pic->pict_type = AV_PICTURE_TYPE_P;
 }
 
-if ((ret = av_frame_ref(data, c->pic)) < 0)
+if ((ret = av_frame_ref(rframe, c->pic)) < 0)
 return ret;
 
 FFSWAP(AVFrame *, c->pic, c->prev);
@@ -29,14 +29,12 @@
 #include "get_bits.h"
 #include "internal.h"
 
-static int decode_frame(AVCodecContext *avctx,
-void *data, int *got_frame,
-AVPacket *avpkt)
+static int decode_frame(AVCodecContext *avctx, AVFrame *p,
+int *got_frame, AVPacket *avpkt)
 {
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
 GetBitContext gb;
-AVFrame * const p = data;
 int x, y, ret;
 
 if (avctx->height <= 0 || avctx->width <= 0) {
@@ -355,11 +355,10 @@ static int decode_yuv_frame(CLLCContext *ctx, GetBitContext *gb, AVFrame *pic)
 return 0;
 }
 
-static int cllc_decode_frame(AVCodecContext *avctx, void *data,
+static int cllc_decode_frame(AVCodecContext *avctx, AVFrame *pic,
 int *got_picture_ptr, AVPacket *avpkt)
 {
 CLLCContext *ctx = avctx->priv_data;
-AVFrame *pic = data;
 uint8_t *src = avpkt->data;
 uint32_t info_tag, info_offset;
 int data_size;
@@ -102,10 +102,9 @@ static void cng_decode_flush(AVCodecContext *avctx)
 p->inited = 0;
 }
 
-static int cng_decode_frame(AVCodecContext *avctx, void *data,
+static int cng_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
-AVFrame *frame = data;
 CNGContext *p = avctx->priv_data;
 int buf_size = avpkt->size;
 int ret, i;
@@ -148,17 +148,17 @@ typedef struct FFCodec {
 int (*encode2)(struct AVCodecContext *avctx, struct AVPacket *avpkt,
 const struct AVFrame *frame, int *got_packet_ptr);
 /**
-* Decode picture data.
+* Decode to an AVFrame.
 *
 * @param avctx codec context
-* @param outdata codec type dependent output struct
+* @param frame AVFrame for output
 * @param[out] got_frame_ptr decoder sets to 0 or 1 to indicate that a
 * non-empty frame was returned in outdata.
 * @param[in] avpkt AVPacket containing the data to be decoded
 * @return amount of bytes read from the packet on success, negative error
 * code on failure
 */
-int (*decode)(struct AVCodecContext *avctx, void *outdata,
+int (*decode)(struct AVCodecContext *avctx, struct AVFrame *frame,
 int *got_frame_ptr, struct AVPacket *avpkt);
 /**
 * Decode subtitle data. Same as decode except that it uses
@@ -978,10 +978,9 @@ static int decode_subpacket(COOKContext *q, COOKSubpacket *p,
 }
 
 
-static int cook_decode_frame(AVCodecContext *avctx, void *data,
+static int cook_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
-AVFrame *frame = data;
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
 COOKContext *q = avctx->priv_data;
@@ -48,8 +48,8 @@ typedef struct {
 } CpiaContext;
 
 
-static int cpia_decode_frame(AVCodecContext *avctx,
-void *data, int *got_frame, AVPacket* avpkt)
+static int cpia_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
+int *got_frame, AVPacket* avpkt)
 {
 CpiaContext* const cpia = avctx->priv_data;
 int i,j,ret;
@@ -185,7 +185,7 @@ static int cpia_decode_frame(AVCodecContext *avctx,
 }
 
 *got_frame = 1;
-if ((ret = av_frame_ref(data, cpia->frame)) < 0)
+if ((ret = av_frame_ref(rframe, cpia->frame)) < 0)
 return ret;
 
 return avpkt->size;
@@ -170,7 +170,7 @@ static void unpack_10bit(GetByteContext *gb, uint16_t *dst, int shift,
 }
 }
 
-static int cri_decode_frame(AVCodecContext *avctx, void *data,
+static int cri_decode_frame(AVCodecContext *avctx, AVFrame *p,
 int *got_frame, AVPacket *avpkt)
 {
 CRIContext *s = avctx->priv_data;
@@ -178,7 +178,6 @@ static int cri_decode_frame(AVCodecContext *avctx, void *data,
 int ret, bps, hflip = 0, vflip = 0;
 AVFrameSideData *rotation;
 int compressed = 0;
-AVFrame *p = data;
 
 s->data = NULL;
 s->data_size = 0;
@@ -65,8 +65,8 @@ static void add_frame_default(AVFrame *f, const uint8_t *src,
 }
 }
 
-static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
-AVPacket *avpkt)
+static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
+int *got_frame, AVPacket *avpkt)
 {
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
@@ -123,7 +123,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 }
 
 *got_frame = 1;
-if ((ret = av_frame_ref(data, c->pic)) < 0)
+if ((ret = av_frame_ref(rframe, c->pic)) < 0)
 return ret;
 
 return buf_size;
@@ -59,14 +59,12 @@ static av_cold int cyuv_decode_init(AVCodecContext *avctx)
 return 0;
 }
 
-static int cyuv_decode_frame(AVCodecContext *avctx,
-void *data, int *got_frame,
-AVPacket *avpkt)
+static int cyuv_decode_frame(AVCodecContext *avctx, AVFrame *frame,
+int *got_frame, AVPacket *avpkt)
 {
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
 CyuvDecodeContext *s=avctx->priv_data;
-AVFrame *frame = data;
 
 unsigned char *y_plane;
 unsigned char *u_plane;
@@ -147,11 +147,10 @@ void ff_dca_downmix_to_stereo_float(AVFloatDSPContext *fdsp, float **samples,
 }
 }
 
-static int dcadec_decode_frame(AVCodecContext *avctx, void *data,
+static int dcadec_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
 DCAContext *s = avctx->priv_data;
-AVFrame *frame = data;
 uint8_t *input = avpkt->data;
 int input_size = avpkt->size;
 int i, ret, prev_packet = s->packet;
@@ -606,12 +606,11 @@ static void run_postproc(AVCodecContext *avctx, AVFrame *frame)
 }
 }
 
-static int dds_decode(AVCodecContext *avctx, void *data,
+static int dds_decode(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame, AVPacket *avpkt)
 {
 DDSContext *ctx = avctx->priv_data;
 GetByteContext *gbc = &ctx->gbc;
-AVFrame *frame = data;
 int mipmap;
 int ret;
 int width, height;
@@ -337,11 +337,9 @@ static const char chunk_name[8][5] = {
 "COPY", "TSW1", "BDLT", "WDLT", "TDLT", "DSW1", "BLCK", "DDS1"
 };
 
-static int dfa_decode_frame(AVCodecContext *avctx,
-void *data, int *got_frame,
-AVPacket *avpkt)
+static int dfa_decode_frame(AVCodecContext *avctx, AVFrame *frame,
+int *got_frame, AVPacket *avpkt)
 {
-AVFrame *frame = data;
 DfaContext *s = avctx->priv_data;
 GetByteContext gb;
 const uint8_t *buf = avpkt->data;
@@ -100,11 +100,10 @@ static av_cold int dfpwm_dec_init(struct AVCodecContext *ctx)
 return 0;
 }
 
-static int dfpwm_dec_frame(struct AVCodecContext *ctx, void *data,
+static int dfpwm_dec_frame(struct AVCodecContext *ctx, AVFrame *frame,
 int *got_frame, struct AVPacket *packet)
 {
 DFPWMState *state = ctx->priv_data;
-AVFrame *frame = data;
 int ret;
 
 if (packet->size * 8LL % ctx->ch_layout.nb_channels)
@@ -2260,10 +2260,10 @@ static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int
 return 0;
 }
 
-static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt)
+static int dirac_decode_frame(AVCodecContext *avctx, AVFrame *picture,
+int *got_frame, AVPacket *pkt)
 {
 DiracContext *s = avctx->priv_data;
-AVFrame *picture = data;
 uint8_t *buf = pkt->data;
 int buf_size = pkt->size;
 int i, buf_idx = 0;
@@ -2282,7 +2282,7 @@ static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 
 /* end of stream, so flush delayed pics */
 if (buf_size == 0)
-return get_delayed_pic(s, (AVFrame *)data, got_frame);
+return get_delayed_pic(s, picture, got_frame);
 
 for (;;) {
 /*[DIRAC_STD] Here starts the code from parse_info() defined in 9.6
@@ -2339,13 +2339,13 @@ static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 
 if (delayed_frame) {
 delayed_frame->reference ^= DELAYED_PIC_REF;
-if((ret=av_frame_ref(data, delayed_frame->avframe)) < 0)
+if((ret = av_frame_ref(picture, delayed_frame->avframe)) < 0)
 return ret;
 *got_frame = 1;
 }
 } else if (s->current_picture->avframe->display_picture_number == s->frame_number) {
 /* The right frame at the right time :-) */
-if((ret=av_frame_ref(data, s->current_picture->avframe)) < 0)
+if((ret = av_frame_ref(picture, s->current_picture->avframe)) < 0)
 return ret;
 *got_frame = 1;
 }
@@ -613,13 +613,12 @@ static int dnxhd_decode_row(AVCodecContext *avctx, void *data,
 return 0;
 }
 
-static int dnxhd_decode_frame(AVCodecContext *avctx, void *data,
+static int dnxhd_decode_frame(AVCodecContext *avctx, AVFrame *picture,
 int *got_frame, AVPacket *avpkt)
 {
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
 DNXHDContext *ctx = avctx->priv_data;
-AVFrame *picture = data;
 int first_field = 1;
 int ret, i;
 
@@ -1084,7 +1084,7 @@ static int filter_frame(DBEDecodeContext *s, AVFrame *frame)
 return 0;
 }
 
-static int dolby_e_decode_frame(AVCodecContext *avctx, void *data,
+static int dolby_e_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
 DBEDecodeContext *s1 = avctx->priv_data;
@@ -1135,7 +1135,7 @@ static int dolby_e_decode_frame(AVCodecContext *avctx, void *data,
 return ret;
 if ((ret = parse_meter(s1)) < 0)
 return ret;
-if ((ret = filter_frame(s1, data)) < 0)
+if ((ret = filter_frame(s1, frame)) < 0)
 return ret;
 
 *got_frame_ptr = 1;
@@ -207,12 +207,11 @@ static av_cold int dpcm_decode_init(AVCodecContext *avctx)
 }
 
 
-static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
+static int dpcm_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
 int buf_size = avpkt->size;
 DPCMContext *s = avctx->priv_data;
-AVFrame *frame = data;
 int out = 0, ret;
 int predictor[2];
 int ch = 0;
@@ -150,14 +150,11 @@ static uint16_t read12in32(const uint8_t **ptr, uint32_t *lbuf,
 }
 }
 
-static int decode_frame(AVCodecContext *avctx,
-void *data,
-int *got_frame,
-AVPacket *avpkt)
+static int decode_frame(AVCodecContext *avctx, AVFrame *p,
+int *got_frame, AVPacket *avpkt)
 {
 const uint8_t *buf = avpkt->data;
 int buf_size = avpkt->size;
-AVFrame *const p = data;
 uint8_t *ptr[AV_NUM_DATA_POINTERS];
 uint32_t header_version, version = 0;
 char creator[101] = { 0 };
@@ -95,11 +95,10 @@ static int dsd_channel(AVCodecContext *avctx, void *tdata, int j, int threadnr)
 return 0;
 }
 
-static int decode_frame(AVCodecContext *avctx, void *data,
+static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
 int *got_frame_ptr, AVPacket *avpkt)
 {
 ThreadData td;
-AVFrame *frame = data;
 int ret;
 
 frame->nb_samples = avpkt->size / avctx->ch_layout.nb_channels;
@ -87,10 +87,9 @@ static av_cold int cinaudio_decode_init(AVCodecContext *avctx)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int cinaudio_decode_frame(AVCodecContext *avctx, void *data,
|
static int cinaudio_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||||
int *got_frame_ptr, AVPacket *avpkt)
|
int *got_frame_ptr, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
AVFrame *frame = data;
|
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
CinAudioContext *cin = avctx->priv_data;
|
CinAudioContext *cin = avctx->priv_data;
|
||||||
const uint8_t *buf_end = buf + avpkt->size;
|
const uint8_t *buf_end = buf + avpkt->size;
|
||||||
|
@ -194,9 +194,8 @@ static int cin_decode_rle(const unsigned char *src, int src_size,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int cinvideo_decode_frame(AVCodecContext *avctx,
|
static int cinvideo_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
|
||||||
void *data, int *got_frame,
|
int *got_frame, AVPacket *avpkt)
|
||||||
AVPacket *avpkt)
|
|
||||||
{
|
{
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
@ -303,7 +302,7 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
|
|||||||
FFSWAP(uint8_t *, cin->bitmap_table[CIN_CUR_BMP],
|
FFSWAP(uint8_t *, cin->bitmap_table[CIN_CUR_BMP],
|
||||||
cin->bitmap_table[CIN_PRE_BMP]);
|
cin->bitmap_table[CIN_PRE_BMP]);
|
||||||
|
|
||||||
if ((res = av_frame_ref(data, cin->frame)) < 0)
|
if ((res = av_frame_ref(rframe, cin->frame)) < 0)
|
||||||
return res;
|
return res;
|
||||||
|
|
||||||
*got_frame = 1;
|
*got_frame = 1;
|
||||||
|
@ -740,11 +740,10 @@ static int dss_sp_decode_one_frame(DssSpContext *p,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int dss_sp_decode_frame(AVCodecContext *avctx, void *data,
|
static int dss_sp_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||||
int *got_frame_ptr, AVPacket *avpkt)
|
int *got_frame_ptr, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
DssSpContext *p = avctx->priv_data;
|
DssSpContext *p = avctx->priv_data;
|
||||||
AVFrame *frame = data;
|
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
|
|
||||||
|
@ -237,7 +237,7 @@ static void build_filter(int16_t table[DST_MAX_ELEMENTS][16][256], const Table *
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int decode_frame(AVCodecContext *avctx, void *data,
|
static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||||
int *got_frame_ptr, AVPacket *avpkt)
|
int *got_frame_ptr, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
unsigned samples_per_frame = DST_SAMPLES_PER_FRAME(avctx->sample_rate);
|
unsigned samples_per_frame = DST_SAMPLES_PER_FRAME(avctx->sample_rate);
|
||||||
@ -249,7 +249,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
DSTContext *s = avctx->priv_data;
|
DSTContext *s = avctx->priv_data;
|
||||||
GetBitContext *gb = &s->gb;
|
GetBitContext *gb = &s->gb;
|
||||||
ArithCoder *ac = &s->ac;
|
ArithCoder *ac = &s->ac;
|
||||||
AVFrame *frame = data;
|
|
||||||
uint8_t *dsd;
|
uint8_t *dsd;
|
||||||
float *pcm;
|
float *pcm;
|
||||||
int ret;
|
int ret;
|
||||||
|
@ -85,11 +85,10 @@ static inline uint16_t dv_audio_12to16(uint16_t sample)
|
|||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int decode_frame(AVCodecContext *avctx, void *data,
|
static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||||
int *got_frame_ptr, AVPacket *pkt)
|
int *got_frame_ptr, AVPacket *pkt)
|
||||||
{
|
{
|
||||||
DVAudioContext *s = avctx->priv_data;
|
DVAudioContext *s = avctx->priv_data;
|
||||||
AVFrame *frame = data;
|
|
||||||
const uint8_t *src = pkt->data;
|
const uint8_t *src = pkt->data;
|
||||||
int16_t *dst;
|
int16_t *dst;
|
||||||
int ret, i;
|
int ret, i;
|
||||||
|
@ -607,13 +607,12 @@ retry:
|
|||||||
|
|
||||||
/* NOTE: exactly one frame must be given (120000 bytes for NTSC,
|
/* NOTE: exactly one frame must be given (120000 bytes for NTSC,
|
||||||
* 144000 bytes for PAL - or twice those for 50Mbps) */
|
* 144000 bytes for PAL - or twice those for 50Mbps) */
|
||||||
static int dvvideo_decode_frame(AVCodecContext *avctx, void *data,
|
static int dvvideo_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||||
int *got_frame, AVPacket *avpkt)
|
int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
uint8_t *buf = avpkt->data;
|
uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
DVVideoContext *s = avctx->priv_data;
|
DVVideoContext *s = avctx->priv_data;
|
||||||
AVFrame *const frame = data;
|
|
||||||
const uint8_t *vsc_pack;
|
const uint8_t *vsc_pack;
|
||||||
int apt, is16_9, ret;
|
int apt, is16_9, ret;
|
||||||
const AVDVProfile *sys;
|
const AVDVProfile *sys;
|
||||||
|
@ -208,9 +208,9 @@ static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
|
static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||||
|
int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
AVFrame *frame = data;
|
|
||||||
DxaDecContext * const c = avctx->priv_data;
|
DxaDecContext * const c = avctx->priv_data;
|
||||||
uint8_t *outptr, *srcptr, *tmpptr;
|
uint8_t *outptr, *srcptr, *tmpptr;
|
||||||
unsigned long dsize;
|
unsigned long dsize;
|
||||||
|
@ -784,10 +784,9 @@ static int dxtory_decode_v2_444(AVCodecContext *avctx, AVFrame *pic,
|
|||||||
AV_PIX_FMT_YUV444P, vflipped);
|
AV_PIX_FMT_YUV444P, vflipped);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
static int decode_frame(AVCodecContext *avctx, AVFrame *pic,
|
||||||
AVPacket *avpkt)
|
int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
AVFrame *pic = data;
|
|
||||||
const uint8_t *src = avpkt->data;
|
const uint8_t *src = avpkt->data;
|
||||||
uint32_t type;
|
uint32_t type;
|
||||||
int vflipped, ret;
|
int vflipped, ret;
|
||||||
|
@ -1038,11 +1038,10 @@ static int dxv_decompress_raw(AVCodecContext *avctx)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int dxv_decode(AVCodecContext *avctx, void *data,
|
static int dxv_decode(AVCodecContext *avctx, AVFrame *frame,
|
||||||
int *got_frame, AVPacket *avpkt)
|
int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
DXVContext *ctx = avctx->priv_data;
|
DXVContext *ctx = avctx->priv_data;
|
||||||
AVFrame *const frame = data;
|
|
||||||
GetByteContext *gbc = &ctx->gbc;
|
GetByteContext *gbc = &ctx->gbc;
|
||||||
int (*decompress_tex)(AVCodecContext *avctx);
|
int (*decompress_tex)(AVCodecContext *avctx);
|
||||||
const char *msgcomp, *msgtext;
|
const char *msgcomp, *msgtext;
|
||||||
|
@ -169,15 +169,13 @@ static int cmv_process_header(CmvContext *s, const uint8_t *buf, const uint8_t *
|
|||||||
#define EA_PREAMBLE_SIZE 8
|
#define EA_PREAMBLE_SIZE 8
|
||||||
#define MVIh_TAG MKTAG('M', 'V', 'I', 'h')
|
#define MVIh_TAG MKTAG('M', 'V', 'I', 'h')
|
||||||
|
|
||||||
static int cmv_decode_frame(AVCodecContext *avctx,
|
static int cmv_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||||
void *data, int *got_frame,
|
int *got_frame, AVPacket *avpkt)
|
||||||
AVPacket *avpkt)
|
|
||||||
{
|
{
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
CmvContext *s = avctx->priv_data;
|
CmvContext *s = avctx->priv_data;
|
||||||
const uint8_t *buf_end = buf + buf_size;
|
const uint8_t *buf_end = buf + buf_size;
|
||||||
AVFrame *frame = data;
|
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (buf_end - buf < EA_PREAMBLE_SIZE)
|
if (buf_end - buf < EA_PREAMBLE_SIZE)
|
||||||
|
@ -247,14 +247,12 @@ static void calc_quant_matrix(MadContext *s, int qscale)
|
|||||||
s->quant_matrix[i] = (ff_inv_aanscales[i]*ff_mpeg1_default_intra_matrix[i]*qscale + 32) >> 10;
|
s->quant_matrix[i] = (ff_inv_aanscales[i]*ff_mpeg1_default_intra_matrix[i]*qscale + 32) >> 10;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int decode_frame(AVCodecContext *avctx,
|
static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||||
void *data, int *got_frame,
|
int *got_frame, AVPacket *avpkt)
|
||||||
AVPacket *avpkt)
|
|
||||||
{
|
{
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
MadContext *s = avctx->priv_data;
|
MadContext *s = avctx->priv_data;
|
||||||
AVFrame *frame = data;
|
|
||||||
GetByteContext gb;
|
GetByteContext gb;
|
||||||
int width, height;
|
int width, height;
|
||||||
int chunk_type;
|
int chunk_type;
|
||||||
|
@ -201,14 +201,12 @@ static void tgq_calculate_qtable(TgqContext *s, int quant)
|
|||||||
ff_inv_aanscales[j * 8 + i]) >> (14 - 4);
|
ff_inv_aanscales[j * 8 + i]) >> (14 - 4);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int tgq_decode_frame(AVCodecContext *avctx,
|
static int tgq_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||||
void *data, int *got_frame,
|
int *got_frame, AVPacket *avpkt)
|
||||||
AVPacket *avpkt)
|
|
||||||
{
|
{
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
TgqContext *s = avctx->priv_data;
|
TgqContext *s = avctx->priv_data;
|
||||||
AVFrame *frame = data;
|
|
||||||
int x, y, ret;
|
int x, y, ret;
|
||||||
int big_endian;
|
int big_endian;
|
||||||
|
|
||||||
|
@ -262,15 +262,13 @@ static int tgv_decode_inter(TgvContext *s, AVFrame *frame,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int tgv_decode_frame(AVCodecContext *avctx,
|
static int tgv_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||||
void *data, int *got_frame,
|
int *got_frame, AVPacket *avpkt)
|
||||||
AVPacket *avpkt)
|
|
||||||
{
|
{
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
TgvContext *s = avctx->priv_data;
|
TgvContext *s = avctx->priv_data;
|
||||||
const uint8_t *buf_end = buf + buf_size;
|
const uint8_t *buf_end = buf + buf_size;
|
||||||
AVFrame *frame = data;
|
|
||||||
int chunk_type, ret;
|
int chunk_type, ret;
|
||||||
|
|
||||||
if (buf_end - buf < EA_PREAMBLE_SIZE)
|
if (buf_end - buf < EA_PREAMBLE_SIZE)
|
||||||
|
@ -124,15 +124,13 @@ static void tqi_calculate_qtable(TqiContext *t, int quant)
|
|||||||
t->intra_matrix[i] = (ff_inv_aanscales[i] * ff_mpeg1_default_intra_matrix[i] * qscale + 32) >> 14;
|
t->intra_matrix[i] = (ff_inv_aanscales[i] * ff_mpeg1_default_intra_matrix[i] * qscale + 32) >> 14;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int tqi_decode_frame(AVCodecContext *avctx,
|
static int tqi_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||||
void *data, int *got_frame,
|
int *got_frame, AVPacket *avpkt)
|
||||||
AVPacket *avpkt)
|
|
||||||
{
|
{
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
const uint8_t *buf_end = buf+buf_size;
|
const uint8_t *buf_end = buf+buf_size;
|
||||||
TqiContext *t = avctx->priv_data;
|
TqiContext *t = avctx->priv_data;
|
||||||
AVFrame *frame = data;
|
|
||||||
int ret, w, h;
|
int ret, w, h;
|
||||||
|
|
||||||
if (buf_size < 12)
|
if (buf_size < 12)
|
||||||
|
@ -196,13 +196,11 @@ static const uint16_t mask_matrix[] = {0x1, 0x2, 0x10, 0x20,
|
|||||||
0x100, 0x200, 0x1000, 0x2000,
|
0x100, 0x200, 0x1000, 0x2000,
|
||||||
0x400, 0x800, 0x4000, 0x8000};
|
0x400, 0x800, 0x4000, 0x8000};
|
||||||
|
|
||||||
static int escape124_decode_frame(AVCodecContext *avctx,
|
static int escape124_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||||
void *data, int *got_frame,
|
int *got_frame, AVPacket *avpkt)
|
||||||
AVPacket *avpkt)
|
|
||||||
{
|
{
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
Escape124Context *s = avctx->priv_data;
|
Escape124Context *s = avctx->priv_data;
|
||||||
AVFrame *frame = data;
|
|
||||||
|
|
||||||
GetBitContext gb;
|
GetBitContext gb;
|
||||||
unsigned frame_flags, frame_size;
|
unsigned frame_flags, frame_size;
|
||||||
|
@ -187,12 +187,11 @@ static int decode_skip_count(GetBitContext* gb)
|
|||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int escape130_decode_frame(AVCodecContext *avctx, void *data,
|
static int escape130_decode_frame(AVCodecContext *avctx, AVFrame *pic,
|
||||||
int *got_frame, AVPacket *avpkt)
|
int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
Escape130Context *s = avctx->priv_data;
|
Escape130Context *s = avctx->priv_data;
|
||||||
AVFrame *pic = data;
|
|
||||||
GetBitContext gb;
|
GetBitContext gb;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
@ -741,11 +741,10 @@ static void frame_erasure(EVRCContext *e, float *samples)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int evrc_decode_frame(AVCodecContext *avctx, void *data,
|
static int evrc_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||||
int *got_frame_ptr, AVPacket *avpkt)
|
int *got_frame_ptr, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
AVFrame *frame = data;
|
|
||||||
EVRCContext *e = avctx->priv_data;
|
EVRCContext *e = avctx->priv_data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
float ilspf[FILTER_ORDER], ilpc[FILTER_ORDER], idelay[NB_SUBFRAMES];
|
float ilspf[FILTER_ORDER], ilpc[FILTER_ORDER], idelay[NB_SUBFRAMES];
|
||||||
|
@ -2023,12 +2023,11 @@ fail:
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int decode_frame(AVCodecContext *avctx, void *data,
|
static int decode_frame(AVCodecContext *avctx, AVFrame *picture,
|
||||||
int *got_frame, AVPacket *avpkt)
|
int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
EXRContext *s = avctx->priv_data;
|
EXRContext *s = avctx->priv_data;
|
||||||
GetByteContext *gb = &s->gb;
|
GetByteContext *gb = &s->gb;
|
||||||
AVFrame *picture = data;
|
|
||||||
uint8_t *ptr;
|
uint8_t *ptr;
|
||||||
|
|
||||||
int i, y, ret, ymax;
|
int i, y, ret, ymax;
|
||||||
|
@ -105,12 +105,11 @@ static void set_sample(int i, int j, int v, float *result, int *pads, float valu
|
|||||||
result[i * 64 + pads[i] + j * 3] = value * (2 * v - 7);
|
result[i * 64 + pads[i] + j * 3] = value * (2 * v - 7);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int fastaudio_decode(AVCodecContext *avctx, void *data,
|
static int fastaudio_decode(AVCodecContext *avctx, AVFrame *frame,
|
||||||
int *got_frame, AVPacket *pkt)
|
int *got_frame, AVPacket *pkt)
|
||||||
{
|
{
|
||||||
FastAudioContext *s = avctx->priv_data;
|
FastAudioContext *s = avctx->priv_data;
|
||||||
GetByteContext gb;
|
GetByteContext gb;
|
||||||
AVFrame *frame = data;
|
|
||||||
int subframes;
|
int subframes;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
@ -831,7 +831,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
|
static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
|
||||||
|
int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
uint8_t *buf = avpkt->data;
|
uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
@ -969,7 +970,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
|
|||||||
|
|
||||||
if (f->last_picture.f)
|
if (f->last_picture.f)
|
||||||
ff_thread_release_ext_buffer(avctx, &f->last_picture);
|
ff_thread_release_ext_buffer(avctx, &f->last_picture);
|
||||||
if ((ret = av_frame_ref(data, f->picture.f)) < 0)
|
if ((ret = av_frame_ref(rframe, f->picture.f)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
*got_frame = 1;
|
*got_frame = 1;
|
||||||
|
@ -413,11 +413,10 @@ static void wavesynth_enter_intervals(struct wavesynth_context *ws, int64_t ts)
|
|||||||
*last = -1;
|
*last = -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int wavesynth_decode(AVCodecContext *avc, void *rframe, int *rgot_frame,
|
static int wavesynth_decode(AVCodecContext *avc, AVFrame *frame,
|
||||||
AVPacket *packet)
|
int *rgot_frame, AVPacket *packet)
|
||||||
{
|
{
|
||||||
struct wavesynth_context *ws = avc->priv_data;
|
struct wavesynth_context *ws = avc->priv_data;
|
||||||
AVFrame *frame = rframe;
|
|
||||||
int64_t ts;
|
int64_t ts;
|
||||||
int duration;
|
int duration;
|
||||||
int s, c, r;
|
int s, c, r;
|
||||||
|
@ -267,7 +267,7 @@ static void fic_draw_cursor(AVCodecContext *avctx, int cur_x, int cur_y)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int fic_decode_frame(AVCodecContext *avctx, void *data,
|
static int fic_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
|
||||||
int *got_frame, AVPacket *avpkt)
|
int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
FICContext *ctx = avctx->priv_data;
|
FICContext *ctx = avctx->priv_data;
|
||||||
@ -436,7 +436,7 @@ static int fic_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
|
|
||||||
skip:
|
skip:
|
||||||
*got_frame = 1;
|
*got_frame = 1;
|
||||||
if ((ret = av_frame_ref(data, ctx->final_frame)) < 0)
|
if ((ret = av_frame_ref(rframe, ctx->final_frame)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
return avpkt->size;
|
return avpkt->size;
|
||||||
|
@ -181,9 +181,9 @@ static int fits_read_header(AVCodecContext *avctx, const uint8_t **ptr, FITSHead
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int fits_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
|
static int fits_decode_frame(AVCodecContext *avctx, AVFrame *p,
|
||||||
|
int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
AVFrame *p=data;
|
|
||||||
const uint8_t *ptr8 = avpkt->data, *end;
|
const uint8_t *ptr8 = avpkt->data, *end;
|
||||||
uint8_t t8;
|
uint8_t t8;
|
||||||
int16_t t16;
|
int16_t t16;
|
||||||
|
@ -554,10 +554,9 @@ static int decode_frame(FLACContext *s)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int flac_decode_frame(AVCodecContext *avctx, void *data,
|
static int flac_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||||
int *got_frame_ptr, AVPacket *avpkt)
|
int *got_frame_ptr, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
AVFrame *frame = data;
|
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
FLACContext *s = avctx->priv_data;
|
FLACContext *s = avctx->priv_data;
|
||||||
|
@ -263,7 +263,7 @@ static int flashsv_decode_block(AVCodecContext *avctx, const AVPacket *avpkt,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
|
static int flashsv_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
|
||||||
int *got_frame, AVPacket *avpkt)
|
int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
@ -480,7 +480,7 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
s->frame->linesize[0] * avctx->height);
|
s->frame->linesize[0] * avctx->height);
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((ret = av_frame_ref(data, s->frame)) < 0)
|
if ((ret = av_frame_ref(rframe, s->frame)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
*got_frame = 1;
|
*got_frame = 1;
|
||||||
|
@ -149,7 +149,7 @@ static av_cold int flic_decode_init(AVCodecContext *avctx)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||||
void *data, int *got_frame,
|
AVFrame *rframe, int *got_frame,
|
||||||
const uint8_t *buf, int buf_size)
|
const uint8_t *buf, int buf_size)
|
||||||
{
|
{
|
||||||
FlicDecodeContext *s = avctx->priv_data;
|
FlicDecodeContext *s = avctx->priv_data;
|
||||||
@ -479,7 +479,7 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
|||||||
s->new_palette = 0;
|
s->new_palette = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((ret = av_frame_ref(data, s->frame)) < 0)
|
if ((ret = av_frame_ref(rframe, s->frame)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
*got_frame = 1;
|
*got_frame = 1;
|
||||||
@ -488,7 +488,7 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
|
static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
|
||||||
void *data, int *got_frame,
|
AVFrame *rframe, int *got_frame,
|
||||||
const uint8_t *buf, int buf_size)
|
const uint8_t *buf, int buf_size)
|
||||||
{
|
{
|
||||||
/* Note, the only difference between the 15Bpp and 16Bpp */
|
/* Note, the only difference between the 15Bpp and 16Bpp */
|
||||||
@ -781,7 +781,7 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
|
|||||||
av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
|
av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
|
||||||
"and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2));
|
"and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2));
|
||||||
|
|
||||||
if ((ret = av_frame_ref(data, s->frame)) < 0)
|
if ((ret = av_frame_ref(rframe, s->frame)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
*got_frame = 1;
|
*got_frame = 1;
|
||||||
@ -790,7 +790,7 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int flic_decode_frame_24BPP(AVCodecContext *avctx,
|
static int flic_decode_frame_24BPP(AVCodecContext *avctx,
|
||||||
void *data, int *got_frame,
|
AVFrame *rframe, int *got_frame,
|
||||||
const uint8_t *buf, int buf_size)
|
const uint8_t *buf, int buf_size)
|
||||||
{
|
{
|
||||||
FlicDecodeContext *s = avctx->priv_data;
|
FlicDecodeContext *s = avctx->priv_data;
|
||||||
@ -1061,7 +1061,7 @@ static int flic_decode_frame_24BPP(AVCodecContext *avctx,
|
|||||||
av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
|
av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
|
||||||
"and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2));
|
"and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2));
|
||||||
|
|
||||||
if ((ret = av_frame_ref(data, s->frame)) < 0)
|
if ((ret = av_frame_ref(rframe, s->frame)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
*got_frame = 1;
|
*got_frame = 1;
|
||||||
@ -1069,21 +1069,20 @@ static int flic_decode_frame_24BPP(AVCodecContext *avctx,
|
|||||||
return buf_size;
|
return buf_size;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int flic_decode_frame(AVCodecContext *avctx,
|
static int flic_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||||
void *data, int *got_frame,
|
int *got_frame, AVPacket *avpkt)
|
||||||
AVPacket *avpkt)
|
|
||||||
{
|
{
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
|
if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
|
||||||
return flic_decode_frame_8BPP(avctx, data, got_frame,
|
return flic_decode_frame_8BPP(avctx, frame, got_frame,
|
||||||
buf, buf_size);
|
buf, buf_size);
|
||||||
} else if ((avctx->pix_fmt == AV_PIX_FMT_RGB555) ||
|
} else if ((avctx->pix_fmt == AV_PIX_FMT_RGB555) ||
|
||||||
(avctx->pix_fmt == AV_PIX_FMT_RGB565)) {
|
(avctx->pix_fmt == AV_PIX_FMT_RGB565)) {
|
||||||
return flic_decode_frame_15_16BPP(avctx, data, got_frame,
|
return flic_decode_frame_15_16BPP(avctx, frame, got_frame,
|
||||||
buf, buf_size);
|
buf, buf_size);
|
||||||
} else if (avctx->pix_fmt == AV_PIX_FMT_BGR24) {
|
} else if (avctx->pix_fmt == AV_PIX_FMT_BGR24) {
|
||||||
return flic_decode_frame_24BPP(avctx, data, got_frame,
|
return flic_decode_frame_24BPP(avctx, frame, got_frame,
|
||||||
buf, buf_size);
|
buf, buf_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -394,13 +394,12 @@ static int decode_type1(GetByteContext *gb, PutByteContext *pb)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int decode_frame(AVCodecContext *avctx, void *data,
|
static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||||
int *got_frame, AVPacket *avpkt)
|
int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
FMVCContext *s = avctx->priv_data;
|
FMVCContext *s = avctx->priv_data;
|
||||||
GetByteContext *gb = &s->gb;
|
GetByteContext *gb = &s->gb;
|
||||||
PutByteContext *pb = &s->pb;
|
PutByteContext *pb = &s->pb;
|
||||||
AVFrame *frame = data;
|
|
||||||
int ret, y, x;
|
int ret, y, x;
|
||||||
|
|
||||||
if (avpkt->size < 8)
|
if (avpkt->size < 8)
|
||||||
|
@ -133,14 +133,12 @@ static int fraps2_decode_plane(FrapsContext *s, uint8_t *dst, int stride, int w,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int decode_frame(AVCodecContext *avctx,
|
static int decode_frame(AVCodecContext *avctx, AVFrame *f,
|
||||||
void *data, int *got_frame,
|
int *got_frame, AVPacket *avpkt)
|
||||||
AVPacket *avpkt)
|
|
||||||
{
|
{
|
||||||
FrapsContext * const s = avctx->priv_data;
|
FrapsContext * const s = avctx->priv_data;
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
AVFrame * const f = data;
|
|
||||||
uint32_t header;
|
uint32_t header;
|
||||||
unsigned int version,header_size;
|
unsigned int version,header_size;
|
||||||
unsigned int x, y;
|
unsigned int x, y;
|
||||||
|
@ -42,12 +42,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
static int decode_frame(AVCodecContext *avctx, AVFrame *pic,
|
||||||
AVPacket *avpkt)
|
int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
FRWUContext *s = avctx->priv_data;
|
FRWUContext *s = avctx->priv_data;
|
||||||
int field, ret;
|
int field, ret;
|
||||||
AVFrame *pic = data;
|
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
const uint8_t *buf_end = buf + avpkt->size;
|
const uint8_t *buf_end = buf + avpkt->size;
|
||||||
|
|
||||||
|
@ -1372,13 +1372,12 @@ static void g2m_paint_cursor(G2MContext *c, uint8_t *dst, int stride)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int g2m_decode_frame(AVCodecContext *avctx, void *data,
|
static int g2m_decode_frame(AVCodecContext *avctx, AVFrame *pic,
|
||||||
int *got_picture_ptr, AVPacket *avpkt)
|
int *got_picture_ptr, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
const uint8_t *buf = avpkt->data;
|
const uint8_t *buf = avpkt->data;
|
||||||
int buf_size = avpkt->size;
|
int buf_size = avpkt->size;
|
||||||
G2MContext *c = avctx->priv_data;
|
G2MContext *c = avctx->priv_data;
|
||||||
AVFrame *pic = data;
|
|
||||||
GetByteContext bc, tbc;
|
GetByteContext bc, tbc;
|
||||||
int magic;
|
int magic;
|
||||||
int got_header = 0;
|
int got_header = 0;
|
||||||
|
Some files were not shown because too many files have changed in this diff.
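Every hunk above applies the same mechanical rewrite: the untyped void *data parameter of the decode callback becomes AVFrame *, the "AVFrame *frame = data;" cast line is dropped, and calls such as av_frame_ref() take the parameter directly. As a hedged sketch only, not code from this commit (foo_decode_frame, FooContext and its cur_frame field are hypothetical names), a decoder written against the updated callback type looks roughly like this:

#include "libavutil/frame.h"
#include "avcodec.h"

typedef struct FooContext {
    AVFrame *cur_frame; /* hypothetical: frame assembled by earlier decoding steps */
} FooContext;

/* New-style decode callback: the output frame already arrives typed as
 * AVFrame *, so no boilerplate cast from void* is needed and the frame
 * can be passed straight to helpers such as av_frame_ref(). */
static int foo_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                            int *got_frame, AVPacket *avpkt)
{
    FooContext *s = avctx->priv_data;
    int ret;

    if ((ret = av_frame_ref(frame, s->cur_frame)) < 0)
        return ret;

    *got_frame = 1;
    return avpkt->size;
}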