
wmapro: decode directly to the user-provided AVFrame

Justin Ruggles 2012-12-23 20:16:21 -05:00
parent 9873d71f31
commit f4a283eec4
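
For orientation, this follows the pattern libavcodec's audio decoders were being converted to at the time: the decode callback treats its void *data argument as the caller's AVFrame, requests a buffer for it with ff_get_buffer(), and writes the decoded samples straight into that frame, so the decoder context no longer needs to carry an AVFrame of its own or copy it out at the end. A minimal decoder-side sketch of that pattern (simplified, with hypothetical names; not the actual wmaprodec.c code):

    /* libavcodec-internal headers; ff_get_buffer() is declared in internal.h */
    #include <string.h>
    #include "avcodec.h"
    #include "internal.h"

    static int example_decode_packet(AVCodecContext *avctx, void *data,
                                     int *got_frame_ptr, AVPacket *avpkt)
    {
        AVFrame *frame = data;      /* output frame supplied by the caller */
        int i, ret;

        /* ask for an output buffer on the caller's frame */
        frame->nb_samples = 512;    /* however many samples avpkt decodes to */
        if ((ret = ff_get_buffer(avctx, frame)) < 0)
            return ret;

        /* write planar float samples directly into the caller's frame
           instead of a context-owned AVFrame (silence here, for brevity) */
        for (i = 0; i < avctx->channels; i++)
            memset(frame->extended_data[i], 0,
                   frame->nb_samples * sizeof(float));

        *got_frame_ptr = 1;         /* nothing left to copy out afterwards */
        return avpkt->size;
    }

The final hunk below removes exactly that copy-out step (*(AVFrame *)data = s->frame), which becomes unnecessary once decode_frame() fills the passed-in frame directly.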

libavcodec/wmaprodec.c

@@ -169,7 +169,6 @@ typedef struct {
 typedef struct WMAProDecodeCtx {
     /* generic decoder variables */
     AVCodecContext*  avctx;                         ///< codec context for av_log
-    AVFrame          frame;                         ///< AVFrame for decoded output
     AVFloatDSPContext fdsp;
     uint8_t          frame_data[MAX_FRAMESIZE +
                      FF_INPUT_BUFFER_PADDING_SIZE];///< compressed frame data
@@ -461,9 +460,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
     avctx->channel_layout = channel_mask;
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
@@ -1293,7 +1289,7 @@ static int decode_subframe(WMAProDecodeCtx *s)
  *@return 0 if the trailer bit indicates that this is the last frame,
  *        1 if there are additional frames
  */
-static int decode_frame(WMAProDecodeCtx *s, int *got_frame_ptr)
+static int decode_frame(WMAProDecodeCtx *s, AVFrame *frame, int *got_frame_ptr)
 {
     AVCodecContext *avctx = s->avctx;
     GetBitContext* gb = &s->gb;
@@ -1366,8 +1362,8 @@ static int decode_frame(WMAProDecodeCtx *s, int *got_frame_ptr)
     }
 
     /* get output buffer */
-    s->frame.nb_samples = s->samples_per_frame;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = s->samples_per_frame;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         s->packet_loss = 1;
         return 0;
@@ -1375,7 +1371,7 @@ static int decode_frame(WMAProDecodeCtx *s, int *got_frame_ptr)
 
     /** copy samples to the output buffer */
     for (i = 0; i < avctx->channels; i++)
-        memcpy(s->frame.extended_data[i], s->channel[i].out,
+        memcpy(frame->extended_data[i], s->channel[i].out,
                s->samples_per_frame * sizeof(*s->channel[i].out));
 
     for (i = 0; i < avctx->channels; i++) {
@@ -1543,7 +1539,7 @@ static int decode_packet(AVCodecContext *avctx, void *data,
             /** decode the cross packet frame if it is valid */
             if (!s->packet_loss)
-                decode_frame(s, got_frame_ptr);
+                decode_frame(s, data, got_frame_ptr);
     } else if (s->num_saved_bits - s->frame_offset) {
         av_dlog(avctx, "ignoring %x previously saved bits\n",
                 s->num_saved_bits - s->frame_offset);
@@ -1566,7 +1562,7 @@ static int decode_packet(AVCodecContext *avctx, void *data,
             (frame_size = show_bits(gb, s->log2_frame_size)) &&
             frame_size <= remaining_bits(s, gb)) {
             save_bits(s, gb, frame_size, 0);
-            s->packet_done = !decode_frame(s, got_frame_ptr);
+            s->packet_done = !decode_frame(s, data, got_frame_ptr);
         } else if (!s->len_prefix
                    && s->num_saved_bits > get_bits_count(&s->gb)) {
             /** when the frames do not have a length prefix, we don't know
@@ -1576,7 +1572,7 @@ static int decode_packet(AVCodecContext *avctx, void *data,
                 therefore we save the incoming packet first, then we append
                 the "previous frame" data from the next packet so that
                 we get a buffer that only contains full frames */
-            s->packet_done = !decode_frame(s, got_frame_ptr);
+            s->packet_done = !decode_frame(s, data, got_frame_ptr);
         } else
             s->packet_done = 1;
     }
@@ -1592,9 +1588,6 @@ static int decode_packet(AVCodecContext *avctx, void *data,
     if (s->packet_loss)
         return AVERROR_INVALIDDATA;
 
-    if (*got_frame_ptr)
-        *(AVFrame *)data = s->frame;
-
     return get_bits_count(gb) >> 3;
 }
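
From the API user's side, the "user-provided AVFrame" is simply the frame handed to avcodec_decode_audio4(). A rough caller-side sketch, assuming the 2012-era public API (avcodec_alloc_frame()/avcodec_free_frame()); decode_one() is just an illustrative name:

    #include <libavcodec/avcodec.h>

    /* Decode one packet; the decoder now fills "frame" directly. */
    static int decode_one(AVCodecContext *avctx, AVPacket *pkt)
    {
        AVFrame *frame = avcodec_alloc_frame();
        int got_frame = 0;
        int ret;

        if (!frame)
            return AVERROR(ENOMEM);

        ret = avcodec_decode_audio4(avctx, frame, &got_frame, pkt);
        if (ret >= 0 && got_frame) {
            /* frame->extended_data[ch] now holds frame->nb_samples
               planar float samples per channel for WMA Pro */
        }
        avcodec_free_frame(&frame);
        return ret;
    }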