Mirror of https://github.com/FFmpeg/FFmpeg.git
Merge commit 'ee6ca11b657515ad736ec0d2b8635e098d0a2680'
* commit 'ee6ca11b657515ad736ec0d2b8635e098d0a2680':
  vorbis: decode directly to the user-provided AVFrame
  vmdaudio: decode directly to the user-provided AVFrame
  twinvq: decode directly to the user-provided AVFrame
  tta: decode directly to the user-provided AVFrame
  truespeech: decode directly to the user-provided AVFrame

Conflicts:
	libavcodec/tta.c
	libavcodec/twinvq.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit dca6fb08a7
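For context (not part of the commit itself): the change applied to all five decoders follows one pattern — drop the AVFrame that used to live in the codec's private context, fill the AVFrame the caller passes in through the decode callback's data pointer instead, and remove the now-unneeded avcodec_get_frame_defaults()/coded_frame setup in each init function. A minimal before/after sketch, assuming a hypothetical MyContext type standing in for the per-decoder private contexts (ff_get_buffer() and the callback signature are the real libavcodec interfaces of that era):

/* Illustrative sketch only -- not code from this commit. */
#include "avcodec.h"
#include "internal.h"   /* ff_get_buffer() */

typedef struct MyContext {
    AVFrame frame;      /* only needed by the old scheme */
} MyContext;

/* Old scheme: decode into a context-owned AVFrame, then copy the
 * AVFrame struct out through the opaque data pointer. */
static int old_style_decode(AVCodecContext *avctx, void *data,
                            int *got_frame_ptr, AVPacket *avpkt)
{
    MyContext *s = avctx->priv_data;
    int ret;

    s->frame.nb_samples = 240;                    /* samples in this packet */
    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0)
        return ret;
    /* ... write decoded samples into s->frame.data[0] ... */

    *got_frame_ptr   = 1;
    *(AVFrame *)data = s->frame;                  /* struct copy to the caller */
    return avpkt->size;
}

/* New scheme (what this merge adopts): decode directly into the
 * caller-provided AVFrame; no AVFrame in the context, no struct copy. */
static int new_style_decode(AVCodecContext *avctx, void *data,
                            int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame = data;
    int ret;

    frame->nb_samples = 240;                      /* samples in this packet */
    if ((ret = ff_get_buffer(avctx, frame)) < 0)
        return ret;
    /* ... write decoded samples into frame->data[0] ... */

    *got_frame_ptr = 1;
    return avpkt->size;
}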
--- a/libavcodec/truespeech.c
+++ b/libavcodec/truespeech.c
@@ -36,7 +36,6 @@
  * TrueSpeech decoder context
  */
 typedef struct {
-    AVFrame frame;
     DSPContext dsp;
     /* input data */
     DECLARE_ALIGNED(16, uint8_t, buffer)[32];
@@ -73,9 +72,6 @@ static av_cold int truespeech_decode_init(AVCodecContext * avctx)
 
     ff_dsputil_init(&c->dsp, avctx);
 
-    avcodec_get_frame_defaults(&c->frame);
-    avctx->coded_frame = &c->frame;
-
     return 0;
 }
 
@@ -310,6 +306,7 @@ static void truespeech_save_prevvec(TSContext *c)
 static int truespeech_decode_frame(AVCodecContext *avctx, void *data,
                                    int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     TSContext *c = avctx->priv_data;
@@ -327,12 +324,12 @@ static int truespeech_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    c->frame.nb_samples = iterations * 240;
-    if ((ret = ff_get_buffer(avctx, &c->frame)) < 0) {
+    frame->nb_samples = iterations * 240;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    samples = (int16_t *)c->frame.data[0];
+    samples = (int16_t *)frame->data[0];
 
     memset(samples, 0, iterations * 240 * sizeof(*samples));
 
@@ -354,8 +351,7 @@ static int truespeech_decode_frame(AVCodecContext *avctx, void *data,
         truespeech_save_prevvec(c);
     }
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = c->frame;
+    *got_frame_ptr = 1;
 
     return buf_size;
 }
 
--- a/libavcodec/tta.c
+++ b/libavcodec/tta.c
@@ -61,7 +61,6 @@ typedef struct TTAChannel {
 typedef struct TTAContext {
     AVClass *class;
     AVCodecContext *avctx;
-    AVFrame frame;
     GetBitContext gb;
     const AVCRC *crc_table;
 
@@ -312,15 +311,13 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
         return AVERROR_INVALIDDATA;
     }
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
 
 static int tta_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     TTAContext *s = avctx->priv_data;
@@ -336,15 +333,15 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data,
     init_get_bits(&s->gb, buf, buf_size*8);
 
     /* get output buffer */
-    s->frame.nb_samples = framelen;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = framelen;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
 
     // decode directly to output buffer for 24-bit sample format
     if (s->bps == 3)
-        s->decode_buffer = (int32_t *)s->frame.data[0];
+        s->decode_buffer = (int32_t *)frame->data[0];
 
     // init per channel states
     for (i = 0; i < s->channels; i++) {
@@ -433,7 +430,7 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data,
                 i++;
                 // check for last frame
                 if (i == s->last_frame_length && get_bits_left(&s->gb) / 8 == 4) {
-                    s->frame.nb_samples = framelen = s->last_frame_length;
+                    frame->nb_samples = framelen = s->last_frame_length;
                     break;
                 }
             }
@@ -449,20 +446,20 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data,
     // convert to output buffer
     switch (s->bps) {
     case 1: {
-        uint8_t *samples = (uint8_t *)s->frame.data[0];
+        uint8_t *samples = (uint8_t *)frame->data[0];
         for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
             *samples++ = *p + 0x80;
         break;
     }
     case 2: {
-        int16_t *samples = (int16_t *)s->frame.data[0];
+        int16_t *samples = (int16_t *)frame->data[0];
         for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
             *samples++ = *p;
         break;
     }
     case 3: {
         // shift samples for 24-bit sample format
-        int32_t *samples = (int32_t *)s->frame.data[0];
+        int32_t *samples = (int32_t *)frame->data[0];
         for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
             *samples++ <<= 8;
         // reset decode buffer
@@ -471,8 +468,7 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data,
         }
     }
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = s->frame;
+    *got_frame_ptr = 1;
 
     return buf_size;
 error:
--- a/libavcodec/twinvq.c
+++ b/libavcodec/twinvq.c
@@ -177,7 +177,6 @@ static const ModeTab mode_44_48 = {
 
 typedef struct TwinContext {
     AVCodecContext *avctx;
-    AVFrame frame;
     AVFloatDSPContext fdsp;
     FFTContext mdct_ctx[3];
 
@@ -811,6 +810,7 @@ static void read_and_decode_spectrum(TwinContext *tctx, GetBitContext *gb,
 static int twin_decode_frame(AVCodecContext * avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     TwinContext *tctx = avctx->priv_data;
@@ -832,12 +832,12 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data,
 
     /* get output buffer */
     if (tctx->discarded_packets >= 2) {
-        tctx->frame.nb_samples = mtab->size;
-        if ((ret = ff_get_buffer(avctx, &tctx->frame)) < 0) {
+        frame->nb_samples = mtab->size;
+        if ((ret = ff_get_buffer(avctx, frame)) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return ret;
         }
-        out = (float **)tctx->frame.extended_data;
+        out = (float **)frame->extended_data;
     }
 
     init_get_bits(&gb, buf, buf_size * 8);
@@ -863,8 +863,7 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data,
         return buf_size;
     }
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = tctx->frame;
+    *got_frame_ptr = 1;
 
     return buf_size;
 }
@@ -1171,9 +1170,6 @@ static av_cold int twin_decode_init(AVCodecContext *avctx)
 
     memset_float(tctx->bark_hist[0][0], 0.1, FF_ARRAY_ELEMS(tctx->bark_hist));
 
-    avcodec_get_frame_defaults(&tctx->frame);
-    avctx->coded_frame = &tctx->frame;
-
     return 0;
 }
 
--- a/libavcodec/vmdav.c
+++ b/libavcodec/vmdav.c
@@ -469,7 +469,6 @@ static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
 #define BLOCK_TYPE_SILENCE 3
 
 typedef struct VmdAudioContext {
-    AVFrame frame;
     int out_bps;
     int chunk_size;
 } VmdAudioContext;
@@ -514,9 +513,6 @@ static av_cold int vmdaudio_decode_init(AVCodecContext *avctx)
 
     s->chunk_size = avctx->block_align + avctx->channels * (s->out_bps == 2);
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     av_log(avctx, AV_LOG_DEBUG, "%d channels, %d bits/sample, "
            "block align = %d, sample rate = %d\n",
            avctx->channels, avctx->bits_per_coded_sample, avctx->block_align,
@@ -557,6 +553,7 @@ static void decode_audio_s16(int16_t *out, const uint8_t *buf, int buf_size,
 static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data,
                                  int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     const uint8_t *buf_end;
     int buf_size = avpkt->size;
@@ -601,13 +598,14 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data,
     audio_chunks = buf_size / s->chunk_size;
 
     /* get output buffer */
-    s->frame.nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) / avctx->channels;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) /
+                        avctx->channels;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    output_samples_u8  = s->frame.data[0];
-    output_samples_s16 = (int16_t *)s->frame.data[0];
+    output_samples_u8  = frame->data[0];
+    output_samples_s16 = (int16_t *)frame->data[0];
 
     /* decode silent chunks */
     if (silent_chunks > 0) {
@@ -637,8 +635,7 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data,
         }
     }
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = s->frame;
+    *got_frame_ptr = 1;
 
     return avpkt->size;
 }
 
--- a/libavcodec/vorbisdec.c
+++ b/libavcodec/vorbisdec.c
@@ -125,7 +125,6 @@ typedef struct {
 
 typedef struct vorbis_context_s {
     AVCodecContext *avccontext;
-    AVFrame frame;
     GetBitContext gb;
     VorbisDSPContext dsp;
     AVFloatDSPContext fdsp;
@@ -1040,9 +1039,6 @@ static av_cold int vorbis_decode_init(AVCodecContext *avccontext)
     avccontext->channels = vc->audio_channels;
     avccontext->sample_rate = vc->audio_samplerate;
 
-    avcodec_get_frame_defaults(&vc->frame);
-    avccontext->coded_frame = &vc->frame;
-
     return 0;
 }
 
@@ -1653,6 +1649,7 @@ static int vorbis_decode_frame(AVCodecContext *avccontext, void *data,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     vorbis_context *vc = avccontext->priv_data;
+    AVFrame *frame = data;
     GetBitContext *gb = &vc->gb;
     float *channel_ptrs[255];
     int i, len, ret;
@@ -1699,19 +1696,19 @@ static int vorbis_decode_frame(AVCodecContext *avccontext, void *data,
     }
 
     /* get output buffer */
-    vc->frame.nb_samples = vc->blocksize[1] / 2;
-    if ((ret = ff_get_buffer(avccontext, &vc->frame)) < 0) {
+    frame->nb_samples = vc->blocksize[1] / 2;
+    if ((ret = ff_get_buffer(avccontext, frame)) < 0) {
         av_log(avccontext, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
 
     if (vc->audio_channels > 8) {
         for (i = 0; i < vc->audio_channels; i++)
-            channel_ptrs[i] = (float *)vc->frame.extended_data[i];
+            channel_ptrs[i] = (float *)frame->extended_data[i];
     } else {
         for (i = 0; i < vc->audio_channels; i++) {
             int ch = ff_vorbis_channel_layout_offsets[vc->audio_channels - 1][i];
-            channel_ptrs[ch] = (float *)vc->frame.extended_data[i];
+            channel_ptrs[ch] = (float *)frame->extended_data[i];
         }
     }
 
@@ -1729,9 +1726,8 @@ static int vorbis_decode_frame(AVCodecContext *avccontext, void *data,
     av_dlog(NULL, "parsed %d bytes %d bits, returned %d samples (*ch*bits) \n",
             get_bits_count(gb) / 8, get_bits_count(gb) % 8, len);
 
-    vc->frame.nb_samples = len;
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = vc->frame;
+    frame->nb_samples = len;
+    *got_frame_ptr = 1;
 
     return buf_size;
 }