Mirror of https://github.com/FFmpeg/FFmpeg.git (synced 2024-12-23 12:43:46 +02:00)
Merge commit '1b9b6d6e5ea556b6d307f9d473f54f6406fdc3c8'
* commit '1b9b6d6e5ea556b6d307f9d473f54f6406fdc3c8':
  qcelp: decode directly to the user-provided AVFrame
  pcm-bluray: decode directly to the user-provided AVFrame
  nellymoser: decode directly to the user-provided AVFrame
  mpc7/8: decode directly to the user-provided AVFrame
  mpegaudio: decode directly to the user-provided AVFrame
  mlp/truehd: decode directly to the user-provided AVFrame

Conflicts:
    libavcodec/mpc7.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Commit: 65da700704
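The change pattern is the same in every decoder touched by this merge: drop the AVFrame that used to live in the codec's private context (together with the avcodec_get_frame_defaults()/avctx->coded_frame setup in init), cast the decode callback's data argument to AVFrame *, and call ff_get_buffer() on that caller-owned frame. Below is a minimal sketch of the resulting shape, using a hypothetical FooContext/foo_decode_frame and a made-up FOO_FRAME_SIZE; ff_get_buffer() and the void *data decode callback are the libavcodec internals of this period, not public API.

#include "avcodec.h"
#include "internal.h"

#define FOO_FRAME_SIZE 1152            /* hypothetical fixed frame length */

typedef struct FooContext {
    AVCodecContext *avctx;
    /* no AVFrame member any more: the output frame is caller-provided */
} FooContext;

static int foo_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame = data;             /* user-provided output frame */
    int ret;

    frame->nb_samples = FOO_FRAME_SIZE;
    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    /* ... decode the packet into frame->data[0] / frame->extended_data ... */

    *got_frame_ptr = 1;                /* no '*(AVFrame *)data = ctx->frame;' copy-out */
    return avpkt->size;
}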
mlp/truehd: decode directly to the user-provided AVFrame

@@ -118,7 +118,6 @@ typedef struct SubStream {
 typedef struct MLPDecodeContext {
     AVCodecContext *avctx;
-    AVFrame frame;
 
     /// Current access unit being read has a major sync.
     int is_major_sync_unit;
@@ -271,9 +270,6 @@ static av_cold int mlp_decode_init(AVCodecContext *avctx)
         m->substream[substr].lossless_check_data = 0xffffffff;
     ff_mlpdsp_init(&m->dsp);
 
-    avcodec_get_frame_defaults(&m->frame);
-    avctx->coded_frame = &m->frame;
-
     return 0;
 }
 
@@ -1005,7 +1001,7 @@ static void rematrix_channels(MLPDecodeContext *m, unsigned int substr)
 /** Write the audio data into the output buffer. */
 
 static int output_data(MLPDecodeContext *m, unsigned int substr,
-                       void *data, int *got_frame_ptr)
+                       AVFrame *frame, int *got_frame_ptr)
 {
     AVCodecContext *avctx = m->avctx;
     SubStream *s = &m->substream[substr];
@@ -1021,13 +1017,13 @@ static int output_data(MLPDecodeContext *m, unsigned int substr,
     }
 
     /* get output buffer */
-    m->frame.nb_samples = s->blockpos;
-    if ((ret = ff_get_buffer(avctx, &m->frame)) < 0) {
+    frame->nb_samples = s->blockpos;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
     }
-    data_32 = (int32_t *)m->frame.data[0];
-    data_16 = (int16_t *)m->frame.data[0];
+    data_32 = (int32_t *)frame->data[0];
+    data_16 = (int16_t *)frame->data[0];
 
     for (i = 0; i < s->blockpos; i++) {
         for (out_ch = 0; out_ch <= s->max_matrix_channel; out_ch++) {
@@ -1041,7 +1037,6 @@ static int output_data(MLPDecodeContext *m, unsigned int substr,
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = m->frame;
 
     return 0;
 }

mpc7/8: decode directly to the user-provided AVFrame

Shared MPCContext and the mpc7 decoder:

@@ -50,7 +50,6 @@ typedef struct Band {
 }Band;
 
 typedef struct MPCContext {
-    AVFrame frame;
     DSPContext dsp;
     MPADSPContext mpadsp;
     GetBitContext gb;
@@ -95,9 +95,6 @@ static av_cold int mpc7_decode_init(AVCodecContext * avctx)
     avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
     avctx->channel_layout = AV_CH_LAYOUT_STEREO;
 
-    avcodec_get_frame_defaults(&c->frame);
-    avctx->coded_frame = &c->frame;
-
     if(vlc_initialized) return 0;
     av_log(avctx, AV_LOG_DEBUG, "Initing VLC\n");
     scfi_vlc.table = scfi_table;
@@ -197,6 +194,7 @@ static int get_scale_idx(GetBitContext *gb, int ref)
 static int mpc7_decode_frame(AVCodecContext * avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size;
     MPCContext *c = avctx->priv_data;
@@ -226,8 +224,8 @@ static int mpc7_decode_frame(AVCodecContext * avctx, void *data,
     buf_size -= 4;
 
     /* get output buffer */
-    c->frame.nb_samples = MPC_FRAME_SIZE;
-    if ((ret = ff_get_buffer(avctx, &c->frame)) < 0) {
+    frame->nb_samples = MPC_FRAME_SIZE;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -295,9 +293,9 @@ static int mpc7_decode_frame(AVCodecContext * avctx, void *data,
         for(ch = 0; ch < 2; ch++)
             idx_to_quant(c, &gb, bands[i].res[ch], c->Q[ch] + off);
 
-    ff_mpc_dequantize_and_synth(c, mb, (int16_t **)c->frame.extended_data, 2);
+    ff_mpc_dequantize_and_synth(c, mb, (int16_t **)frame->extended_data, 2);
     if(last_frame)
-        c->frame.nb_samples = c->lastframelen;
+        frame->nb_samples = c->lastframelen;
 
     bits_used = get_bits_count(&gb);
     bits_avail = buf_size * 8;
@@ -312,7 +310,6 @@ static int mpc7_decode_frame(AVCodecContext * avctx, void *data,
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = c->frame;
 
     return avpkt->size;
 }

The mpc8 decoder:

@@ -144,9 +144,6 @@ static av_cold int mpc8_decode_init(AVCodecContext * avctx)
     avctx->channel_layout = (channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
     avctx->channels = channels;
 
-    avcodec_get_frame_defaults(&c->frame);
-    avctx->coded_frame = &c->frame;
-
     if(vlc_initialized) return 0;
     av_log(avctx, AV_LOG_DEBUG, "Initing VLC\n");
 
@@ -244,6 +241,7 @@ static av_cold int mpc8_decode_init(AVCodecContext * avctx)
 static int mpc8_decode_frame(AVCodecContext * avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     MPCContext *c = avctx->priv_data;
@@ -255,8 +253,8 @@ static int mpc8_decode_frame(AVCodecContext * avctx, void *data,
     int last[2];
 
     /* get output buffer */
-    c->frame.nb_samples = MPC_FRAME_SIZE;
-    if ((res = ff_get_buffer(avctx, &c->frame)) < 0) {
+    frame->nb_samples = MPC_FRAME_SIZE;
+    if ((res = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return res;
     }
@@ -415,7 +413,7 @@ static int mpc8_decode_frame(AVCodecContext * avctx, void *data,
     }
 
     ff_mpc_dequantize_and_synth(c, maxband - 1,
-                                (int16_t **)c->frame.extended_data,
+                                (int16_t **)frame->extended_data,
                                 avctx->channels);
 
     c->cur_frame++;
@@ -427,7 +425,6 @@ static int mpc8_decode_frame(AVCodecContext * avctx, void *data,
         c->cur_frame = 0;
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = c->frame;
 
     return c->cur_frame ? c->last_bits_used >> 3 : buf_size;
 }

mpegaudio: decode directly to the user-provided AVFrame

@@ -86,7 +86,7 @@ typedef struct MPADecodeContext {
     AVCodecContext* avctx;
     MPADSPContext mpadsp;
     AVFloatDSPContext fdsp;
-    AVFrame frame;
+    AVFrame *frame;
 } MPADecodeContext;
 
 #if CONFIG_FLOAT
@@ -455,9 +455,6 @@ static av_cold int decode_init(AVCodecContext * avctx)
     if (avctx->codec_id == AV_CODEC_ID_MP3ADU)
         s->adu_mode = 1;
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
 
@@ -1630,12 +1627,13 @@ static int mp_decode_frame(MPADecodeContext *s, OUT_INT **samples,
 
     /* get output buffer */
     if (!samples) {
-        s->frame.nb_samples = s->avctx->frame_size;
-        if ((ret = ff_get_buffer(s->avctx, &s->frame)) < 0) {
+        av_assert0(s->frame != NULL);
+        s->frame->nb_samples = s->avctx->frame_size;
+        if ((ret = ff_get_buffer(s->avctx, s->frame)) < 0) {
             av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return ret;
         }
-        samples = (OUT_INT **)s->frame.extended_data;
+        samples = (OUT_INT **)s->frame->extended_data;
     }
 
     /* apply the synthesis filter */
@@ -1707,10 +1705,12 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr,
         buf_size= s->frame_size;
     }
 
+    s->frame = data;
+
     ret = mp_decode_frame(s, NULL, buf, buf_size);
     if (ret >= 0) {
+        s->frame->nb_samples = avctx->frame_size;
         *got_frame_ptr = 1;
-        *(AVFrame *)data = s->frame;
         avctx->sample_rate = s->sample_rate;
         //FIXME maybe move the other codec info stuff from above here too
     } else {
@@ -1779,6 +1779,8 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data,
 
     s->frame_size = len;
 
+    s->frame = data;
+
     ret = mp_decode_frame(s, NULL, buf, buf_size);
     if (ret < 0) {
         av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n");
@@ -1786,7 +1788,6 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data,
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
 
     return buf_size;
 }
@@ -1798,7 +1799,6 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data,
  * Context for MP3On4 decoder
  */
 typedef struct MP3On4DecodeContext {
-    AVFrame *frame;
     int frames;   ///< number of mp3 frames per block (number of mp3 decoder instances)
     int syncword; ///< syncword patch
     const uint8_t *coff; ///< channel offsets in output buffer
@@ -1887,7 +1887,6 @@ static int decode_init_mp3on4(AVCodecContext * avctx)
     // Put decoder context in place to make init_decode() happy
     avctx->priv_data = s->mp3decctx[0];
     decode_init(avctx);
-    s->frame = avctx->coded_frame;
     // Restore mp3on4 context pointer
     avctx->priv_data = s;
     s->mp3decctx[0]->adu_mode = 1; // Set adu mode
@@ -1924,6 +1923,7 @@ static void flush_mp3on4(AVCodecContext *avctx)
 static int decode_frame_mp3on4(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     MP3On4DecodeContext *s = avctx->priv_data;
@@ -1935,12 +1935,12 @@ static int decode_frame_mp3on4(AVCodecContext *avctx, void *data,
     int fr, ch, ret;
 
     /* get output buffer */
-    s->frame->nb_samples = MPA_FRAME_SIZE;
-    if ((ret = ff_get_buffer(avctx, s->frame)) < 0) {
+    frame->nb_samples = MPA_FRAME_SIZE;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    out_samples = (OUT_INT **)s->frame->extended_data;
+    out_samples = (OUT_INT **)frame->extended_data;
 
     // Discard too short frames
     if (buf_size < HEADER_SIZE)
@@ -1990,9 +1990,8 @@ static int decode_frame_mp3on4(AVCodecContext *avctx, void *data,
     /* update codec info */
     avctx->sample_rate = s->mp3decctx[0]->sample_rate;
 
-    s->frame->nb_samples = out_size / (avctx->channels * sizeof(OUT_INT));
+    frame->nb_samples = out_size / (avctx->channels * sizeof(OUT_INT));
     *got_frame_ptr = 1;
-    *(AVFrame *)data = *s->frame;
 
     return buf_size;
 }

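Unlike the other decoders, mpegaudio requests the buffer inside mp_decode_frame(), which is shared by the plain MP3, MP3ADU and MP3On4 entry points, so MPADecodeContext keeps its frame field, but only as a pointer that each entry point aims at the caller's frame before calling in. A heavily condensed sketch of that flow, with header parsing and the codec-info updates from the real function omitted:

static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame_ptr, AVPacket *avpkt)
{
    MPADecodeContext *s = avctx->priv_data;
    int ret;

    s->frame = data;   /* caller-provided AVFrame, used by mp_decode_frame() */
    ret = mp_decode_frame(s, NULL, avpkt->data, avpkt->size);
    if (ret >= 0) {
        s->frame->nb_samples = avctx->frame_size;
        *got_frame_ptr = 1;
    }
    return ret;
}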
nellymoser: decode directly to the user-provided AVFrame

@@ -49,7 +49,6 @@
 
 typedef struct NellyMoserDecodeContext {
     AVCodecContext* avctx;
-    AVFrame frame;
     AVLFG random_state;
     GetBitContext gb;
     float scale_bias;
@@ -136,15 +135,13 @@ static av_cold int decode_init(AVCodecContext * avctx) {
     avctx->channels = 1;
     avctx->channel_layout = AV_CH_LAYOUT_MONO;
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
 
 static int decode_tag(AVCodecContext *avctx, void *data,
                       int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     const uint8_t *side=av_packet_get_side_data(avpkt, 'F', NULL);
     int buf_size = avpkt->size;
@@ -174,12 +171,12 @@ static int decode_tag(AVCodecContext *avctx, void *data,
         avctx->sample_rate= 11025*(blocks/2);
 
     /* get output buffer */
-    s->frame.nb_samples = NELLY_SAMPLES * blocks;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = NELLY_SAMPLES * blocks;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    samples_flt = (float *)s->frame.data[0];
+    samples_flt = (float *)frame->data[0];
 
     for (i=0 ; i<blocks ; i++) {
         nelly_decode_block(s, buf, samples_flt);
@@ -188,7 +185,6 @@ static int decode_tag(AVCodecContext *avctx, void *data,
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
 
     return buf_size;
 }

pcm-bluray: decode directly to the user-provided AVFrame

@@ -122,26 +122,12 @@ static int pcm_bluray_parse_header(AVCodecContext *avctx,
     return 0;
 }
 
-typedef struct PCMBRDecode {
-    AVFrame frame;
-} PCMBRDecode;
-
-static av_cold int pcm_bluray_decode_init(AVCodecContext * avctx)
-{
-    PCMBRDecode *s = avctx->priv_data;
-
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
-    return 0;
-}
-
 static int pcm_bluray_decode_frame(AVCodecContext *avctx, void *data,
                                    int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *src = avpkt->data;
     int buf_size = avpkt->size;
-    PCMBRDecode *s = avctx->priv_data;
     GetByteContext gb;
     int num_source_channels, channel, retval;
     int sample_size, samples;
@@ -166,13 +152,13 @@ static int pcm_bluray_decode_frame(AVCodecContext *avctx, void *data,
     samples = buf_size / sample_size;
 
     /* get output buffer */
-    s->frame.nb_samples = samples;
-    if ((retval = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = samples;
+    if ((retval = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return retval;
     }
-    dst16 = (int16_t *)s->frame.data[0];
-    dst32 = (int32_t *)s->frame.data[0];
+    dst16 = (int16_t *)frame->data[0];
+    dst32 = (int32_t *)frame->data[0];
 
     if (samples) {
         switch (avctx->channel_layout) {
@@ -307,7 +293,6 @@ static int pcm_bluray_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
 
     retval = bytestream2_tell(&gb);
     if (avctx->debug & FF_DEBUG_BITSTREAM)
@@ -320,8 +305,6 @@ AVCodec ff_pcm_bluray_decoder = {
     .name           = "pcm_bluray",
     .type           = AVMEDIA_TYPE_AUDIO,
     .id             = AV_CODEC_ID_PCM_BLURAY,
-    .priv_data_size = sizeof(PCMBRDecode),
-    .init           = pcm_bluray_decode_init,
     .decode         = pcm_bluray_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .sample_fmts    = (const enum AVSampleFormat[]){

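pcm-bluray is the extreme case: the AVFrame was the only member of PCMBRDecode, so the private context, its init function, and the .priv_data_size/.init entries all disappear. A sketch of what the codec definition is left with, condensed from the hunk above (.sample_fmts and the remaining fields abbreviated):

AVCodec ff_pcm_bluray_decoder = {
    .name         = "pcm_bluray",
    .type         = AVMEDIA_TYPE_AUDIO,
    .id           = AV_CODEC_ID_PCM_BLURAY,
    .decode       = pcm_bluray_decode_frame,
    .capabilities = CODEC_CAP_DR1,
    .sample_fmts  = (const enum AVSampleFormat[]){ /* ... */ },
};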
qcelp: decode directly to the user-provided AVFrame

@@ -53,7 +53,6 @@ typedef enum {
 } qcelp_packet_rate;
 
 typedef struct {
-    AVFrame avframe;
     GetBitContext gb;
     qcelp_packet_rate bitrate;
     QCELPFrame frame; /**< unpacked data frame */
@@ -97,9 +96,6 @@ static av_cold int qcelp_decode_init(AVCodecContext *avctx)
     for (i = 0; i < 10; i++)
         q->prev_lspf[i] = (i + 1) / 11.;
 
-    avcodec_get_frame_defaults(&q->avframe);
-    avctx->coded_frame = &q->avframe;
-
     return 0;
 }
 
@@ -690,6 +686,7 @@ static int qcelp_decode_frame(AVCodecContext *avctx, void *data,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     QCELPContext *q = avctx->priv_data;
+    AVFrame *frame = data;
     float *outbuffer;
     int i, ret;
     float quantized_lspf[10], lpc[10];
@@ -697,12 +694,12 @@ static int qcelp_decode_frame(AVCodecContext *avctx, void *data,
     float *formant_mem;
 
     /* get output buffer */
-    q->avframe.nb_samples = 160;
-    if ((ret = ff_get_buffer(avctx, &q->avframe)) < 0) {
+    frame->nb_samples = 160;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    outbuffer = (float *)q->avframe.data[0];
+    outbuffer = (float *)frame->data[0];
 
     if ((q->bitrate = determine_bitrate(avctx, buf_size, &buf)) == I_F_Q) {
         warn_insufficient_frame_quality(avctx, "bitrate cannot be determined.");
@@ -786,7 +783,6 @@ erasure:
     q->prev_bitrate = q->bitrate;
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = q->avframe;
 
     return buf_size;
 }