Mirror of https://github.com/FFmpeg/FFmpeg.git (synced 2025-02-09 14:14:39 +02:00)
Merge commit '86bfcfcf2364bc837b7bb582c66a8a15a332414f'
* commit '86bfcfcf2364bc837b7bb582c66a8a15a332414f':
  mace: decode directly to the user-provided AVFrame
  libspeex: decode directly to the user-provided AVFrame
  libopus: decode directly to the user-provided AVFrame
  libopencore-amr: decode directly to the user-provided AVFrame
  libgsm: decode directly to the user-provided AVFrame

Conflicts:
	libavcodec/libopusdec.c
	libavcodec/mace.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Commit afe30fe060
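All five decoders get the same treatment: the AVFrame that used to live in each codec's private context (and the coded_frame pointer set up in init) is removed, and decode_frame() now requests the output buffer directly on the AVFrame the caller passes in through the data argument. A minimal sketch of the resulting shape, assuming a generic audio decoder (the name decode_frame and the use of avctx->frame_size for the sample count are illustrative; each decoder sets nb_samples from its own frame size):

    static int decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame_ptr, AVPacket *avpkt)
    {
        AVFrame *frame = data;                  /* user-provided output frame */
        int ret;

        /* size the frame, then let the generic code allocate its buffers */
        frame->nb_samples = avctx->frame_size;
        if ((ret = ff_get_buffer(avctx, frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }

        /* ... decode avpkt->data into frame->data[0] ... */

        *got_frame_ptr = 1;   /* no trailing "*(AVFrame *)data = s->frame;" copy any more */
        return avpkt->size;
    }

Dropping the context-owned frame removes the final *(AVFrame *)data = s->frame; copy and leaves buffer ownership entirely with the caller-provided frame.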
libavcodec/libgsm.c
@@ -152,7 +152,6 @@ AVCodec ff_libgsm_ms_encoder = {
 #endif

 typedef struct LibGSMDecodeContext {
-    AVFrame frame;
     struct gsm_state *state;
 } LibGSMDecodeContext;

@@ -180,9 +179,6 @@ static av_cold int libgsm_decode_init(AVCodecContext *avctx) {
         }
     }

-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }

@@ -199,6 +195,7 @@ static int libgsm_decode_frame(AVCodecContext *avctx, void *data,
 {
     int i, ret;
     LibGSMDecodeContext *s = avctx->priv_data;
+    AVFrame *frame = data;
     uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     int16_t *samples;
@@ -209,12 +206,12 @@ static int libgsm_decode_frame(AVCodecContext *avctx, void *data,
     }

     /* get output buffer */
-    s->frame.nb_samples = avctx->frame_size;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = avctx->frame_size;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    samples = (int16_t *)s->frame.data[0];
+    samples = (int16_t *)frame->data[0];

     for (i = 0; i < avctx->frame_size / GSM_FRAME_SIZE; i++) {
         if ((ret = gsm_decode(s->state, buf, samples)) < 0)
@@ -223,8 +220,7 @@ static int libgsm_decode_frame(AVCodecContext *avctx, void *data,
         samples += GSM_FRAME_SIZE;
     }

     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;

     return avctx->block_align;
 }
libavcodec/libopencore-amr.c
@@ -87,7 +87,6 @@ static int get_bitrate_mode(int bitrate, void *log_ctx)

 typedef struct AMRContext {
     AVClass *av_class;
-    AVFrame frame;
     void *dec_state;
     void *enc_state;
     int enc_bitrate;
@@ -120,9 +119,6 @@ static av_cold int amr_nb_decode_init(AVCodecContext *avctx)
         return -1;
     }

-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }

@@ -138,6 +134,7 @@ static av_cold int amr_nb_decode_close(AVCodecContext *avctx)
 static int amr_nb_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     AMRContext *s = avctx->priv_data;
@@ -149,8 +146,8 @@ static int amr_nb_decode_frame(AVCodecContext *avctx, void *data,
             buf, buf_size, avctx->frame_number);

     /* get output buffer */
-    s->frame.nb_samples = 160;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = 160;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -167,10 +164,9 @@ static int amr_nb_decode_frame(AVCodecContext *avctx, void *data,
     av_dlog(avctx, "packet_size=%d buf= 0x%X %X %X %X\n",
             packet_size, buf[0], buf[1], buf[2], buf[3]);
     /* call decoder */
-    Decoder_Interface_Decode(s->dec_state, buf, (short *)s->frame.data[0], 0);
+    Decoder_Interface_Decode(s->dec_state, buf, (short *)frame->data[0], 0);

     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;

     return packet_size;
 }
@@ -314,7 +310,6 @@ AVCodec ff_libopencore_amrnb_encoder = {
 #include <opencore-amrwb/if_rom.h>

 typedef struct AMRWBContext {
-    AVFrame frame;
     void *state;
 } AMRWBContext;

@@ -328,15 +323,13 @@ static av_cold int amr_wb_decode_init(AVCodecContext *avctx)

     s->state = D_IF_init();

-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }

 static int amr_wb_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     AMRWBContext *s = avctx->priv_data;
@@ -345,8 +338,8 @@ static int amr_wb_decode_frame(AVCodecContext *avctx, void *data,
     static const uint8_t block_size[16] = {18, 24, 33, 37, 41, 47, 51, 59, 61, 6, 6, 0, 0, 0, 1, 1};

     /* get output buffer */
-    s->frame.nb_samples = 320;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = 320;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -364,10 +357,9 @@ static int amr_wb_decode_frame(AVCodecContext *avctx, void *data,
         return AVERROR_INVALIDDATA;
     }

-    D_IF_decode(s->state, buf, (short *)s->frame.data[0], _good_frame);
+    D_IF_decode(s->state, buf, (short *)frame->data[0], _good_frame);

     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;

     return packet_size;
 }
libavcodec/libopusdec.c
@@ -32,7 +32,6 @@

 struct libopus_context {
     OpusMSDecoder *dec;
-    AVFrame frame;
     int pre_skip;
 #ifndef OPUS_SET_GAIN
     union { int i; double d; } gain;
@@ -111,8 +110,7 @@ static av_cold int libopus_decode_init(AVCodecContext *avc)

     avc->internal->skip_samples = opus->pre_skip;
     avc->delay = 3840; /* Decoder delay (in samples) at 48kHz */
-    avcodec_get_frame_defaults(&opus->frame);
-    avc->coded_frame = &opus->frame;
     return 0;
 }

@@ -126,14 +124,15 @@ static av_cold int libopus_decode_close(AVCodecContext *avc)

 #define MAX_FRAME_SIZE (960 * 6)

-static int libopus_decode(AVCodecContext *avc, void *frame,
+static int libopus_decode(AVCodecContext *avc, void *data,
                           int *got_frame_ptr, AVPacket *pkt)
 {
     struct libopus_context *opus = avc->priv_data;
+    AVFrame *frame = data;
     int ret, nb_samples;

-    opus->frame.nb_samples = MAX_FRAME_SIZE;
-    ret = ff_get_buffer(avc, &opus->frame);
+    frame->nb_samples = MAX_FRAME_SIZE;
+    ret = ff_get_buffer(avc, frame);
     if (ret < 0) {
         av_log(avc, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
@@ -141,12 +140,12 @@ static int libopus_decode(AVCodecContext *avc, void *frame,

     if (avc->sample_fmt == AV_SAMPLE_FMT_S16)
         nb_samples = opus_multistream_decode(opus->dec, pkt->data, pkt->size,
-                                             (opus_int16 *)opus->frame.data[0],
-                                             opus->frame.nb_samples, 0);
+                                             (opus_int16 *)frame->data[0],
+                                             frame->nb_samples, 0);
     else
         nb_samples = opus_multistream_decode_float(opus->dec, pkt->data, pkt->size,
-                                                   (float *)opus->frame.data[0],
-                                                   opus->frame.nb_samples, 0);
+                                                   (float *)frame->data[0],
+                                                   frame->nb_samples, 0);

     if (nb_samples < 0) {
         av_log(avc, AV_LOG_ERROR, "Decoding error: %s\n",
@@ -158,20 +157,20 @@ static int libopus_decode(AVCodecContext *avc, void *frame,
     {
         int i = avc->channels * nb_samples;
         if (avc->sample_fmt == AV_SAMPLE_FMT_FLT) {
-            float *pcm = (float *)opus->frame.data[0];
+            float *pcm = (float *)frame->data[0];
             for (; i > 0; i--, pcm++)
                 *pcm = av_clipf(*pcm * opus->gain.d, -1, 1);
         } else {
-            int16_t *pcm = (int16_t *)opus->frame.data[0];
+            int16_t *pcm = (int16_t *)frame->data[0];
             for (; i > 0; i--, pcm++)
                 *pcm = av_clip_int16(((int64_t)opus->gain.i * *pcm) >> 16);
         }
     }
 #endif

-    opus->frame.nb_samples = nb_samples;
-    *(AVFrame *)frame = opus->frame;
-    *got_frame_ptr = 1;
+    frame->nb_samples = nb_samples;
+    *got_frame_ptr = 1;
     return pkt->size;
 }

libavcodec/libspeexdec.c
@@ -29,7 +29,6 @@
 #include "internal.h"

 typedef struct {
-    AVFrame frame;
     SpeexBits bits;
     SpeexStereoState stereo;
     void *dec_state;
@@ -104,9 +103,6 @@ static av_cold int libspeex_decode_init(AVCodecContext *avctx)
         speex_decoder_ctl(s->dec_state, SPEEX_SET_HANDLER, &callback);
     }

-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }

@@ -116,16 +112,17 @@ static int libspeex_decode_frame(AVCodecContext *avctx, void *data,
     uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     LibSpeexContext *s = avctx->priv_data;
+    AVFrame *frame = data;
     int16_t *output;
     int ret, consumed = 0;

     /* get output buffer */
-    s->frame.nb_samples = s->frame_size;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = s->frame_size;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    output = (int16_t *)s->frame.data[0];
+    output = (int16_t *)frame->data[0];

     /* if there is not enough data left for the smallest possible frame or the
        next 5 bits are a terminator code, reset the libspeex buffer using the
@@ -152,8 +149,7 @@ static int libspeex_decode_frame(AVCodecContext *avctx, void *data,
     if (avctx->channels == 2)
         speex_decode_stereo_int(output, s->frame_size, &s->stereo);

     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;

     return consumed;
 }
libavcodec/mace.c
@@ -155,7 +155,6 @@ typedef struct ChannelData {
 } ChannelData;

 typedef struct MACEContext {
-    AVFrame frame;
     ChannelData chd[2];
 } MACEContext;

@@ -227,21 +226,17 @@ static void chomp6(ChannelData *chd, int16_t *output, uint8_t val, int tab_idx)

 static av_cold int mace_decode_init(AVCodecContext * avctx)
 {
-    MACEContext *ctx = avctx->priv_data;
-
     if (avctx->channels > 2 || avctx->channels <= 0)
         return -1;
     avctx->sample_fmt = AV_SAMPLE_FMT_S16P;

-    avcodec_get_frame_defaults(&ctx->frame);
-    avctx->coded_frame = &ctx->frame;
-
     return 0;
 }

 static int mace_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     int16_t **samples;
@@ -250,12 +245,12 @@ static int mace_decode_frame(AVCodecContext *avctx, void *data,
     int is_mace3 = (avctx->codec_id == AV_CODEC_ID_MACE3);

     /* get output buffer */
-    ctx->frame.nb_samples = 3 * (buf_size << (1 - is_mace3)) / avctx->channels;
-    if ((ret = ff_get_buffer(avctx, &ctx->frame)) < 0) {
+    frame->nb_samples = 3 * (buf_size << (1 - is_mace3)) / avctx->channels;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    samples = (int16_t **)ctx->frame.extended_data;
+    samples = (int16_t **)frame->extended_data;

     for(i = 0; i < avctx->channels; i++) {
         int16_t *output = samples[i];
@@ -279,8 +274,7 @@ static int mace_decode_frame(AVCodecContext *avctx, void *data,
         }
     }

     *got_frame_ptr = 1;
-    *(AVFrame *)data = ctx->frame;

     return buf_size;
 }