Mirror of https://github.com/FFmpeg/FFmpeg.git (synced 2024-12-23 12:43:46 +02:00)
Merge commit '4a2b26fc1b1ad123eba473a20e270f2b0ba92bca'
* commit '4a2b26fc1b1ad123eba473a20e270f2b0ba92bca':
  tak: decode directly to the user-provided AVFrame
  smackaud: decode directly to the user-provided AVFrame
  sipr: decode directly to the user-provided AVFrame
  shorten: decode directly to the user-provided AVFrame

Conflicts:
    libavcodec/shorten.c
    libavcodec/takdec.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Commit: 2becf21d9f
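The change pattern is the same in all four decoders: drop the AVFrame held in the decoder's private context (together with the avcodec_get_frame_defaults()/coded_frame setup in init) and instead fill the AVFrame that avcodec passes in through the decode callback's data pointer. A minimal sketch of that shape follows, for orientation only; the function name and block size are hypothetical, while the ff_get_buffer() call matches the one used in the hunks below.

/* Illustrative sketch only -- not part of the commit. */
static int example_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame = data;        /* output frame supplied by the caller */
    int ret;

    /* size and allocate the caller's frame instead of a context-owned one */
    frame->nb_samples = 1024;     /* hypothetical block size */
    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    /* ... decode samples into frame->extended_data[ch] ... */

    *got_frame_ptr = 1;           /* no "*(AVFrame *)data = ..." copy needed */
    return avpkt->size;
}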
libavcodec/shorten.c

@@ -84,7 +84,6 @@ static const uint8_t is_audio_command[10] = { 1, 1, 1, 1, 0, 0, 0, 1, 1, 0 };
 
 typedef struct ShortenContext {
     AVCodecContext *avctx;
-    AVFrame frame;
     GetBitContext gb;
 
     int min_framesize, max_framesize;
@@ -118,9 +117,6 @@ static av_cold int shorten_decode_init(AVCodecContext * avctx)
     ShortenContext *s = avctx->priv_data;
     s->avctx = avctx;
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
 
@@ -407,6 +403,7 @@ static int read_header(ShortenContext *s)
 static int shorten_decode_frame(AVCodecContext *avctx, void *data,
                                 int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     ShortenContext *s = avctx->priv_data;
@@ -585,15 +582,15 @@ static int shorten_decode_frame(AVCodecContext *avctx, void *data,
             int chan;
 
             /* get output buffer */
-            s->frame.nb_samples = s->blocksize;
-            if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+            frame->nb_samples = s->blocksize;
+            if ((ret = ff_get_buffer(avctx, frame)) < 0) {
                 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                 return ret;
             }
 
             for (chan = 0; chan < s->channels; chan++) {
-                samples_u8  = ((uint8_t **)s->frame.extended_data)[chan];
-                samples_s16 = ((int16_t **)s->frame.extended_data)[chan];
+                samples_u8  = ((uint8_t **)frame->extended_data)[chan];
+                samples_s16 = ((int16_t **)frame->extended_data)[chan];
                 for (i = 0; i < s->blocksize; i++) {
                     switch (s->internal_ftype) {
                     case TYPE_U8:
@@ -607,9 +604,7 @@ static int shorten_decode_frame(AVCodecContext *avctx, void *data,
                 }
             }
 
-
-            *got_frame_ptr   = 1;
-            *(AVFrame *)data = s->frame;
+            *got_frame_ptr = 1;
         }
     }
 }
libavcodec/sipr.c

@@ -516,9 +516,6 @@ static av_cold int sipr_decoder_init(AVCodecContext * avctx)
     avctx->channel_layout = AV_CH_LAYOUT_MONO;
     avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
 
-    avcodec_get_frame_defaults(&ctx->frame);
-    avctx->coded_frame = &ctx->frame;
-
     return 0;
 }
 
@@ -526,6 +523,7 @@ static int sipr_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
 {
     SiprContext *ctx = avctx->priv_data;
+    AVFrame *frame = data;
     const uint8_t *buf=avpkt->data;
     SiprParameters parm;
     const SiprModeParam *mode_par = &modes[ctx->mode];
@@ -543,13 +541,13 @@ static int sipr_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    ctx->frame.nb_samples = mode_par->frames_per_packet * subframe_size *
+    frame->nb_samples = mode_par->frames_per_packet * subframe_size *
                         mode_par->subframe_count;
-    if ((ret = ff_get_buffer(avctx, &ctx->frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    samples = (float *)ctx->frame.data[0];
+    samples = (float *)frame->data[0];
 
     init_get_bits(&gb, buf, mode_par->bits_per_frame);
 
@@ -561,8 +559,7 @@ static int sipr_decode_frame(AVCodecContext *avctx, void *data,
         samples += subframe_size * mode_par->subframe_count;
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = ctx->frame;
 
     return mode_par->bits_per_frame >> 3;
 }
libavcodec/sipr.h

@@ -65,7 +65,6 @@ typedef struct SiprParameters {
 
 typedef struct SiprContext {
     AVCodecContext *avctx;
-    AVFrame frame;
 
     SiprMode mode;
 
libavcodec/smacker.c

@@ -572,14 +572,8 @@ static av_cold int decode_end(AVCodecContext *avctx)
 }
 
 
-typedef struct SmackerAudioContext {
-    AVFrame frame;
-} SmackerAudioContext;
-
 static av_cold int smka_decode_init(AVCodecContext *avctx)
 {
-    SmackerAudioContext *s = avctx->priv_data;
-
     if (avctx->channels < 1 || avctx->channels > 2) {
         av_log(avctx, AV_LOG_ERROR, "invalid number of channels\n");
         return AVERROR(EINVAL);
@@ -587,9 +581,6 @@ static av_cold int smka_decode_init(AVCodecContext *avctx)
     avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
     avctx->sample_fmt = avctx->bits_per_coded_sample == 8 ? AV_SAMPLE_FMT_U8 : AV_SAMPLE_FMT_S16;
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
 
@@ -599,7 +590,7 @@ static av_cold int smka_decode_init(AVCodecContext *avctx)
 static int smka_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
 {
-    SmackerAudioContext *s = avctx->priv_data;
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     GetBitContext gb;
@@ -644,13 +635,13 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    s->frame.nb_samples = unp_size / (avctx->channels * (bits + 1));
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = unp_size / (avctx->channels * (bits + 1));
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    samples = (int16_t *)s->frame.data[0];
-    samples8 = s->frame.data[0];
+    samples = (int16_t *)frame->data[0];
+    samples8 = frame->data[0];
 
     // Initialize
     for(i = 0; i < (1 << (bits + stereo)); i++) {
@@ -769,8 +760,7 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data,
         av_free(h[i].values);
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
 
     return buf_size;
 }
@@ -791,7 +781,6 @@ AVCodec ff_smackaud_decoder = {
     .name = "smackaud",
     .type = AVMEDIA_TYPE_AUDIO,
     .id = AV_CODEC_ID_SMACKAUDIO,
-    .priv_data_size = sizeof(SmackerAudioContext),
     .init = smka_decode_init,
     .decode = smka_decode_frame,
     .capabilities = CODEC_CAP_DR1,
libavcodec/takdec.c

@@ -45,7 +45,6 @@ typedef struct MCDParam {
 
 typedef struct TAKDecContext {
     AVCodecContext *avctx;       ///< parent AVCodecContext
-    AVFrame frame;               ///< AVFrame for decoded output
     DSPContext dsp;
     TAKStreamInfo ti;
     GetBitContext gb;            ///< bitstream reader initialized to start at the current frame
@@ -175,8 +174,6 @@ static av_cold int tak_decode_init(AVCodecContext *avctx)
     ff_dsputil_init(&s->dsp, avctx);
 
     s->avctx = avctx;
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
     avctx->bits_per_raw_sample = avctx->bits_per_coded_sample;
 
     set_sample_rate_params(avctx);
@@ -688,6 +685,7 @@ static int tak_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *pkt)
 {
     TAKDecContext *s = avctx->priv_data;
+    AVFrame *frame = data;
     GetBitContext *gb = &s->gb;
     int chan, i, ret, hsize;
 
@@ -750,8 +748,8 @@ static int tak_decode_frame(AVCodecContext *avctx, void *data,
     s->nb_samples = s->ti.last_frame_samples ? s->ti.last_frame_samples
                                              : s->ti.frame_samples;
 
-    s->frame.nb_samples = s->nb_samples;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0)
+    frame->nb_samples = s->nb_samples;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0)
         return ret;
 
     if (avctx->bits_per_raw_sample <= 16) {
@@ -768,7 +766,7 @@ static int tak_decode_frame(AVCodecContext *avctx, void *data,
             return ret;
     } else {
         for (chan = 0; chan < avctx->channels; chan++)
-            s->decoded[chan] = (int32_t *)s->frame.extended_data[chan];
+            s->decoded[chan] = (int32_t *)frame->extended_data[chan];
     }
 
     if (s->nb_samples < 16) {
@@ -886,7 +884,7 @@ static int tak_decode_frame(AVCodecContext *avctx, void *data,
     switch (avctx->sample_fmt) {
     case AV_SAMPLE_FMT_U8P:
         for (chan = 0; chan < avctx->channels; chan++) {
-            uint8_t *samples = (uint8_t *)s->frame.extended_data[chan];
+            uint8_t *samples = (uint8_t *)frame->extended_data[chan];
             int32_t *decoded = s->decoded[chan];
             for (i = 0; i < s->nb_samples; i++)
                 samples[i] = decoded[i] + 0x80;
@@ -894,7 +892,7 @@ static int tak_decode_frame(AVCodecContext *avctx, void *data,
         break;
     case AV_SAMPLE_FMT_S16P:
         for (chan = 0; chan < avctx->channels; chan++) {
-            int16_t *samples = (int16_t *)s->frame.extended_data[chan];
+            int16_t *samples = (int16_t *)frame->extended_data[chan];
             int32_t *decoded = s->decoded[chan];
             for (i = 0; i < s->nb_samples; i++)
                 samples[i] = decoded[i];
@@ -902,15 +900,14 @@ static int tak_decode_frame(AVCodecContext *avctx, void *data,
         break;
     case AV_SAMPLE_FMT_S32P:
         for (chan = 0; chan < avctx->channels; chan++) {
-            int32_t *samples = (int32_t *)s->frame.extended_data[chan];
+            int32_t *samples = (int32_t *)frame->extended_data[chan];
             for (i = 0; i < s->nb_samples; i++)
                 samples[i] <<= 8;
         }
         break;
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
 
     return pkt->size;
 }