Mirror of https://github.com/FFmpeg/FFmpeg.git
libmp3lame: use planar sample formats
commit 473b297f26
parent 233783e2c1
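The commit switches the encoder's input from packed (interleaved) sample formats to their planar counterparts, which matches the per-channel buffers that the lame_encode_buffer_* calls below take. As a rough illustration of the two layouts (illustrative sketch only, not code from this commit):

    #include <stdint.h>

    /* The same stereo signal in packed (AV_SAMPLE_FMT_S16) and
     * planar (AV_SAMPLE_FMT_S16P) layout. */
    int16_t packed[6] = { 100, -100, 200, -200, 300, -300 }; /* L0 R0 L1 R1 L2 R2 */
    int16_t left[3]   = { 100, 200, 300 };                   /* plane 0: left channel  */
    int16_t right[3]  = { -100, -200, -300 };                /* plane 1: right channel */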
@@ -33,6 +33,7 @@
 #include "libavutil/opt.h"
 #include "avcodec.h"
 #include "audio_frame_queue.h"
+#include "dsputil.h"
 #include "internal.h"
 #include "mpegaudio.h"
 #include "mpegaudiodecheader.h"
@@ -46,8 +47,9 @@ typedef struct LAMEContext {
     uint8_t buffer[BUFFER_SIZE];
     int buffer_index;
     int reservoir;
-    void *planar_samples[2];
+    float *samples_flt[2];
     AudioFrameQueue afq;
+    DSPContext dsp;
 } LAMEContext;

@@ -58,8 +60,8 @@ static av_cold int mp3lame_encode_close(AVCodecContext *avctx)
 #if FF_API_OLD_ENCODE_AUDIO
     av_freep(&avctx->coded_frame);
 #endif
-    av_freep(&s->planar_samples[0]);
-    av_freep(&s->planar_samples[1]);
+    av_freep(&s->samples_flt[0]);
+    av_freep(&s->samples_flt[1]);

     ff_af_queue_close(&s->afq);

@@ -126,93 +128,63 @@ static av_cold int mp3lame_encode_init(AVCodecContext *avctx)
     }
 #endif

-    /* sample format */
-    if (avctx->sample_fmt == AV_SAMPLE_FMT_S32 ||
-        avctx->sample_fmt == AV_SAMPLE_FMT_FLT) {
+    /* allocate float sample buffers */
+    if (avctx->sample_fmt == AV_SAMPLE_FMT_FLTP) {
         int ch;
         for (ch = 0; ch < avctx->channels; ch++) {
-            s->planar_samples[ch] = av_malloc(avctx->frame_size *
-                                              av_get_bytes_per_sample(avctx->sample_fmt));
-            if (!s->planar_samples[ch]) {
+            s->samples_flt[ch] = av_malloc(avctx->frame_size *
+                                           sizeof(*s->samples_flt[ch]));
+            if (!s->samples_flt[ch]) {
                 ret = AVERROR(ENOMEM);
                 goto error;
             }
         }
     }

+    ff_dsputil_init(&s->dsp, avctx);
+
     return 0;
 error:
     mp3lame_encode_close(avctx);
     return ret;
 }

-#define DEINTERLEAVE(type, scale) do {                  \
-    int ch, i;                                          \
-    for (ch = 0; ch < s->avctx->channels; ch++) {       \
-        const type *input  = samples;                   \
-        type       *output = s->planar_samples[ch];     \
-        input += ch;                                    \
-        for (i = 0; i < nb_samples; i++) {              \
-            output[i] = *input * scale;                 \
-            input += s->avctx->channels;                \
-        }                                               \
-    }                                                   \
-} while (0)
-
-static int encode_frame_int16(LAMEContext *s, void *samples, int nb_samples)
-{
-    if (s->avctx->channels > 1) {
-        return lame_encode_buffer_interleaved(s->gfp, samples,
-                                              nb_samples,
-                                              s->buffer + s->buffer_index,
-                                              BUFFER_SIZE - s->buffer_index);
-    } else {
-        return lame_encode_buffer(s->gfp, samples, NULL, nb_samples,
-                                  s->buffer + s->buffer_index,
-                                  BUFFER_SIZE - s->buffer_index);
-    }
-}
-
-static int encode_frame_int32(LAMEContext *s, void *samples, int nb_samples)
-{
-    DEINTERLEAVE(int32_t, 1);
-
-    return lame_encode_buffer_int(s->gfp,
-                                  s->planar_samples[0], s->planar_samples[1],
-                                  nb_samples,
-                                  s->buffer + s->buffer_index,
-                                  BUFFER_SIZE - s->buffer_index);
-}
-
-static int encode_frame_float(LAMEContext *s, void *samples, int nb_samples)
-{
-    DEINTERLEAVE(float, 32768.0f);
-
-    return lame_encode_buffer_float(s->gfp,
-                                    s->planar_samples[0], s->planar_samples[1],
-                                    nb_samples,
-                                    s->buffer + s->buffer_index,
-                                    BUFFER_SIZE - s->buffer_index);
-}
+#define ENCODE_BUFFER(func, buf_type, buf_name) do {                       \
+    lame_result = func(s->gfp,                                             \
+                       (const buf_type *)buf_name[0],                      \
+                       (const buf_type *)buf_name[1], frame->nb_samples,   \
+                       s->buffer + s->buffer_index,                        \
+                       BUFFER_SIZE - s->buffer_index);                     \
+} while (0)

 static int mp3lame_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                                 const AVFrame *frame, int *got_packet_ptr)
 {
     LAMEContext *s = avctx->priv_data;
     MPADecodeHeader hdr;
-    int len, ret;
+    int len, ret, ch;
     int lame_result;

     if (frame) {
         switch (avctx->sample_fmt) {
-        case AV_SAMPLE_FMT_S16:
-            lame_result = encode_frame_int16(s, frame->data[0], frame->nb_samples);
+        case AV_SAMPLE_FMT_S16P:
+            ENCODE_BUFFER(lame_encode_buffer, int16_t, frame->data);
             break;
-        case AV_SAMPLE_FMT_S32:
-            lame_result = encode_frame_int32(s, frame->data[0], frame->nb_samples);
+        case AV_SAMPLE_FMT_S32P:
+            ENCODE_BUFFER(lame_encode_buffer_int, int32_t, frame->data);
             break;
-        case AV_SAMPLE_FMT_FLT:
-            lame_result = encode_frame_float(s, frame->data[0], frame->nb_samples);
+        case AV_SAMPLE_FMT_FLTP:
+            if (frame->linesize[0] < 4 * FFALIGN(frame->nb_samples, 8)) {
+                av_log(avctx, AV_LOG_ERROR, "inadequate AVFrame plane padding\n");
+                return AVERROR(EINVAL);
+            }
+            for (ch = 0; ch < avctx->channels; ch++) {
+                s->dsp.vector_fmul_scalar(s->samples_flt[ch],
+                                          (const float *)frame->data[ch],
+                                          32768.0f,
+                                          FFALIGN(frame->nb_samples, 8));
+            }
+            ENCODE_BUFFER(lame_encode_buffer_float, float, s->samples_flt);
             break;
         default:
             return AVERROR_BUG;
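For readers skimming the hunk above: the new ENCODE_BUFFER macro simply forwards one pointer per channel plane to the corresponding lame_encode_buffer_* call. Hand-expanded for the S32P case (illustration only, this expanded form does not appear in the file):

    lame_result = lame_encode_buffer_int(s->gfp,
                                         (const int32_t *)frame->data[0],
                                         (const int32_t *)frame->data[1],
                                         frame->nb_samples,
                                         s->buffer + s->buffer_index,
                                         BUFFER_SIZE - s->buffer_index);

In the FLTP case the planes are first scaled into s->samples_flt by 32768.0f via vector_fmul_scalar, the same factor the removed DEINTERLEAVE(float, 32768.0f) used, because lame_encode_buffer_float() takes floats scaled like 16-bit samples rather than FFmpeg's native [-1.0, 1.0] float range.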
@@ -300,9 +272,9 @@ AVCodec ff_libmp3lame_encoder = {
     .encode2               = mp3lame_encode_frame,
     .close                 = mp3lame_encode_close,
     .capabilities          = CODEC_CAP_DELAY | CODEC_CAP_SMALL_LAST_FRAME,
-    .sample_fmts           = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32,
-                                                             AV_SAMPLE_FMT_FLT,
-                                                             AV_SAMPLE_FMT_S16,
+    .sample_fmts           = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32P,
+                                                             AV_SAMPLE_FMT_FLTP,
+                                                             AV_SAMPLE_FMT_S16P,
                                                              AV_SAMPLE_FMT_NONE },
     .supported_samplerates = libmp3lame_sample_rates,
     .channel_layouts       = (const uint64_t[]) { AV_CH_LAYOUT_MONO,
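Because the codec now only advertises planar formats, callers that produce interleaved audio must deinterleave it (or run it through a format-conversion library) before handing frames to the encoder. A minimal hand-rolled sketch of that conversion for 16-bit stereo, assuming the caller manages its own buffers (illustrative only, not part of FFmpeg):

    #include <stdint.h>

    /* Hypothetical helper: split packed S16 stereo into two channel planes,
     * the layout expected by AV_SAMPLE_FMT_S16P. */
    static void deinterleave_s16_stereo(const int16_t *packed, int nb_samples,
                                        int16_t *left, int16_t *right)
    {
        for (int i = 0; i < nb_samples; i++) {
            left[i]  = packed[2 * i];
            right[i] = packed[2 * i + 1];
        }
    }

In practice applications typically let libswresample (or, at the time of this commit, libavresample) perform this sample-format conversion instead of writing it by hand.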