libmp3lame: use planar sample formats

Mirror of https://github.com/FFmpeg/FFmpeg.git
commit 473b297f26
parent 233783e2c1
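This commit switches the encoder's input from interleaved sample formats (S16/S32/FLT) to their planar counterparts (S16P/S32P/FLTP). With planar audio each channel occupies its own contiguous plane in frame->data[ch], which maps directly onto LAME's lame_encode_buffer*() functions taking separate left/right channel pointers, so the per-format deinterleaving helpers can be dropped. A minimal sketch of the idea for a stereo S16P frame (out_buf and out_buf_size are placeholder names, not taken from the patch):

    /* Sketch only: planar channel planes feed LAME's per-channel arguments
     * directly; for mono input the right-channel pointer can be NULL. */
    int n = lame_encode_buffer(gfp,
                               (const int16_t *)frame->data[0], /* left plane  */
                               (const int16_t *)frame->data[1], /* right plane */
                               frame->nb_samples,
                               out_buf, out_buf_size);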
@@ -33,6 +33,7 @@
 #include "libavutil/opt.h"
 #include "avcodec.h"
 #include "audio_frame_queue.h"
+#include "dsputil.h"
 #include "internal.h"
 #include "mpegaudio.h"
 #include "mpegaudiodecheader.h"
@@ -46,8 +47,9 @@ typedef struct LAMEContext {
     uint8_t buffer[BUFFER_SIZE];
     int buffer_index;
     int reservoir;
-    void *planar_samples[2];
+    float *samples_flt[2];
     AudioFrameQueue afq;
+    DSPContext dsp;
 } LAMEContext;
 
 
@@ -58,8 +60,8 @@ static av_cold int mp3lame_encode_close(AVCodecContext *avctx)
 #if FF_API_OLD_ENCODE_AUDIO
     av_freep(&avctx->coded_frame);
 #endif
-    av_freep(&s->planar_samples[0]);
-    av_freep(&s->planar_samples[1]);
+    av_freep(&s->samples_flt[0]);
+    av_freep(&s->samples_flt[1]);
 
     ff_af_queue_close(&s->afq);
 
@@ -126,93 +128,63 @@ static av_cold int mp3lame_encode_init(AVCodecContext *avctx)
     }
 #endif
 
-    /* sample format */
-    if (avctx->sample_fmt == AV_SAMPLE_FMT_S32 ||
-        avctx->sample_fmt == AV_SAMPLE_FMT_FLT) {
+    /* allocate float sample buffers */
+    if (avctx->sample_fmt == AV_SAMPLE_FMT_FLTP) {
         int ch;
         for (ch = 0; ch < avctx->channels; ch++) {
-            s->planar_samples[ch] = av_malloc(avctx->frame_size *
-                                              av_get_bytes_per_sample(avctx->sample_fmt));
-            if (!s->planar_samples[ch]) {
+            s->samples_flt[ch] = av_malloc(avctx->frame_size *
+                                           sizeof(*s->samples_flt[ch]));
+            if (!s->samples_flt[ch]) {
                 ret = AVERROR(ENOMEM);
                 goto error;
             }
         }
     }
 
+    ff_dsputil_init(&s->dsp, avctx);
+
     return 0;
 error:
     mp3lame_encode_close(avctx);
     return ret;
 }
 
-#define DEINTERLEAVE(type, scale) do {                                  \
-    int ch, i;                                                          \
-    for (ch = 0; ch < s->avctx->channels; ch++) {                       \
-        const type *input = samples;                                    \
-        type *output = s->planar_samples[ch];                           \
-        input += ch;                                                    \
-        for (i = 0; i < nb_samples; i++) {                              \
-            output[i] = *input * scale;                                 \
-            input += s->avctx->channels;                                \
-        }                                                               \
-    }                                                                   \
+#define ENCODE_BUFFER(func, buf_type, buf_name) do {                       \
+    lame_result = func(s->gfp,                                             \
+                       (const buf_type *)buf_name[0],                      \
+                       (const buf_type *)buf_name[1], frame->nb_samples,   \
+                       s->buffer + s->buffer_index,                        \
+                       BUFFER_SIZE - s->buffer_index);                     \
 } while (0)
 
-static int encode_frame_int16(LAMEContext *s, void *samples, int nb_samples)
-{
-    if (s->avctx->channels > 1) {
-        return lame_encode_buffer_interleaved(s->gfp, samples,
-                                              nb_samples,
-                                              s->buffer + s->buffer_index,
-                                              BUFFER_SIZE - s->buffer_index);
-    } else {
-        return lame_encode_buffer(s->gfp, samples, NULL, nb_samples,
-                                  s->buffer + s->buffer_index,
-                                  BUFFER_SIZE - s->buffer_index);
-    }
-}
-
-static int encode_frame_int32(LAMEContext *s, void *samples, int nb_samples)
-{
-    DEINTERLEAVE(int32_t, 1);
-
-    return lame_encode_buffer_int(s->gfp,
-                                  s->planar_samples[0], s->planar_samples[1],
-                                  nb_samples,
-                                  s->buffer + s->buffer_index,
-                                  BUFFER_SIZE - s->buffer_index);
-}
-
-static int encode_frame_float(LAMEContext *s, void *samples, int nb_samples)
-{
-    DEINTERLEAVE(float, 32768.0f);
-
-    return lame_encode_buffer_float(s->gfp,
-                                    s->planar_samples[0], s->planar_samples[1],
-                                    nb_samples,
-                                    s->buffer + s->buffer_index,
-                                    BUFFER_SIZE - s->buffer_index);
-}
-
 static int mp3lame_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                                 const AVFrame *frame, int *got_packet_ptr)
 {
     LAMEContext *s = avctx->priv_data;
     MPADecodeHeader hdr;
-    int len, ret;
+    int len, ret, ch;
     int lame_result;
 
     if (frame) {
         switch (avctx->sample_fmt) {
-        case AV_SAMPLE_FMT_S16:
-            lame_result = encode_frame_int16(s, frame->data[0], frame->nb_samples);
+        case AV_SAMPLE_FMT_S16P:
+            ENCODE_BUFFER(lame_encode_buffer, int16_t, frame->data);
             break;
-        case AV_SAMPLE_FMT_S32:
-            lame_result = encode_frame_int32(s, frame->data[0], frame->nb_samples);
+        case AV_SAMPLE_FMT_S32P:
+            ENCODE_BUFFER(lame_encode_buffer_int, int32_t, frame->data);
             break;
-        case AV_SAMPLE_FMT_FLT:
-            lame_result = encode_frame_float(s, frame->data[0], frame->nb_samples);
+        case AV_SAMPLE_FMT_FLTP:
+            if (frame->linesize[0] < 4 * FFALIGN(frame->nb_samples, 8)) {
+                av_log(avctx, AV_LOG_ERROR, "inadequate AVFrame plane padding\n");
+                return AVERROR(EINVAL);
+            }
+            for (ch = 0; ch < avctx->channels; ch++) {
+                s->dsp.vector_fmul_scalar(s->samples_flt[ch],
+                                          (const float *)frame->data[ch],
+                                          32768.0f,
+                                          FFALIGN(frame->nb_samples, 8));
+            }
+            ENCODE_BUFFER(lame_encode_buffer_float, float, s->samples_flt);
             break;
         default:
             return AVERROR_BUG;
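LAME's floating-point entry point expects samples scaled to the signed 16-bit range, so the FLTP case above first multiplies each input plane by 32768.0f into s->samples_flt using vector_fmul_scalar(); since that DSP routine operates on a multiple of 8 samples, the input planes must be padded accordingly, hence the linesize check. For reference, the ENCODE_BUFFER() invocation in that case expands to roughly the following (a sketch of the macro expansion, not verbatim preprocessor output):

    lame_result = lame_encode_buffer_float(s->gfp,
                                           (const float *)s->samples_flt[0],
                                           (const float *)s->samples_flt[1],
                                           frame->nb_samples,
                                           s->buffer + s->buffer_index,
                                           BUFFER_SIZE - s->buffer_index);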
@@ -300,9 +272,9 @@ AVCodec ff_libmp3lame_encoder = {
     .encode2               = mp3lame_encode_frame,
     .close                 = mp3lame_encode_close,
     .capabilities          = CODEC_CAP_DELAY | CODEC_CAP_SMALL_LAST_FRAME,
-    .sample_fmts           = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32,
-                                                             AV_SAMPLE_FMT_FLT,
-                                                             AV_SAMPLE_FMT_S16,
+    .sample_fmts           = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32P,
+                                                             AV_SAMPLE_FMT_FLTP,
+                                                             AV_SAMPLE_FMT_S16P,
                                                              AV_SAMPLE_FMT_NONE },
     .supported_samplerates = libmp3lame_sample_rates,
     .channel_layouts       = (const uint64_t[]) { AV_CH_LAYOUT_MONO,
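Because .sample_fmts now advertises only planar formats, callers must supply planar frames; interleaved input has to be converted upstream (for example by a resampling/format-conversion stage). A small sketch, under the assumption that libavutil/libavcodec headers are available, of how a caller might confirm that a format it plans to use is both advertised by the codec and planar (check_sample_fmt is a hypothetical helper, not part of this patch):

    #include "libavcodec/avcodec.h"
    #include "libavutil/samplefmt.h"

    /* Hypothetical helper: returns 1 if fmt is in the codec's list and planar. */
    static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat fmt)
    {
        const enum AVSampleFormat *p = codec->sample_fmts;

        while (p && *p != AV_SAMPLE_FMT_NONE) {
            if (*p == fmt)
                return av_sample_fmt_is_planar(fmt); /* 1 for S16P/S32P/FLTP */
            p++;
        }
        return 0;
    }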