/*
 * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Audio merging filter
 */
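
/* Illustrative use only (stream and file names are hypothetical): the filter
 * can be driven from a libavfilter graph such as
 *   amovie=front.wav [f]; amovie=back.wav [b]; [f][b] amerge
 * which merges the two audio streams into a single multi-channel stream. */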
#include "libswresample/swresample.h" // only for SWR_CH_MAX
|
|
|
|
#include "avfilter.h"
|
2012-05-12 18:58:14 +03:00
|
|
|
#include "audio.h"
|
2012-05-31 22:47:10 +03:00
|
|
|
#include "bufferqueue.h"
|
2011-11-06 23:28:05 +03:00
|
|
|
#include "internal.h"
|
|
|
|
|
|
|
|

typedef struct {
    int nb_in_ch[2];       /**< number of channels for each input */
    int route[SWR_CH_MAX]; /**< channels routing, see copy_samples */
    int bps;               /**< bytes per sample */
    struct amerge_input {
        struct FFBufQueue queue; /**< buffered frames not yet merged */
        int nb_samples;          /**< number of queued samples not yet merged */
        int pos;                 /**< samples consumed from the head of the queue */
    } in[2];
} AMergeContext;

static av_cold void uninit(AVFilterContext *ctx)
{
    AMergeContext *am = ctx->priv;
    int i;

    for (i = 0; i < 2; i++)
        ff_bufqueue_discard_all(&am->in[i].queue);
}

static int query_formats(AVFilterContext *ctx)
{
    AMergeContext *am = ctx->priv;
    int64_t inlayout[2], outlayout;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    int i;

    for (i = 0; i < 2; i++) {
        if (!ctx->inputs[i]->in_channel_layouts ||
            !ctx->inputs[i]->in_channel_layouts->nb_channel_layouts) {
            av_log(ctx, AV_LOG_ERROR,
                   "No channel layout for input %d\n", i + 1);
            return AVERROR(EINVAL);
        }
        inlayout[i] = ctx->inputs[i]->in_channel_layouts->channel_layouts[0];
        if (ctx->inputs[i]->in_channel_layouts->nb_channel_layouts > 1) {
            char buf[256];
            av_get_channel_layout_string(buf, sizeof(buf), 0, inlayout[i]);
            av_log(ctx, AV_LOG_INFO, "Using \"%s\" for input %d\n", buf, i + 1);
        }
        am->nb_in_ch[i] = av_get_channel_layout_nb_channels(inlayout[i]);
    }
    if (am->nb_in_ch[0] + am->nb_in_ch[1] > SWR_CH_MAX) {
        av_log(ctx, AV_LOG_ERROR, "Too many channels (max %d)\n", SWR_CH_MAX);
        return AVERROR(EINVAL);
    }
    if (inlayout[0] & inlayout[1]) {
        av_log(ctx, AV_LOG_WARNING,
               "Inputs overlap: output layout will be meaningless\n");
        for (i = 0; i < am->nb_in_ch[0] + am->nb_in_ch[1]; i++)
            am->route[i] = i;
        outlayout = av_get_default_channel_layout(am->nb_in_ch[0] +
                                                  am->nb_in_ch[1]);
        if (!outlayout)
            outlayout = ((int64_t)1 << (am->nb_in_ch[0] + am->nb_in_ch[1])) - 1;
    } else {
        int *route[2] = { am->route, am->route + am->nb_in_ch[0] };
        int c, out_ch_number = 0;

        outlayout = inlayout[0] | inlayout[1];
        for (c = 0; c < 64; c++)
            for (i = 0; i < 2; i++)
                if ((inlayout[i] >> c) & 1)
                    *(route[i]++) = out_ch_number++;
    }
    formats = avfilter_make_format_list(ff_packed_sample_fmts);
    avfilter_set_common_sample_formats(ctx, formats);
    for (i = 0; i < 2; i++) {
        layouts = NULL;
        ff_add_channel_layout(&layouts, inlayout[i]);
        ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts);
    }
    layouts = NULL;
    ff_add_channel_layout(&layouts, outlayout);
    ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts);
    ff_set_common_samplerates(ctx, ff_all_samplerates());
    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AMergeContext *am = ctx->priv;
    int64_t layout;
    char name[3][256];
    int i;

    if (ctx->inputs[0]->sample_rate != ctx->inputs[1]->sample_rate) {
        av_log(ctx, AV_LOG_ERROR,
               "Inputs must have the same sample rate "
               "(%d vs %d)\n",
               ctx->inputs[0]->sample_rate, ctx->inputs[1]->sample_rate);
        return AVERROR(EINVAL);
    }
    am->bps = av_get_bytes_per_sample(ctx->outputs[0]->format);
    outlink->sample_rate = ctx->inputs[0]->sample_rate;
    outlink->time_base   = ctx->inputs[0]->time_base;
    for (i = 0; i < 3; i++) {
        layout = (i < 2 ? ctx->inputs[i] : ctx->outputs[0])->channel_layout;
        av_get_channel_layout_string(name[i], sizeof(name[i]), -1, layout);
    }
    av_log(ctx, AV_LOG_INFO,
           "in1:%s + in2:%s -> out:%s\n", name[0], name[1], name[2]);
    return 0;
}

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AMergeContext *am = ctx->priv;
    int i, ret;

    for (i = 0; i < 2; i++)
        if (!am->in[i].nb_samples)
            if ((ret = avfilter_request_frame(ctx->inputs[i])) < 0)
                return ret;
    return 0;
}

/**
 * Copy samples from two input streams to one output stream.
 * @param nb_in_ch number of channels in each input stream
 * @param route    routing values;
 *                 input channel i goes to output channel route[i];
 *                 i <  nb_in_ch[0] are the channels from the first input;
 *                 i >= nb_in_ch[0] are the channels from the second input
 * @param ins      pointer to the samples of each input, in packed format;
 *                 will be left at the end of the copied samples
 * @param outs     pointer to the samples of the output, in packed format;
 *                 must point to a buffer big enough;
 *                 will be left at the end of the copied samples
 * @param ns       number of samples to copy
 * @param bps      bytes per sample
 */
static inline void copy_samples(int nb_in_ch[2], int *route, uint8_t *ins[2],
                                uint8_t **outs, int ns, int bps)
{
    int *route_cur;
    int i, c;

    while (ns--) {
        route_cur = route;
        for (i = 0; i < 2; i++) {
            for (c = 0; c < nb_in_ch[i]; c++) {
                memcpy((*outs) + bps * *(route_cur++), ins[i], bps);
                ins[i] += bps;
            }
        }
        *outs += (nb_in_ch[0] + nb_in_ch[1]) * bps;
    }
}
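
/* Worked example (illustrative only): if input 1 is mono (FC) and input 2 is
 * stereo (FL+FR), the layouts do not overlap, so query_formats() builds the
 * output layout FL+FR+FC and route[] = { 2, 0, 1 }. copy_samples() then writes
 * the single channel of input 1 to output channel 2 (the FC slot) and the two
 * channels of input 2 to output channels 0 and 1, so each output sample is
 * interleaved as FL, FR, FC. */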

static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AMergeContext *am = ctx->priv;
    AVFilterLink *const outlink = ctx->outputs[0];
    int input_number = inlink == ctx->inputs[1];
    int nb_samples, ns, i;
    AVFilterBufferRef *outbuf, *inbuf[2];
    uint8_t *ins[2], *outs;

    ff_bufqueue_add(ctx, &am->in[input_number].queue, insamples);
    am->in[input_number].nb_samples += insamples->audio->nb_samples;
    if (!am->in[!input_number].nb_samples)
        return;

    nb_samples = FFMIN(am->in[0].nb_samples,
                       am->in[1].nb_samples);
    outbuf = ff_get_audio_buffer(ctx->outputs[0], AV_PERM_WRITE, nb_samples);
    outs = outbuf->data[0];
    for (i = 0; i < 2; i++) {
        inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0);
        ins[i] = inbuf[i]->data[0] +
                 am->in[i].pos * am->nb_in_ch[i] * am->bps;
    }

    /* Copy the properties of the first input buffer, then derive the output
       pts from it, offset by the samples already consumed from that buffer. */
    avfilter_copy_buffer_ref_props(outbuf, inbuf[0]);
    outbuf->pts = inbuf[0]->pts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
                  inbuf[0]->pts +
                  av_rescale_q(am->in[0].pos,
                               (AVRational){ 1, ctx->inputs[0]->sample_rate },
                               ctx->outputs[0]->time_base);
    outbuf->audio->nb_samples     = nb_samples;
    outbuf->audio->channel_layout = outlink->channel_layout;

    while (nb_samples) {
        ns = nb_samples;
        for (i = 0; i < 2; i++)
            ns = FFMIN(ns, inbuf[i]->audio->nb_samples - am->in[i].pos);
        /* Unroll the most common sample formats: speed +~350% for the loop,
           +~13% overall (including two common decoders) */
        switch (am->bps) {
        case 1:
            copy_samples(am->nb_in_ch, am->route, ins, &outs, ns, 1);
            break;
        case 2:
            copy_samples(am->nb_in_ch, am->route, ins, &outs, ns, 2);
            break;
        case 4:
            copy_samples(am->nb_in_ch, am->route, ins, &outs, ns, 4);
            break;
        default:
            copy_samples(am->nb_in_ch, am->route, ins, &outs, ns, am->bps);
            break;
        }

        nb_samples -= ns;
        for (i = 0; i < 2; i++) {
            am->in[i].nb_samples -= ns;
            am->in[i].pos += ns;
            if (am->in[i].pos == inbuf[i]->audio->nb_samples) {
                am->in[i].pos = 0;
                avfilter_unref_buffer(inbuf[i]);
                ff_bufqueue_get(&am->in[i].queue);
                inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0);
                ins[i] = inbuf[i] ? inbuf[i]->data[0] : NULL;
            }
        }
    }
    ff_filter_samples(ctx->outputs[0], outbuf);
}

AVFilter avfilter_af_amerge = {
    .name          = "amerge",
    .description   = NULL_IF_CONFIG_SMALL("Merge two audio streams into "
                                          "a single multi-channel stream."),
    .priv_size     = sizeof(AMergeContext),
    .uninit        = uninit,
    .query_formats = query_formats,

    .inputs    = (const AVFilterPad[]) {
        { .name           = "in1",
          .type           = AVMEDIA_TYPE_AUDIO,
          .filter_samples = filter_samples,
          .min_perms      = AV_PERM_READ | AV_PERM_PRESERVE, },
        { .name           = "in2",
          .type           = AVMEDIA_TYPE_AUDIO,
          .filter_samples = filter_samples,
          .min_perms      = AV_PERM_READ | AV_PERM_PRESERVE, },
        { .name = NULL }
    },
    .outputs   = (const AVFilterPad[]) {
        { .name           = "default",
          .type           = AVMEDIA_TYPE_AUDIO,
          .config_props   = config_output,
          .request_frame  = request_frame, },
        { .name = NULL }
    },
};