
avfilter/af_surround: switch to activate

Paul B Mahol 2019-04-23 12:40:04 +02:00
parent 7a128ac2bc
commit ce15c3a4c8
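The switch replaces the input pad's .filter_frame callback and the output pad's .request_frame callback with a single .activate callback that pulls input, drains the FIFO and propagates EOF itself, using the helpers from libavfilter's "filters.h". As a rough sketch of that general activate pattern (illustrative only, simplified from the actual code in the diff below; it assumes a filter source that already includes "filters.h" and "internal.h", as this patch does, and process() is a hypothetical stand-in for the per-frame work):

/* Minimal sketch of an activate-driven filter, not the exact code in this
 * commit; process() is a hypothetical placeholder for the FIFO + FFT work. */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *in = NULL;
    int ret, status;
    int64_t pts;

    /* Push any status (e.g. EOF) set on the output back to the input. */
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* Consume one input frame if one is queued on the input link. */
    ret = ff_inlink_consume_frame(inlink, &in);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return process(ctx, in);   /* hypothetical per-frame work */

    /* No frame available: forward an input EOF/error to the output. */
    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        ff_outlink_set_status(outlink, status, pts);
        return 0;
    }

    /* Otherwise, request more input when the output side wants data. */
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}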

libavfilter/af_surround.c

@@ -24,6 +24,8 @@
 #include "libavcodec/avfft.h"
 #include "avfilter.h"
 #include "audio.h"
+#include "filters.h"
+#include "internal.h"
 #include "formats.h"
 #include "window_func.h"
 
@@ -90,6 +92,7 @@ typedef struct AudioSurroundContext {
     float *window_func_lut;
 
     int64_t pts;
+    int eof;
 
     void (*filter)(AVFilterContext *ctx);
    void (*upmix_stereo)(AVFilterContext *ctx,
@@ -1491,70 +1494,91 @@ static int ifft_channel(AVFilterContext *ctx, void *arg, int ch, int nb_jobs)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+static int filter_frame(AVFilterLink *inlink)
 {
     AVFilterContext *ctx = inlink->dst;
     AVFilterLink *outlink = ctx->outputs[0];
     AudioSurroundContext *s = ctx->priv;
+    AVFrame *out;
     int ret;
 
-    ret = av_audio_fifo_write(s->fifo, (void **)in->extended_data,
-                              in->nb_samples);
-    if (ret >= 0 && s->pts == AV_NOPTS_VALUE)
-        s->pts = in->pts;
-
-    av_frame_free(&in);
+    ret = av_audio_fifo_peek(s->fifo, (void **)s->input->extended_data, s->buf_size);
     if (ret < 0)
         return ret;
 
-    while (av_audio_fifo_size(s->fifo) >= s->buf_size) {
-        AVFrame *out;
-
-        ret = av_audio_fifo_peek(s->fifo, (void **)s->input->extended_data, s->buf_size);
-        if (ret < 0)
-            return ret;
-
-        ctx->internal->execute(ctx, fft_channel, NULL, NULL, inlink->channels);
-
-        s->filter(ctx);
-
-        out = ff_get_audio_buffer(outlink, s->hop_size);
-        if (!out)
-            return AVERROR(ENOMEM);
-
-        ctx->internal->execute(ctx, ifft_channel, out, NULL, outlink->channels);
-
-        out->pts = s->pts;
-        if (s->pts != AV_NOPTS_VALUE)
-            s->pts += av_rescale_q(out->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
-
-        av_audio_fifo_drain(s->fifo, s->hop_size);
-        ret = ff_filter_frame(outlink, out);
-        if (ret < 0)
-            return ret;
-    }
-
-    return 0;
+    ctx->internal->execute(ctx, fft_channel, NULL, NULL, inlink->channels);
+
+    s->filter(ctx);
+
+    out = ff_get_audio_buffer(outlink, s->hop_size);
+    if (!out)
+        return AVERROR(ENOMEM);
+
+    ctx->internal->execute(ctx, ifft_channel, out, NULL, outlink->channels);
+
+    out->pts = s->pts;
+    if (s->pts != AV_NOPTS_VALUE)
+        s->pts += av_rescale_q(out->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
+
+    av_audio_fifo_drain(s->fifo, FFMIN(av_audio_fifo_size(s->fifo), s->hop_size));
+    return ff_filter_frame(outlink, out);
 }
 
-static int request_frame(AVFilterLink *outlink)
+static int activate(AVFilterContext *ctx)
 {
-    AVFilterContext *ctx = outlink->src;
+    AVFilterLink *inlink = ctx->inputs[0];
+    AVFilterLink *outlink = ctx->outputs[0];
     AudioSurroundContext *s = ctx->priv;
-    int ret = 0;
-
-    ret = ff_request_frame(ctx->inputs[0]);
-
-    if (ret == AVERROR_EOF && av_audio_fifo_size(s->fifo) > 0 && av_audio_fifo_size(s->fifo) < s->buf_size) {
-        AVFrame *in;
-
-        in = ff_get_audio_buffer(outlink, s->buf_size - av_audio_fifo_size(s->fifo));
-        if (!in)
-            return AVERROR(ENOMEM);
-
-        ret = filter_frame(ctx->inputs[0], in);
-        av_audio_fifo_drain(s->fifo, s->buf_size);
-    }
-
-    return ret;
+    AVFrame *in = NULL;
+    int ret = 0, status;
+    int64_t pts;
+
+    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
+
+    if (!s->eof && av_audio_fifo_size(s->fifo) < s->buf_size) {
+        ret = ff_inlink_consume_frame(inlink, &in);
+        if (ret < 0)
+            return ret;
+
+        if (ret > 0) {
+            ret = av_audio_fifo_write(s->fifo, (void **)in->extended_data,
+                                      in->nb_samples);
+            if (ret >= 0 && s->pts == AV_NOPTS_VALUE)
+                s->pts = in->pts;
+
+            av_frame_free(&in);
+            if (ret < 0)
+                return ret;
+        }
+    }
+
+    if ((av_audio_fifo_size(s->fifo) >= s->buf_size) ||
+        (av_audio_fifo_size(s->fifo) > 0 && s->eof)) {
+        ret = filter_frame(inlink);
+        if (av_audio_fifo_size(s->fifo) >= s->buf_size)
+            ff_filter_set_ready(ctx, 100);
+        return ret;
+    }
+
+    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
+        if (status == AVERROR_EOF) {
+            s->eof = 1;
+            if (av_audio_fifo_size(s->fifo) >= 0) {
+                ff_filter_set_ready(ctx, 100);
+                return 0;
+            }
+        }
+    }
+
+    if (s->eof && av_audio_fifo_size(s->fifo) <= 0) {
+        ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
+        return 0;
+    }
+
+    if (!s->eof)
+        FF_FILTER_FORWARD_WANTED(outlink, inlink);
+
+    return FFERROR_NOT_READY;
 }
 
 static av_cold void uninit(AVFilterContext *ctx)
@@ -1649,7 +1673,6 @@ static const AVFilterPad inputs[] = {
     {
         .name = "default",
         .type = AVMEDIA_TYPE_AUDIO,
-        .filter_frame = filter_frame,
         .config_props = config_input,
     },
     { NULL }
@@ -1659,7 +1682,6 @@ static const AVFilterPad outputs[] = {
     {
         .name = "default",
         .type = AVMEDIA_TYPE_AUDIO,
-        .request_frame = request_frame,
        .config_props = config_output,
     },
     { NULL }
@@ -1673,6 +1695,7 @@ AVFilter ff_af_surround = {
     .priv_class = &surround_class,
     .init = init,
     .uninit = uninit,
+    .activate = activate,
     .inputs = inputs,
     .outputs = outputs,
     .flags = AVFILTER_FLAG_SLICE_THREADS,