
avfilter/avf_showfreqs: stop using audio fifo

Also stop rewriting pts.
Author: Paul B Mahol
Date:   2022-02-23 16:43:29 +01:00
Parent: 9cf652cef4
Commit: d27e1cb633
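In place of the AVAudioFifo, the filter now keeps one persistent window of win_size samples per channel (s->window) and slides it by hop_size samples for every frame consumed in activate(); the output timestamp becomes the consumed frame's pts rescaled to the output time base, instead of a sample counter seeded in init(). Below is a minimal, self-contained sketch of that sliding-window update in plain C; WIN_SIZE, HOP_SIZE and push_hop() are illustrative names, not part of the patch or of the FFmpeg API.

#include <stdio.h>
#include <string.h>

#define WIN_SIZE 8  /* analysis window length, stands in for s->win_size  */
#define HOP_SIZE 4  /* samples consumed per output frame, i.e. s->hop_size */

/* Slide the persistent window left by one hop and append the new samples.
 * nb_samples may be short on the last frame; the remainder is zero-padded,
 * mirroring the memmove/memcpy/memset sequence in filter_frame(). */
static void push_hop(float *window, const float *src, int nb_samples)
{
    const int offset = WIN_SIZE - HOP_SIZE;

    memmove(window, window + HOP_SIZE, offset * sizeof(*window));
    memcpy(window + offset, src, nb_samples * sizeof(*window));
    memset(window + offset + nb_samples, 0,
           (HOP_SIZE - nb_samples) * sizeof(*window));
}

int main(void)
{
    float window[WIN_SIZE] = {0};
    const float hop1[HOP_SIZE] = {1, 2, 3, 4};
    const float hop2[HOP_SIZE] = {5, 6, 7, 8};

    push_hop(window, hop1, HOP_SIZE);
    push_hop(window, hop2, HOP_SIZE);  /* window now holds 1..8 */

    for (int i = 0; i < WIN_SIZE; i++)
        printf("%g ", window[i]);
    printf("\n");
    return 0;
}

Because each hop now yields exactly one output picture, config_output() derives the frame rate from hop_size rather than from win_size * (1 - overlap), and a short final input frame is simply zero-padded, as the memset() above illustrates.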

libavfilter/avf_showfreqs.c
@@ -22,7 +22,6 @@
 #include <math.h>
 
 #include "libavutil/tx.h"
-#include "libavutil/audio_fifo.h"
 #include "libavutil/avassert.h"
 #include "libavutil/avstring.h"
 #include "libavutil/channel_layout.h"
@@ -56,6 +55,7 @@ typedef struct ShowFreqsContext {
     av_tx_fn tx_fn;
     AVComplexFloat **fft_input;
     AVComplexFloat **fft_data;
+    AVFrame *window;
     float **avg_data;
     float *window_func_lut;
     float overlap;
@@ -66,8 +66,6 @@ typedef struct ShowFreqsContext {
     int win_size;
     float scale;
     char *colors;
-    AVAudioFifo *fifo;
-    int64_t pts;
 } ShowFreqsContext;
 
 #define OFFSET(x) offsetof(ShowFreqsContext, x)
@@ -138,15 +136,6 @@ static int query_formats(AVFilterContext *ctx)
     return 0;
 }
 
-static av_cold int init(AVFilterContext *ctx)
-{
-    ShowFreqsContext *s = ctx->priv;
-
-    s->pts = AV_NOPTS_VALUE;
-
-    return 0;
-}
-
 static int config_output(AVFilterLink *outlink)
 {
     AVFilterContext *ctx = outlink->src;
@@ -157,7 +146,6 @@ static int config_output(AVFilterLink *outlink)
     s->nb_freq = s->fft_size / 2;
     s->win_size = s->fft_size;
 
-    av_audio_fifo_free(s->fifo);
     av_tx_uninit(&s->fft);
     ret = av_tx_init(&s->fft, &s->tx_fn, AV_TX_FLOAT_FFT, 0, s->fft_size, &scale, 0);
     if (ret < 0) {
@@ -214,14 +202,16 @@ static int config_output(AVFilterLink *outlink)
         s->scale += s->window_func_lut[i] * s->window_func_lut[i];
     }
 
-    outlink->frame_rate = av_make_q(inlink->sample_rate, s->win_size * (1.-s->overlap));
+    s->window = ff_get_audio_buffer(inlink, s->win_size * 2);
+    if (!s->window)
+        return AVERROR(ENOMEM);
+
+    outlink->frame_rate = av_make_q(inlink->sample_rate, s->hop_size);
+    outlink->time_base = av_inv_q(outlink->frame_rate);
     outlink->sample_aspect_ratio = (AVRational){1,1};
     outlink->w = s->w;
     outlink->h = s->h;
 
-    s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->win_size);
-    if (!s->fifo)
-        return AVERROR(ENOMEM);
-
     return 0;
 }
@@ -349,11 +339,12 @@
     }
 }
 
-static int plot_freqs(AVFilterLink *inlink, AVFrame *in)
+static int plot_freqs(AVFilterLink *inlink, int64_t pts)
 {
     AVFilterContext *ctx = inlink->dst;
     AVFilterLink *outlink = ctx->outputs[0];
     ShowFreqsContext *s = ctx->priv;
+    AVFrame *in = s->window;
     const int win_size = s->win_size;
     char *colors, *color, *saveptr = NULL;
     AVFrame *out;
@@ -370,14 +361,10 @@ static int plot_freqs(AVFilterLink *inlink, AVFrame *in)
     for (ch = 0; ch < s->nb_channels; ch++) {
         const float *p = (float *)in->extended_data[ch];
 
-        for (n = 0; n < in->nb_samples; n++) {
+        for (n = 0; n < win_size; n++) {
             s->fft_input[ch][n].re = p[n] * s->window_func_lut[n];
             s->fft_input[ch][n].im = 0;
         }
-        for (; n < win_size; n++) {
-            s->fft_input[ch][n].re = 0;
-            s->fft_input[ch][n].im = 0;
-        }
     }
 
     /* run FFT on each samples set */
@@ -440,37 +427,29 @@ static int plot_freqs(AVFilterLink *inlink, AVFrame *in)
     }
 
     av_free(colors);
-    out->pts = in->pts;
+    out->pts = av_rescale_q(pts, inlink->time_base, outlink->time_base);
     out->sample_aspect_ratio = (AVRational){1,1};
     return ff_filter_frame(outlink, out);
 }
 
-static int filter_frame(AVFilterLink *inlink)
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
     AVFilterContext *ctx = inlink->dst;
     ShowFreqsContext *s = ctx->priv;
-    AVFrame *fin = NULL;
-    int ret = 0;
+    const int offset = s->win_size - s->hop_size;
+    int64_t pts = in->pts;
 
-    fin = ff_get_audio_buffer(inlink, s->win_size);
-    if (!fin) {
-        ret = AVERROR(ENOMEM);
-        goto fail;
+    for (int ch = 0; ch < in->channels; ch++) {
+        float *dst = (float *)s->window->extended_data[ch];
+
+        memmove(dst, &dst[s->hop_size], offset * sizeof(float));
+        memcpy(&dst[offset], in->extended_data[ch], in->nb_samples * sizeof(float));
+        memset(&dst[offset + in->nb_samples], 0, (s->hop_size - in->nb_samples) * sizeof(float));
     }
 
-    fin->pts = s->pts;
-    s->pts += s->hop_size;
-    ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data, s->win_size);
-    if (ret < 0)
-        goto fail;
+    av_frame_free(&in);
 
-    ret = plot_freqs(inlink, fin);
-    av_frame_free(&fin);
-    av_audio_fifo_drain(s->fifo, s->hop_size);
-
-fail:
-    av_frame_free(&fin);
-    return ret;
+    return plot_freqs(inlink, pts);
 }
 
 static int activate(AVFilterContext *ctx)
@@ -478,27 +457,19 @@ static int activate(AVFilterContext *ctx)
     AVFilterLink *inlink = ctx->inputs[0];
     AVFilterLink *outlink = ctx->outputs[0];
    ShowFreqsContext *s = ctx->priv;
-    AVFrame *in = NULL;
-    int ret = 0;
+    AVFrame *in;
+    int ret;
 
     FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
 
-    if (av_audio_fifo_size(s->fifo) < s->win_size)
-        ret = ff_inlink_consume_samples(inlink, s->win_size, s->win_size, &in);
+    ret = ff_inlink_consume_samples(inlink, s->hop_size, s->hop_size, &in);
     if (ret < 0)
         return ret;
-    if (ret > 0) {
-        av_audio_fifo_write(s->fifo, (void **)in->extended_data, in->nb_samples);
-        if (s->pts == AV_NOPTS_VALUE)
-            s->pts = in->pts;
-        av_frame_free(&in);
-    }
 
-    if (av_audio_fifo_size(s->fifo) >= s->win_size) {
-        ret = filter_frame(inlink);
-        if (ret <= 0)
-            return ret;
-    }
+    if (ret > 0)
+        ret = filter_frame(inlink, in);
+    if (ret < 0)
+        return ret;
 
     FF_FILTER_FORWARD_STATUS(inlink, outlink);
     FF_FILTER_FORWARD_WANTED(outlink, inlink);
@@ -524,7 +495,7 @@ static av_cold void uninit(AVFilterContext *ctx)
     av_freep(&s->fft_data);
     av_freep(&s->avg_data);
     av_freep(&s->window_func_lut);
-    av_audio_fifo_free(s->fifo);
+    av_frame_free(&s->window);
 }
 
 static const AVFilterPad showfreqs_inputs[] = {
@@ -545,7 +516,6 @@ static const AVFilterPad showfreqs_outputs[] = {
 const AVFilter ff_avf_showfreqs = {
     .name          = "showfreqs",
     .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a frequencies video output."),
-    .init          = init,
     .uninit        = uninit,
     .priv_size     = sizeof(ShowFreqsContext),
     .activate      = activate,