
lavfi: replace filter_samples by filter_frame

Based on patch by Anton Khirnov
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer 2012-11-28 13:53:48 +01:00
parent 16af29a7a6
commit cd7febd33f
41 changed files with 134 additions and 134 deletions


@@ -52,7 +52,7 @@ Buffer references ownership and permissions
point to only a part of a video buffer.
A reference is usually obtained as input to the start_frame or
-filter_samples method or requested using the ff_get_video_buffer or
+filter_frame method or requested using the ff_get_video_buffer or
ff_get_audio_buffer functions. A new reference on an existing buffer can
be created with the avfilter_ref_buffer. A reference is destroyed using
the avfilter_unref_bufferp function.
@@ -68,14 +68,14 @@ Buffer references ownership and permissions
Here are the (fairly obvious) rules for reference ownership:
-* A reference received by the start_frame or filter_samples method
+* A reference received by the start_frame or filter_frame method
belong to the corresponding filter.
Special exception: for video references: the reference may be used
internally for automatic copying and must not be destroyed before
end_frame; it can be given away to ff_start_frame.
-* A reference passed to ff_start_frame or ff_filter_samples is given
+* A reference passed to ff_start_frame or ff_filter_frame is given
away and must no longer be used.
* A reference created with avfilter_ref_buffer belongs to the code that
@@ -93,16 +93,16 @@ Buffer references ownership and permissions
The AVFilterLink structure has a few AVFilterBufferRef fields. Here are
the rules to handle them:
-* cur_buf is set before the start_frame and filter_samples methods to
+* cur_buf is set before the start_frame and filter_frame methods to
the same reference given as argument to the methods and belongs to the
destination filter of the link. If it has not been cleared after
-end_frame or filter_samples, libavfilter will automatically destroy
+end_frame or filter_frame, libavfilter will automatically destroy
the reference; therefore, any filter that needs to keep the reference
for longer must set cur_buf to NULL.
* out_buf belongs to the source filter of the link and can be used to
store a reference to the buffer that has been sent to the destination.
-If it is not NULL after end_frame or filter_samples, libavfilter will
+If it is not NULL after end_frame or filter_frame, libavfilter will
automatically destroy the reference.
If a video input pad does not have a start_frame method, the default
@@ -179,7 +179,7 @@ Buffer references ownership and permissions
with the WRITE permission.
* Filters that intend to keep a reference after the filtering process
-is finished (after end_frame or filter_samples returns) must have the
+is finished (after end_frame or filter_frame returns) must have the
PRESERVE permission on it and remove the WRITE permission if they
create a new reference to give it away.
@@ -198,7 +198,7 @@ Frame scheduling
Simple filters that output one frame for each input frame should not have
to worry about it.
-start_frame / filter_samples
+start_frame / filter_frame
----------------------------
These methods are called when a frame is pushed to the filter's input.
@@ -233,7 +233,7 @@ Frame scheduling
This method is called when a frame is wanted on an output.
-For an input, it should directly call start_frame or filter_samples on
+For an input, it should directly call start_frame or filter_frame on
the corresponding output.
For a filter, if there are queued frames already ready, one of these
@@ -266,4 +266,4 @@ Frame scheduling
Note that, except for filters that can have queued frames, request_frame
does not push frames: it requests them to its input, and as a reaction,
-the start_frame / filter_samples method will be called and do the work.
+the start_frame / filter_frame method will be called and do the work.
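
To make the renamed callback concrete, here is a minimal sketch (not part of this commit) of a pass-through audio filter written against the convention the diff establishes; it follows the pattern of filters touched below, such as af_ashowinfo and af_volume:

/* Illustrative sketch only: a pass-through audio input pad using the
 * renamed callback; everything here mirrors code visible in this diff. */
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
    /* inspect or modify the samples in buf here */
    return ff_filter_frame(inlink->dst->outputs[0], buf);
}

static const AVFilterPad inputs[] = {
    { .name         = "default",
      .type         = AVMEDIA_TYPE_AUDIO,
      .filter_frame = filter_frame,   /* formerly .filter_samples */
      .min_perms    = AV_PERM_READ, },
    { .name = NULL }
};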


@@ -135,7 +135,7 @@ static int config_output(AVFilterLink *outlink)
return 0;
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
{
AConvertContext *aconvert = inlink->dst->priv;
const int n = insamplesref->audio->nb_samples;
@@ -149,7 +149,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref
avfilter_copy_buffer_ref_props(outsamplesref, insamplesref);
outsamplesref->audio->channel_layout = outlink->channel_layout;
-ret = ff_filter_samples(outlink, outsamplesref);
+ret = ff_filter_frame(outlink, outsamplesref);
avfilter_unref_buffer(insamplesref);
return ret;
}
@@ -164,7 +164,7 @@ AVFilter avfilter_af_aconvert = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
.min_perms = AV_PERM_READ, },
{ .name = NULL}},
.outputs = (const AVFilterPad[]) {{ .name = "default",


@@ -217,7 +217,7 @@ static inline void copy_samples(int nb_inputs, struct amerge_input in[],
}
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
AVFilterContext *ctx = inlink->dst;
AMergeContext *am = ctx->priv;
@@ -290,7 +290,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
}
}
}
-return ff_filter_samples(ctx->outputs[0], outbuf);
+return ff_filter_frame(ctx->outputs[0], outbuf);
}
static av_cold int init(AVFilterContext *ctx, const char *args)
@@ -313,7 +313,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
AVFilterPad pad = {
.name = name,
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
.min_perms = AV_PERM_READ | AV_PERM_PRESERVE,
};
if (!name)


@@ -309,7 +309,7 @@ static int output_frame(AVFilterLink *outlink, int nb_samples)
if (s->next_pts != AV_NOPTS_VALUE)
s->next_pts += nb_samples;
-return ff_filter_samples(outlink, out_buf);
+return ff_filter_frame(outlink, out_buf);
}
/**
@@ -450,7 +450,7 @@ static int request_frame(AVFilterLink *outlink)
return output_frame(outlink, available_samples);
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
AVFilterContext *ctx = inlink->dst;
MixContext *s = ctx->priv;
@@ -502,7 +502,7 @@ static int init(AVFilterContext *ctx, const char *args)
snprintf(name, sizeof(name), "input%d", i);
pad.type = AVMEDIA_TYPE_AUDIO;
pad.name = av_strdup(name);
-pad.filter_samples = filter_samples;
+pad.filter_frame = filter_frame;
ff_insert_inpad(ctx, i, &pad);
}


@@ -170,7 +170,7 @@ static int config_output(AVFilterLink *outlink)
return 0;
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
{
AResampleContext *aresample = inlink->dst->priv;
const int n_in = insamplesref->audio->nb_samples;
@@ -205,7 +205,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
outsamplesref->audio->nb_samples = n_out;
-ret = ff_filter_samples(outlink, outsamplesref);
+ret = ff_filter_frame(outlink, outsamplesref);
aresample->req_fullfilled= 1;
avfilter_unref_buffer(insamplesref);
return ret;
@@ -247,7 +247,7 @@ static int request_frame(AVFilterLink *outlink)
outsamplesref->pts = ROUNDED_DIV(outsamplesref->pts, inlink->sample_rate);
#endif
-ff_filter_samples(outlink, outsamplesref);
+ff_filter_frame(outlink, outsamplesref);
return 0;
}
return ret;
@@ -263,7 +263,7 @@ AVFilter avfilter_af_aresample = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
.min_perms = AV_PERM_READ, },
{ .name = NULL}},
.outputs = (const AVFilterPad[]) {{ .name = "default",


@@ -125,12 +125,12 @@ static int push_samples(AVFilterLink *outlink)
if (asns->next_out_pts != AV_NOPTS_VALUE)
asns->next_out_pts += nb_out_samples;
-ff_filter_samples(outlink, outsamples);
+ff_filter_frame(outlink, outsamples);
asns->req_fullfilled = 1;
return nb_out_samples;
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
AVFilterContext *ctx = inlink->dst;
ASNSContext *asns = ctx->priv;
@@ -186,7 +186,7 @@ AVFilter avfilter_af_asetnsamples = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
.min_perms = AV_PERM_READ|AV_PERM_WRITE
},
{ .name = NULL }


@@ -54,7 +54,7 @@ static void uninit(AVFilterContext *ctx)
av_freep(&s->plane_checksums);
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
AVFilterContext *ctx = inlink->dst;
AShowInfoContext *s = ctx->priv;
@@ -100,7 +100,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
av_log(ctx, AV_LOG_INFO, "]\n");
s->frame++;
-return ff_filter_samples(inlink->dst->outputs[0], buf);
+return ff_filter_frame(inlink->dst->outputs[0], buf);
}
static const AVFilterPad inputs[] = {
@@ -108,7 +108,7 @@ static const AVFilterPad inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = ff_null_get_audio_buffer,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
},
{ NULL },


@@ -122,7 +122,7 @@ static int send_out(AVFilterContext *ctx, int out_id)
av_q2d(ctx->outputs[out_id]->time_base) * buf->pts;
as->var_values[VAR_T1 + out_id] += buf->audio->nb_samples /
(double)ctx->inputs[out_id]->sample_rate;
-ret = ff_filter_samples(ctx->outputs[out_id], buf);
+ret = ff_filter_frame(ctx->outputs[out_id], buf);
queue->nb--;
queue->tail = (queue->tail + 1) % QUEUE_SIZE;
if (as->req[out_id])
@@ -167,7 +167,7 @@ static int request_frame(AVFilterLink *outlink)
return 0;
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
AVFilterContext *ctx = inlink->dst;
AStreamSyncContext *as = ctx->priv;
@@ -191,11 +191,11 @@ AVFilter avfilter_af_astreamsync = {
.inputs = (const AVFilterPad[]) {
{ .name = "in1",
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
.min_perms = AV_PERM_READ | AV_PERM_PRESERVE, },
{ .name = "in2",
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
.min_perms = AV_PERM_READ | AV_PERM_PRESERVE, },
{ .name = NULL }
},


@@ -39,7 +39,7 @@ typedef struct ASyncContext {
float min_delta_sec;
int max_comp;
-/* set by filter_samples() to signal an output frame to request_frame() */
+/* set by filter_frame() to signal an output frame to request_frame() */
int got_output;
} ASyncContext;
@@ -135,7 +135,7 @@ static int request_frame(AVFilterLink *link)
}
buf->pts = s->pts;
-return ff_filter_samples(link, buf);
+return ff_filter_frame(link, buf);
}
return ret;
@@ -155,7 +155,7 @@ static int64_t get_delay(ASyncContext *s)
return avresample_available(s->avr) + avresample_get_delay(s->avr);
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
AVFilterContext *ctx = inlink->dst;
ASyncContext *s = ctx->priv;
@@ -211,7 +211,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
av_samples_set_silence(buf_out->extended_data, out_size - delta,
delta, nb_channels, buf->format);
}
-ret = ff_filter_samples(outlink, buf_out);
+ret = ff_filter_frame(outlink, buf_out);
if (ret < 0)
goto fail;
s->got_output = 1;
@@ -237,7 +237,7 @@ static const AVFilterPad avfilter_af_asyncts_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = filter_samples
+.filter_frame = filter_frame
},
{ NULL }
};


@@ -138,7 +138,7 @@ typedef struct {
RDFTContext *complex_to_real;
FFTSample *correlation;
-// for managing AVFilterPad.request_frame and AVFilterPad.filter_samples
+// for managing AVFilterPad.request_frame and AVFilterPad.filter_frame
int request_fulfilled;
AVFilterBufferRef *dst_buffer;
uint8_t *dst;
@@ -1033,7 +1033,7 @@ static void push_samples(ATempoContext *atempo,
(AVRational){ 1, outlink->sample_rate },
outlink->time_base);
-ff_filter_samples(outlink, atempo->dst_buffer);
+ff_filter_frame(outlink, atempo->dst_buffer);
atempo->dst_buffer = NULL;
atempo->dst = NULL;
atempo->dst_end = NULL;
@@ -1041,7 +1041,7 @@ static void push_samples(ATempoContext *atempo,
atempo->nsamples_out += n_out;
}
-static int filter_samples(AVFilterLink *inlink,
+static int filter_frame(AVFilterLink *inlink,
AVFilterBufferRef *src_buffer)
{
AVFilterContext *ctx = inlink->dst;
@@ -1148,7 +1148,7 @@ AVFilter avfilter_af_atempo = {
.inputs = (const AVFilterPad[]) {
{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
.config_props = config_props,
.min_perms = AV_PERM_READ, },
{ .name = NULL}


@@ -312,7 +312,7 @@ static int channelmap_query_formats(AVFilterContext *ctx)
return 0;
}
-static int channelmap_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int channelmap_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
@@ -354,7 +354,7 @@ static int channelmap_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *bu
memcpy(buf->data, buf->extended_data,
FFMIN(FF_ARRAY_ELEMS(buf->data), nch_out) * sizeof(buf->data[0]));
-return ff_filter_samples(outlink, buf);
+return ff_filter_frame(outlink, buf);
}
static int channelmap_config_input(AVFilterLink *inlink)
@@ -389,7 +389,7 @@ static const AVFilterPad avfilter_af_channelmap_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.min_perms = AV_PERM_READ | AV_PERM_WRITE,
-.filter_samples = channelmap_filter_samples,
+.filter_frame = channelmap_filter_frame,
.config_props = channelmap_config_input
},
{ NULL }


@@ -105,7 +105,7 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
AVFilterContext *ctx = inlink->dst;
int i, ret = 0;
@@ -122,7 +122,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
buf_out->audio->channel_layout =
av_channel_layout_extract_channel(buf->audio->channel_layout, i);
-ret = ff_filter_samples(ctx->outputs[i], buf_out);
+ret = ff_filter_frame(ctx->outputs[i], buf_out);
if (ret < 0)
break;
}
@@ -134,7 +134,7 @@ static const AVFilterPad avfilter_af_channelsplit_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
},
{ NULL }
};


@@ -120,7 +120,7 @@ static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin, in
return out;
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
AVFilterLink *outlink = inlink->dst->outputs[0];
int16_t *taps, *endin, *in, *out;
@@ -148,7 +148,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
// save part of input for next round
memcpy(taps, endin, NUMTAPS * sizeof(*taps));
-ret = ff_filter_samples(outlink, outsamples);
+ret = ff_filter_frame(outlink, outsamples);
avfilter_unref_buffer(insamples);
return ret;
}
@@ -160,7 +160,7 @@ AVFilter avfilter_af_earwax = {
.priv_size = sizeof(EarwaxContext),
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
.config_props = config_input,
.min_perms = AV_PERM_READ, },
{ .name = NULL}},


@@ -94,7 +94,7 @@ static const AVClass join_class = {
.version = LIBAVUTIL_VERSION_INT,
};
-static int filter_samples(AVFilterLink *link, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf)
{
AVFilterContext *ctx = link->dst;
JoinContext *s = ctx->priv;
@@ -229,7 +229,7 @@ static int join_init(AVFilterContext *ctx, const char *args)
snprintf(name, sizeof(name), "input%d", i);
pad.type = AVMEDIA_TYPE_AUDIO;
pad.name = av_strdup(name);
-pad.filter_samples = filter_samples;
+pad.filter_frame = filter_frame;
pad.needs_fifo = 1;
@@ -470,7 +470,7 @@ static int join_request_frame(AVFilterLink *outlink)
priv->nb_in_buffers = ctx->nb_inputs;
buf->buf->priv = priv;
-ret = ff_filter_samples(outlink, buf);
+ret = ff_filter_frame(outlink, buf);
memset(s->input_frames, 0, sizeof(*s->input_frames) * ctx->nb_inputs);


@@ -353,7 +353,7 @@ static int config_props(AVFilterLink *link)
return 0;
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
int ret;
int n = insamples->audio->nb_samples;
@@ -365,7 +365,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
avfilter_copy_buffer_ref_props(outsamples, insamples);
outsamples->audio->channel_layout = outlink->channel_layout;
-ret = ff_filter_samples(outlink, outsamples);
+ret = ff_filter_frame(outlink, outsamples);
avfilter_unref_buffer(insamples);
return ret;
}
@@ -388,7 +388,7 @@ AVFilter avfilter_af_pan = {
{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_props,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
.min_perms = AV_PERM_READ, },
{ .name = NULL}
},


@@ -40,7 +40,7 @@ typedef struct ResampleContext {
int64_t next_pts;
-/* set by filter_samples() to signal an output frame to request_frame() */
+/* set by filter_frame() to signal an output frame to request_frame() */
int got_output;
} ResampleContext;
@@ -162,12 +162,12 @@ static int request_frame(AVFilterLink *outlink)
}
buf->pts = s->next_pts;
-return ff_filter_samples(outlink, buf);
+return ff_filter_frame(outlink, buf);
}
return ret;
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
AVFilterContext *ctx = inlink->dst;
ResampleContext *s = ctx->priv;
@@ -224,7 +224,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
s->next_pts = buf_out->pts + buf_out->audio->nb_samples;
-ret = ff_filter_samples(outlink, buf_out);
+ret = ff_filter_frame(outlink, buf_out);
s->got_output = 1;
}
@@ -232,7 +232,7 @@ fail:
avfilter_unref_buffer(buf);
} else {
buf->format = outlink->format;
-ret = ff_filter_samples(outlink, buf);
+ret = ff_filter_frame(outlink, buf);
s->got_output = 1;
}
@@ -243,7 +243,7 @@ static const AVFilterPad avfilter_af_resample_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
.min_perms = AV_PERM_READ
},
{ NULL }


@@ -84,7 +84,7 @@ static char *get_metadata_val(AVFilterBufferRef *insamples, const char *key)
return e && e->value ? e->value : NULL;
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
int i;
SilenceDetectContext *silence = inlink->dst->priv;
@@ -132,7 +132,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
}
}
-return ff_filter_samples(inlink->dst->outputs[0], insamples);
+return ff_filter_frame(inlink->dst->outputs[0], insamples);
}
static int query_formats(AVFilterContext *ctx)
@@ -173,7 +173,7 @@ AVFilter avfilter_af_silencedetect = {
{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = ff_null_get_audio_buffer,
-.filter_samples = filter_samples, },
+.filter_frame = filter_frame, },
{ .name = NULL }
},
.outputs = (const AVFilterPad[]) {


@@ -110,7 +110,7 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
VolumeContext *vol = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
@@ -169,7 +169,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
}
}
}
-return ff_filter_samples(outlink, insamples);
+return ff_filter_frame(outlink, insamples);
}
AVFilter avfilter_af_volume = {
@@ -181,7 +181,7 @@ AVFilter avfilter_af_volume = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
.min_perms = AV_PERM_READ|AV_PERM_WRITE},
{ .name = NULL}},


@@ -49,7 +49,7 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samples)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *samples)
{
AVFilterContext *ctx = inlink->dst;
VolDetectContext *vd = ctx->priv;
@@ -70,7 +70,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samples)
vd->histogram[pcm[i] + 0x8000]++;
}
-return ff_filter_samples(inlink->dst->outputs[0], samples);
+return ff_filter_frame(inlink->dst->outputs[0], samples);
}
#define MAX_DB 91
@@ -143,7 +143,7 @@ AVFilter avfilter_af_volumedetect = {
{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = ff_null_get_audio_buffer,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
.min_perms = AV_PERM_READ, },
{ .name = NULL }
},


@@ -22,7 +22,7 @@
#include "avfilter.h"
#include "internal.h"
-static int null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
+static int null_filter_frame(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
avfilter_unref_bufferp(&samplesref);
return 0;
@@ -32,7 +32,7 @@ static const AVFilterPad avfilter_asink_anullsink_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = null_filter_samples,
+.filter_frame = null_filter_frame,
},
{ NULL },
};


@@ -237,7 +237,7 @@ static int request_frame(AVFilterLink *outlink)
samplesref->audio->sample_rate = eval->sample_rate;
eval->pts += eval->nb_samples;
-ff_filter_samples(outlink, samplesref);
+ff_filter_frame(outlink, samplesref);
return 0;
}


@@ -111,7 +111,7 @@ static int request_frame(AVFilterLink *outlink)
samplesref->audio->channel_layout = null->channel_layout;
samplesref->audio->sample_rate = outlink->sample_rate;
-ff_filter_samples(outlink, avfilter_ref_buffer(samplesref, ~0));
+ff_filter_frame(outlink, avfilter_ref_buffer(samplesref, ~0));
avfilter_unref_buffer(samplesref);
null->pts += null->nb_samples;


@@ -265,7 +265,7 @@ static int request_frame(AVFilterLink *outlink)
flite->wave_samples += nb_samples * flite->wave->num_channels;
flite->wave_nb_samples -= nb_samples;
-return ff_filter_samples(outlink, samplesref);
+return ff_filter_frame(outlink, samplesref);
}
AVFilter avfilter_asrc_flite = {


@@ -157,30 +157,30 @@ fail:
return NULL;
}
-static int default_filter_samples(AVFilterLink *link,
+static int default_filter_frame(AVFilterLink *link,
AVFilterBufferRef *samplesref)
{
-return ff_filter_samples(link->dst->outputs[0], samplesref);
+return ff_filter_frame(link->dst->outputs[0], samplesref);
}
-int ff_filter_samples_framed(AVFilterLink *link, AVFilterBufferRef *samplesref)
+int ff_filter_frame_framed(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
-int (*filter_samples)(AVFilterLink *, AVFilterBufferRef *);
+int (*filter_frame)(AVFilterLink *, AVFilterBufferRef *);
AVFilterPad *src = link->srcpad;
AVFilterPad *dst = link->dstpad;
int64_t pts;
AVFilterBufferRef *buf_out;
int ret;
-FF_TPRINTF_START(NULL, filter_samples); ff_tlog_link(NULL, link, 1);
+FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1);
if (link->closed) {
avfilter_unref_buffer(samplesref);
return AVERROR_EOF;
}
-if (!(filter_samples = dst->filter_samples))
-filter_samples = default_filter_samples;
+if (!(filter_frame = dst->filter_frame))
+filter_frame = default_filter_frame;
av_assert1((samplesref->perms & src->min_perms) == src->min_perms);
samplesref->perms &= ~ src->rej_perms;
@@ -213,12 +213,12 @@ int ff_filter_samples_framed(AVFilterLink *link, AVFilterBufferRef *samplesref)
link->cur_buf = buf_out;
pts = buf_out->pts;
-ret = filter_samples(link, buf_out);
+ret = filter_frame(link, buf_out);
ff_update_link_current_pts(link, pts);
return ret;
}
-int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
+int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
int insamples = samplesref->audio->nb_samples, inpos = 0, nb_samples;
AVFilterBufferRef *pbuf = link->partial_buf;
@@ -232,7 +232,7 @@ int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
if (!link->min_samples ||
(!pbuf &&
insamples >= link->min_samples && insamples <= link->max_samples)) {
-return ff_filter_samples_framed(link, samplesref);
+return ff_filter_frame_framed(link, samplesref);
}
/* Handle framing (min_samples, max_samples) */
while (insamples) {
@@ -259,7 +259,7 @@ int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
insamples -= nb_samples;
pbuf->audio->nb_samples += nb_samples;
if (pbuf->audio->nb_samples >= link->min_samples) {
-ret = ff_filter_samples_framed(link, pbuf);
+ret = ff_filter_frame_framed(link, pbuf);
pbuf = NULL;
}
}


@@ -74,13 +74,13 @@ AVFilterBufferRef *ff_get_audio_buffer(AVFilterLink *link, int perms,
* @return >= 0 on success, a negative AVERROR on error. The receiving filter
* is responsible for unreferencing samplesref in case of error.
*/
-int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref);
+int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *samplesref);
/**
* Send a buffer of audio samples to the next link, without checking
* min_samples.
*/
-int ff_filter_samples_framed(AVFilterLink *link,
+int ff_filter_frame_framed(AVFilterLink *link,
AVFilterBufferRef *samplesref);
#endif /* AVFILTER_AUDIO_H */
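
As a usage note (a sketch, not from the commit): on the caller side, a source filter pushes audio downstream from its request_frame callback through the renamed ff_filter_frame(), the same way asrc_aevalsrc and asrc_anullsrc do later in this diff; the 1024-sample count here is an arbitrary illustration:

/* Sketch of the caller side of ff_filter_frame(); nb_samples is arbitrary. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterBufferRef *samplesref =
        ff_get_audio_buffer(outlink, AV_PERM_WRITE, 1024);
    if (!samplesref)
        return AVERROR(ENOMEM);
    /* ... fill samplesref->extended_data and set samplesref->pts ... */
    return ff_filter_frame(outlink, samplesref);
}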


@@ -185,7 +185,7 @@ static void push_frame(AVFilterContext *ctx, unsigned in_no,
ff_end_frame(outlink);
break;
case AVMEDIA_TYPE_AUDIO:
-ff_filter_samples(outlink, buf);
+ff_filter_frame(outlink, buf);
break;
}
}
@@ -244,7 +244,7 @@ static int end_frame(AVFilterLink *inlink)
return 0;
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
process_frame(inlink, buf);
return 0; /* enhancement: handle error return */
@@ -297,7 +297,7 @@ static void send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no)
av_samples_set_silence(buf->extended_data, 0, frame_nb_samples,
nb_channels, outlink->format);
buf->pts = base_pts + av_rescale_q(sent, rate_tb, outlink->time_base);
-ff_filter_samples(outlink, buf);
+ff_filter_frame(outlink, buf);
sent += frame_nb_samples;
nb_samples -= frame_nb_samples;
}
@@ -397,7 +397,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
pad.draw_slice = draw_slice;
pad.end_frame = end_frame;
} else {
-pad.filter_samples = filter_samples;
+pad.filter_frame = filter_frame;
}
ff_insert_inpad(ctx, ctx->nb_inputs, &pad);
}


@@ -281,7 +281,7 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFilterBufferRef *insampl
return add_samples;
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
AVFilterContext *ctx = inlink->dst;
ShowSpectrumContext *showspectrum = ctx->priv;
@@ -310,7 +310,7 @@ AVFilter avfilter_avf_showspectrum = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
},
{ .name = NULL }


@@ -179,7 +179,7 @@ static int request_frame(AVFilterLink *outlink)
#define MAX_INT16 ((1<<15) -1)
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
@@ -240,7 +240,7 @@ AVFilter avfilter_avf_showwaves = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
},
{ .name = NULL }


@@ -343,7 +343,7 @@ int ff_request_frame(AVFilterLink *link)
if (ret == AVERROR_EOF && link->partial_buf) {
AVFilterBufferRef *pbuf = link->partial_buf;
link->partial_buf = NULL;
-ff_filter_samples_framed(link, pbuf);
+ff_filter_frame_framed(link, pbuf);
return 0;
}
if (ret == AVERROR_EOF)


@@ -339,7 +339,7 @@ struct AVFilterPad {
* must ensure that samplesref is properly unreferenced on error if it
* hasn't been passed on to another filter.
*/
-int (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref);
+int (*filter_frame)(AVFilterLink *link, AVFilterBufferRef *samplesref);
/**
* Frame poll callback. This returns the number of immediately available
@@ -678,7 +678,7 @@ struct AVFilterLink {
int partial_buf_size;
/**
-* Minimum number of samples to filter at once. If filter_samples() is
+* Minimum number of samples to filter at once. If filter_frame() is
* called with fewer samples, it will accumulate them in partial_buf.
* This field and the related ones must not be changed after filtering
* has started.
@@ -687,7 +687,7 @@ struct AVFilterLink {
int min_samples;
/**
-* Maximum number of samples to filter at once. If filter_samples() is
+* Maximum number of samples to filter at once. If filter_frame() is
* called with more samples, it will split them.
*/
int max_samples;
@@ -703,7 +703,7 @@ struct AVFilterLink {
/**
* True if the link is closed.
-* If set, all attemps of start_frame, filter_samples or request_frame
+* If set, all attemps of start_frame, filter_frame or request_frame
* will fail with AVERROR_EOF, and if necessary the reference will be
* destroyed.
* If request_frame returns AVERROR_EOF, this flag is set on the
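
The min_samples/max_samples documentation above describes the framing logic implemented in ff_filter_frame() (see the audio.c hunks earlier in this diff). A hedged sketch of how a filter might use it, assuming the fields may be set from an input pad's config_props callback before filtering starts, as the field comment requires; the value 1024 is arbitrary:

/* Sketch only: request fixed-size audio frames on an input link, so that
 * ff_filter_frame() accumulates short frames in partial_buf and splits
 * long ones before invoking this pad's filter_frame callback.
 * (link->partial_buf_size, visible above, sizes the accumulation buffer;
 * here it is assumed to be handled by the framework.) */
static int config_input(AVFilterLink *inlink)
{
    inlink->min_samples = 1024;
    inlink->max_samples = 1024;
    return 0;
}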


@@ -169,7 +169,7 @@ static const AVFilterPad avfilter_asink_abuffer_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = start_frame,
+.filter_frame = start_frame,
.min_perms = AV_PERM_READ,
.needs_fifo = 1
},


@@ -379,7 +379,7 @@ static int request_frame(AVFilterLink *link)
return ret;
break;
case AVMEDIA_TYPE_AUDIO:
-ret = ff_filter_samples(link, buf);
+ret = ff_filter_frame(link, buf);
break;
default:
avfilter_unref_bufferp(&buf);


@@ -436,7 +436,7 @@ static int gate_update(struct integrator *integ, double power,
return gate_hist_pos;
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
int i, ch;
AVFilterContext *ctx = inlink->dst;
@@ -638,7 +638,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
}
}
-return ff_filter_samples(ctx->outputs[ebur128->do_video], insamples);
+return ff_filter_frame(ctx->outputs[ebur128->do_video], insamples);
}
static int query_formats(AVFilterContext *ctx)
@@ -740,7 +740,7 @@ AVFilter avfilter_af_ebur128 = {
{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = ff_null_get_audio_buffer,
-.filter_samples = filter_samples, },
+.filter_frame = filter_frame, },
{ .name = NULL }
},
.outputs = NULL,


@@ -511,7 +511,7 @@ end:
switch (inlink->type) {
case AVMEDIA_TYPE_VIDEO: return ff_start_frame (inlink->dst->outputs[0], ref);
-case AVMEDIA_TYPE_AUDIO: return ff_filter_samples(inlink->dst->outputs[0], ref);
+case AVMEDIA_TYPE_AUDIO: return ff_filter_frame(inlink->dst->outputs[0], ref);
}
return AVERROR(ENOSYS);
}
@@ -562,7 +562,7 @@ AVFilter avfilter_af_asendcmd = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = ff_null_get_audio_buffer,
-.filter_samples = process_frame,
+.filter_frame = process_frame,
},
{ .name = NULL }
},


@@ -174,7 +174,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
setpts->var_values[VAR_N] += 1.0;
if (setpts->type == AVMEDIA_TYPE_AUDIO) {
setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += inpicref->audio->nb_samples;
-return ff_filter_samples(inlink->dst->outputs[0], outpicref);
+return ff_filter_frame(inlink->dst->outputs[0], outpicref);
} else
return ff_start_frame (inlink->dst->outputs[0], outpicref);
}
@@ -201,7 +201,7 @@ AVFilter avfilter_af_asetpts = {
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = ff_null_get_audio_buffer,
.config_props = config_input,
-.filter_samples = filter_frame,
+.filter_frame = filter_frame,
},
{ .name = NULL }
},


@@ -120,7 +120,7 @@ static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
return ff_start_frame(outlink, picref);
}
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
@@ -133,7 +133,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
outlink->time_base.num, outlink->time_base.den, samplesref->pts);
}
-return ff_filter_samples(outlink, samplesref);
+return ff_filter_frame(outlink, samplesref);
}
#if CONFIG_SETTB_FILTER
@@ -181,7 +181,7 @@ AVFilter avfilter_af_asettb = {
{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = ff_null_get_audio_buffer,
-.filter_samples = filter_samples, },
+.filter_frame = filter_frame, },
{ .name = NULL }
},
.outputs = (const AVFilterPad[]) {


@@ -228,7 +228,7 @@ static int return_audio_frame(AVFilterContext *ctx)
buf_out = s->buf_out;
s->buf_out = NULL;
}
-return ff_filter_samples(link, buf_out);
+return ff_filter_frame(link, buf_out);
}
static int request_frame(AVFilterLink *outlink)
@@ -257,7 +257,7 @@ static int request_frame(AVFilterLink *outlink)
if (outlink->request_samples) {
return return_audio_frame(outlink->src);
} else {
-ret = ff_filter_samples(outlink, fifo->root.next->buf);
+ret = ff_filter_frame(outlink, fifo->root.next->buf);
queue_pop(fifo);
}
break;
@@ -308,7 +308,7 @@ static const AVFilterPad avfilter_af_afifo_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = ff_null_get_audio_buffer,
-.filter_samples = add_to_queue,
+.filter_frame = add_to_queue,
.min_perms = AV_PERM_PRESERVE,
},
{ NULL }


@@ -147,7 +147,7 @@ struct AVFilterPad {
* must ensure that samplesref is properly unreferenced on error if it
* hasn't been passed on to another filter.
*/
-int (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref);
+int (*filter_frame)(AVFilterLink *link, AVFilterBufferRef *samplesref);
/**
* Frame poll callback. This returns the number of immediately available


@@ -268,7 +268,7 @@ AVFilter avfilter_vsink_buffersink = {
.outputs = (const AVFilterPad[]) {{ .name = NULL }},
};
-static int filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
+static int filter_frame(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
end_frame(link);
return 0;
@@ -338,7 +338,7 @@ AVFilter avfilter_asink_ffabuffersink = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
.min_perms = AV_PERM_READ | AV_PERM_PRESERVE, },
{ .name = NULL }},
.outputs = (const AVFilterPad[]) {{ .name = NULL }},
@@ -354,7 +354,7 @@ AVFilter avfilter_asink_abuffersink = {
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
-.filter_samples = filter_samples,
+.filter_frame = filter_frame,
.min_perms = AV_PERM_READ | AV_PERM_PRESERVE, },
{ .name = NULL }},
.outputs = (const AVFilterPad[]) {{ .name = NULL }},
@@ -372,13 +372,13 @@ int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
if (ctx->filter-> inputs[0].start_frame ==
avfilter_vsink_buffer. inputs[0].start_frame ||
-ctx->filter-> inputs[0].filter_samples ==
-avfilter_asink_abuffer.inputs[0].filter_samples)
+ctx->filter-> inputs[0].filter_frame ==
+avfilter_asink_abuffer.inputs[0].filter_frame)
return ff_buffersink_read_compat(ctx, buf);
av_assert0(ctx->filter-> inputs[0].end_frame ==
avfilter_vsink_ffbuffersink. inputs[0].end_frame ||
-ctx->filter-> inputs[0].filter_samples ==
-avfilter_asink_ffabuffersink.inputs[0].filter_samples);
+ctx->filter-> inputs[0].filter_frame ==
+avfilter_asink_ffabuffersink.inputs[0].filter_frame);
ret = av_buffersink_get_buffer_ref(ctx, &tbuf,
buf ? 0 : AV_BUFFERSINK_FLAG_PEEK);
@@ -399,11 +399,11 @@ int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
AVFilterLink *link = ctx->inputs[0];
int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
-if (ctx->filter-> inputs[0].filter_samples ==
-avfilter_asink_abuffer.inputs[0].filter_samples)
+if (ctx->filter-> inputs[0].filter_frame ==
+avfilter_asink_abuffer.inputs[0].filter_frame)
return ff_buffersink_read_samples_compat(ctx, buf, nb_samples);
-av_assert0(ctx->filter-> inputs[0].filter_samples ==
-avfilter_asink_ffabuffersink.inputs[0].filter_samples);
+av_assert0(ctx->filter-> inputs[0].filter_frame ==
+avfilter_asink_ffabuffersink.inputs[0].filter_frame);
tbuf = ff_get_audio_buffer(link, AV_PERM_WRITE, nb_samples);
if (!tbuf)


@@ -142,7 +142,7 @@ AVFilter avfilter_vf_split = {
.outputs = NULL,
};
-static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
+static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
{
AVFilterContext *ctx = inlink->dst;
int i, ret = 0;
@@ -155,7 +155,7 @@ static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
break;
}
-ret = ff_filter_samples(inlink->dst->outputs[i], buf_out);
+ret = ff_filter_frame(inlink->dst->outputs[i], buf_out);
if (ret < 0)
break;
}
@@ -168,7 +168,7 @@ static const AVFilterPad avfilter_af_asplit_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = ff_null_get_audio_buffer,
-.filter_samples = filter_samples
+.filter_frame = filter_frame
},
{ NULL }
};


@@ -577,7 +577,7 @@ static int movie_push_frame(AVFilterContext *ctx, unsigned out_id)
ff_end_frame(outlink);
break;
case AVMEDIA_TYPE_AUDIO:
-ff_filter_samples(outlink, buf);
+ff_filter_frame(outlink, buf);
break;
}