1
0
mirror of https://github.com/FFmpeg/FFmpeg.git synced 2024-12-23 12:43:46 +02:00

lavfi: add error handling to filter_samples().

This commit is contained in:
Anton Khirnov 2012-07-02 20:13:40 +02:00
parent 8d18bc550e
commit cd99146253
15 changed files with 136 additions and 69 deletions

View File

@@ -311,9 +311,7 @@ static int output_frame(AVFilterLink *outlink, int nb_samples)
if (s->next_pts != AV_NOPTS_VALUE) if (s->next_pts != AV_NOPTS_VALUE)
s->next_pts += nb_samples; s->next_pts += nb_samples;
ff_filter_samples(outlink, out_buf); return ff_filter_samples(outlink, out_buf);
return 0;
} }
/** /**
@@ -454,31 +452,37 @@ static int request_frame(AVFilterLink *outlink)
return output_frame(outlink, available_samples); return output_frame(outlink, available_samples);
} }
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{ {
AVFilterContext *ctx = inlink->dst; AVFilterContext *ctx = inlink->dst;
MixContext *s = ctx->priv; MixContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0]; AVFilterLink *outlink = ctx->outputs[0];
int i; int i, ret = 0;
for (i = 0; i < ctx->nb_inputs; i++) for (i = 0; i < ctx->nb_inputs; i++)
if (ctx->inputs[i] == inlink) if (ctx->inputs[i] == inlink)
break; break;
if (i >= ctx->nb_inputs) { if (i >= ctx->nb_inputs) {
av_log(ctx, AV_LOG_ERROR, "unknown input link\n"); av_log(ctx, AV_LOG_ERROR, "unknown input link\n");
return; ret = AVERROR(EINVAL);
goto fail;
} }
if (i == 0) { if (i == 0) {
int64_t pts = av_rescale_q(buf->pts, inlink->time_base, int64_t pts = av_rescale_q(buf->pts, inlink->time_base,
outlink->time_base); outlink->time_base);
frame_list_add_frame(s->frame_list, buf->audio->nb_samples, pts); ret = frame_list_add_frame(s->frame_list, buf->audio->nb_samples, pts);
if (ret < 0)
goto fail;
} }
av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data, ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
buf->audio->nb_samples); buf->audio->nb_samples);
fail:
avfilter_unref_buffer(buf); avfilter_unref_buffer(buf);
return ret;
} }
static int init(AVFilterContext *ctx, const char *args) static int init(AVFilterContext *ctx, const char *args)

View File

@@ -136,18 +136,18 @@ static int request_frame(AVFilterLink *link)
avresample_convert(s->avr, (void**)buf->extended_data, buf->linesize[0], avresample_convert(s->avr, (void**)buf->extended_data, buf->linesize[0],
nb_samples, NULL, 0, 0); nb_samples, NULL, 0, 0);
buf->pts = s->pts; buf->pts = s->pts;
ff_filter_samples(link, buf); return ff_filter_samples(link, buf);
return 0;
} }
return ret; return ret;
} }
static void write_to_fifo(ASyncContext *s, AVFilterBufferRef *buf) static int write_to_fifo(ASyncContext *s, AVFilterBufferRef *buf)
{ {
avresample_convert(s->avr, NULL, 0, 0, (void**)buf->extended_data, int ret = avresample_convert(s->avr, NULL, 0, 0, (void**)buf->extended_data,
buf->linesize[0], buf->audio->nb_samples); buf->linesize[0], buf->audio->nb_samples);
avfilter_unref_buffer(buf); avfilter_unref_buffer(buf);
return ret;
} }
/* get amount of data currently buffered, in samples */ /* get amount of data currently buffered, in samples */
@@ -156,7 +156,7 @@ static int64_t get_delay(ASyncContext *s)
return avresample_available(s->avr) + avresample_get_delay(s->avr); return avresample_available(s->avr) + avresample_get_delay(s->avr);
} }
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{ {
AVFilterContext *ctx = inlink->dst; AVFilterContext *ctx = inlink->dst;
ASyncContext *s = ctx->priv; ASyncContext *s = ctx->priv;
@@ -164,7 +164,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
int nb_channels = av_get_channel_layout_nb_channels(buf->audio->channel_layout); int nb_channels = av_get_channel_layout_nb_channels(buf->audio->channel_layout);
int64_t pts = (buf->pts == AV_NOPTS_VALUE) ? buf->pts : int64_t pts = (buf->pts == AV_NOPTS_VALUE) ? buf->pts :
av_rescale_q(buf->pts, inlink->time_base, outlink->time_base); av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
int out_size; int out_size, ret;
int64_t delta; int64_t delta;
/* buffer data until we get the first timestamp */ /* buffer data until we get the first timestamp */
@@ -172,14 +172,12 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
if (pts != AV_NOPTS_VALUE) { if (pts != AV_NOPTS_VALUE) {
s->pts = pts - get_delay(s); s->pts = pts - get_delay(s);
} }
write_to_fifo(s, buf); return write_to_fifo(s, buf);
return;
} }
/* now wait for the next timestamp */ /* now wait for the next timestamp */
if (pts == AV_NOPTS_VALUE) { if (pts == AV_NOPTS_VALUE) {
write_to_fifo(s, buf); return write_to_fifo(s, buf);
return;
} }
/* when we have two timestamps, compute how many samples would we have /* when we have two timestamps, compute how many samples would we have
@@ -202,8 +200,10 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
if (out_size > 0) { if (out_size > 0) {
AVFilterBufferRef *buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, AVFilterBufferRef *buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
out_size); out_size);
if (!buf_out) if (!buf_out) {
return; ret = AVERROR(ENOMEM);
goto fail;
}
avresample_read(s->avr, (void**)buf_out->extended_data, out_size); avresample_read(s->avr, (void**)buf_out->extended_data, out_size);
buf_out->pts = s->pts; buf_out->pts = s->pts;
@@ -212,7 +212,9 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
av_samples_set_silence(buf_out->extended_data, out_size - delta, av_samples_set_silence(buf_out->extended_data, out_size - delta,
delta, nb_channels, buf->format); delta, nb_channels, buf->format);
} }
ff_filter_samples(outlink, buf_out); ret = ff_filter_samples(outlink, buf_out);
if (ret < 0)
goto fail;
s->got_output = 1; s->got_output = 1;
} else { } else {
av_log(ctx, AV_LOG_WARNING, "Non-monotonous timestamps, dropping " av_log(ctx, AV_LOG_WARNING, "Non-monotonous timestamps, dropping "
@@ -223,9 +225,13 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
avresample_read(s->avr, NULL, avresample_available(s->avr)); avresample_read(s->avr, NULL, avresample_available(s->avr));
s->pts = pts - avresample_get_delay(s->avr); s->pts = pts - avresample_get_delay(s->avr);
avresample_convert(s->avr, NULL, 0, 0, (void**)buf->extended_data, ret = avresample_convert(s->avr, NULL, 0, 0, (void**)buf->extended_data,
buf->linesize[0], buf->audio->nb_samples); buf->linesize[0], buf->audio->nb_samples);
fail:
avfilter_unref_buffer(buf); avfilter_unref_buffer(buf);
return ret;
} }
AVFilter avfilter_af_asyncts = { AVFilter avfilter_af_asyncts = {

View File

@@ -313,7 +313,7 @@ static int channelmap_query_formats(AVFilterContext *ctx)
return 0; return 0;
} }
static void channelmap_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) static int channelmap_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{ {
AVFilterContext *ctx = inlink->dst; AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0]; AVFilterLink *outlink = ctx->outputs[0];
@@ -330,8 +330,10 @@ static void channelmap_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *b
if (nch_out > FF_ARRAY_ELEMS(buf->data)) { if (nch_out > FF_ARRAY_ELEMS(buf->data)) {
uint8_t **new_extended_data = uint8_t **new_extended_data =
av_mallocz(nch_out * sizeof(*buf->extended_data)); av_mallocz(nch_out * sizeof(*buf->extended_data));
if (!new_extended_data) if (!new_extended_data) {
return; avfilter_unref_buffer(buf);
return AVERROR(ENOMEM);
}
if (buf->extended_data == buf->data) { if (buf->extended_data == buf->data) {
buf->extended_data = new_extended_data; buf->extended_data = new_extended_data;
} else { } else {
@@ -353,7 +355,7 @@ static void channelmap_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *b
memcpy(buf->data, buf->extended_data, memcpy(buf->data, buf->extended_data,
FFMIN(FF_ARRAY_ELEMS(buf->data), nch_out) * sizeof(buf->data[0])); FFMIN(FF_ARRAY_ELEMS(buf->data), nch_out) * sizeof(buf->data[0]));
ff_filter_samples(outlink, buf); return ff_filter_samples(outlink, buf);
} }
static int channelmap_config_input(AVFilterLink *inlink) static int channelmap_config_input(AVFilterLink *inlink)

View File

@@ -110,24 +110,29 @@ static int query_formats(AVFilterContext *ctx)
return 0; return 0;
} }
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{ {
AVFilterContext *ctx = inlink->dst; AVFilterContext *ctx = inlink->dst;
int i; int i, ret = 0;
for (i = 0; i < ctx->nb_outputs; i++) { for (i = 0; i < ctx->nb_outputs; i++) {
AVFilterBufferRef *buf_out = avfilter_ref_buffer(buf, ~AV_PERM_WRITE); AVFilterBufferRef *buf_out = avfilter_ref_buffer(buf, ~AV_PERM_WRITE);
if (!buf_out) if (!buf_out) {
return; ret = AVERROR(ENOMEM);
break;
}
buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i]; buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i];
buf_out->audio->channel_layout = buf_out->audio->channel_layout =
av_channel_layout_extract_channel(buf->audio->channel_layout, i); av_channel_layout_extract_channel(buf->audio->channel_layout, i);
ff_filter_samples(ctx->outputs[i], buf_out); ret = ff_filter_samples(ctx->outputs[i], buf_out);
if (ret < 0)
break;
} }
avfilter_unref_buffer(buf); avfilter_unref_buffer(buf);
return ret;
} }
AVFilter avfilter_af_channelsplit = { AVFilter avfilter_af_channelsplit = {

View File

@@ -92,7 +92,7 @@ static const AVClass join_class = {
.version = LIBAVUTIL_VERSION_INT, .version = LIBAVUTIL_VERSION_INT,
}; };
static void filter_samples(AVFilterLink *link, AVFilterBufferRef *buf) static int filter_samples(AVFilterLink *link, AVFilterBufferRef *buf)
{ {
AVFilterContext *ctx = link->dst; AVFilterContext *ctx = link->dst;
JoinContext *s = ctx->priv; JoinContext *s = ctx->priv;
@@ -104,6 +104,8 @@ static void filter_samples(AVFilterLink *link, AVFilterBufferRef *buf)
av_assert0(i < ctx->nb_inputs); av_assert0(i < ctx->nb_inputs);
av_assert0(!s->input_frames[i]); av_assert0(!s->input_frames[i]);
s->input_frames[i] = buf; s->input_frames[i] = buf;
return 0;
} }
static int parse_maps(AVFilterContext *ctx) static int parse_maps(AVFilterContext *ctx)
@@ -468,11 +470,11 @@ static int join_request_frame(AVFilterLink *outlink)
priv->nb_in_buffers = ctx->nb_inputs; priv->nb_in_buffers = ctx->nb_inputs;
buf->buf->priv = priv; buf->buf->priv = priv;
ff_filter_samples(outlink, buf); ret = ff_filter_samples(outlink, buf);
memset(s->input_frames, 0, sizeof(*s->input_frames) * ctx->nb_inputs); memset(s->input_frames, 0, sizeof(*s->input_frames) * ctx->nb_inputs);
return 0; return ret;
fail: fail:
avfilter_unref_buffer(buf); avfilter_unref_buffer(buf);

View File

@@ -157,21 +157,21 @@ static int request_frame(AVFilterLink *outlink)
} }
buf->pts = s->next_pts; buf->pts = s->next_pts;
ff_filter_samples(outlink, buf); return ff_filter_samples(outlink, buf);
return 0;
} }
return ret; return ret;
} }
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{ {
AVFilterContext *ctx = inlink->dst; AVFilterContext *ctx = inlink->dst;
ResampleContext *s = ctx->priv; ResampleContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0]; AVFilterLink *outlink = ctx->outputs[0];
int ret;
if (s->avr) { if (s->avr) {
AVFilterBufferRef *buf_out; AVFilterBufferRef *buf_out;
int delay, nb_samples, ret; int delay, nb_samples;
/* maximum possible samples lavr can output */ /* maximum possible samples lavr can output */
delay = avresample_get_delay(s->avr); delay = avresample_get_delay(s->avr);
@@ -180,10 +180,19 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
AV_ROUND_UP); AV_ROUND_UP);
buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples); buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
if (!buf_out) {
ret = AVERROR(ENOMEM);
goto fail;
}
ret = avresample_convert(s->avr, (void**)buf_out->extended_data, ret = avresample_convert(s->avr, (void**)buf_out->extended_data,
buf_out->linesize[0], nb_samples, buf_out->linesize[0], nb_samples,
(void**)buf->extended_data, buf->linesize[0], (void**)buf->extended_data, buf->linesize[0],
buf->audio->nb_samples); buf->audio->nb_samples);
if (ret < 0) {
avfilter_unref_buffer(buf_out);
goto fail;
}
av_assert0(!avresample_available(s->avr)); av_assert0(!avresample_available(s->avr));
@@ -209,14 +218,18 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
s->next_pts = buf_out->pts + buf_out->audio->nb_samples; s->next_pts = buf_out->pts + buf_out->audio->nb_samples;
ff_filter_samples(outlink, buf_out); ret = ff_filter_samples(outlink, buf_out);
s->got_output = 1; s->got_output = 1;
} }
fail:
avfilter_unref_buffer(buf); avfilter_unref_buffer(buf);
} else { } else {
ff_filter_samples(outlink, buf); ret = ff_filter_samples(outlink, buf);
s->got_output = 1; s->got_output = 1;
} }
return ret;
} }
AVFilter avfilter_af_resample = { AVFilter avfilter_af_resample = {

View File

@@ -19,7 +19,10 @@
#include "avfilter.h" #include "avfilter.h"
#include "internal.h" #include "internal.h"
static void null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref) { } static int null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
return 0;
}
AVFilter avfilter_asink_anullsink = { AVFilter avfilter_asink_anullsink = {
.name = "anullsink", .name = "anullsink",

View File

@@ -146,15 +146,15 @@ fail:
return NULL; return NULL;
} }
static void default_filter_samples(AVFilterLink *link, static int default_filter_samples(AVFilterLink *link,
AVFilterBufferRef *samplesref) AVFilterBufferRef *samplesref)
{ {
ff_filter_samples(link->dst->outputs[0], samplesref); return ff_filter_samples(link->dst->outputs[0], samplesref);
} }
void ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref) int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
{ {
void (*filter_samples)(AVFilterLink *, AVFilterBufferRef *); int (*filter_samples)(AVFilterLink *, AVFilterBufferRef *);
AVFilterPad *dst = link->dstpad; AVFilterPad *dst = link->dstpad;
AVFilterBufferRef *buf_out; AVFilterBufferRef *buf_out;
@@ -185,6 +185,6 @@ void ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
} else } else
buf_out = samplesref; buf_out = samplesref;
filter_samples(link, buf_out); return filter_samples(link, buf_out);
} }

View File

@@ -49,7 +49,10 @@ AVFilterBufferRef *ff_get_audio_buffer(AVFilterLink *link, int perms,
* @param samplesref a reference to the buffer of audio samples being sent. The * @param samplesref a reference to the buffer of audio samples being sent. The
* receiving filter will free this reference when it no longer * receiving filter will free this reference when it no longer
* needs it or pass it on to the next filter. * needs it or pass it on to the next filter.
*
* @return >= 0 on success, a negative AVERROR on error. The receiving filter
* is responsible for unreferencing samplesref in case of error.
*/ */
void ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref); int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref);
#endif /* AVFILTER_AUDIO_H */ #endif /* AVFILTER_AUDIO_H */

View File

@@ -288,8 +288,12 @@ struct AVFilterPad {
* and should do its processing. * and should do its processing.
* *
* Input audio pads only. * Input audio pads only.
*
* @return >= 0 on success, a negative AVERROR on error. This function
* must ensure that samplesref is properly unreferenced on error if it
* hasn't been passed on to another filter.
*/ */
void (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref); int (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref);
/** /**
* Frame poll callback. This returns the number of immediately available * Frame poll callback. This returns the number of immediately available

View File

@@ -56,6 +56,12 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *buf)
link->cur_buf = NULL; link->cur_buf = NULL;
}; };
static int filter_samples(AVFilterLink *link, AVFilterBufferRef *buf)
{
start_frame(link, buf);
return 0;
}
int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf) int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
{ {
BufferSinkContext *s = ctx->priv; BufferSinkContext *s = ctx->priv;
@@ -160,7 +166,7 @@ AVFilter avfilter_asink_abuffer = {
.inputs = (AVFilterPad[]) {{ .name = "default", .inputs = (AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO, .type = AVMEDIA_TYPE_AUDIO,
.filter_samples = start_frame, .filter_samples = filter_samples,
.min_perms = AV_PERM_READ, .min_perms = AV_PERM_READ,
.needs_fifo = 1 }, .needs_fifo = 1 },
{ .name = NULL }}, { .name = NULL }},

View File

@@ -312,6 +312,7 @@ static int request_frame(AVFilterLink *link)
{ {
BufferSourceContext *c = link->src->priv; BufferSourceContext *c = link->src->priv;
AVFilterBufferRef *buf; AVFilterBufferRef *buf;
int ret = 0;
if (!av_fifo_size(c->fifo)) { if (!av_fifo_size(c->fifo)) {
if (c->eof) if (c->eof)
@@ -327,7 +328,7 @@ static int request_frame(AVFilterLink *link)
ff_end_frame(link); ff_end_frame(link);
break; break;
case AVMEDIA_TYPE_AUDIO: case AVMEDIA_TYPE_AUDIO:
ff_filter_samples(link, avfilter_ref_buffer(buf, ~0)); ret = ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));
break; break;
default: default:
return AVERROR(EINVAL); return AVERROR(EINVAL);
@@ -335,7 +336,7 @@ static int request_frame(AVFilterLink *link)
avfilter_unref_buffer(buf); avfilter_unref_buffer(buf);
return 0; return ret;
} }
static int poll_frame(AVFilterLink *link) static int poll_frame(AVFilterLink *link)

View File

@@ -72,13 +72,25 @@ static av_cold void uninit(AVFilterContext *ctx)
avfilter_unref_buffer(fifo->buf_out); avfilter_unref_buffer(fifo->buf_out);
} }
static void add_to_queue(AVFilterLink *inlink, AVFilterBufferRef *buf) static int add_to_queue(AVFilterLink *inlink, AVFilterBufferRef *buf)
{ {
FifoContext *fifo = inlink->dst->priv; FifoContext *fifo = inlink->dst->priv;
fifo->last->next = av_mallocz(sizeof(Buf)); fifo->last->next = av_mallocz(sizeof(Buf));
if (!fifo->last->next) {
avfilter_unref_buffer(buf);
return AVERROR(ENOMEM);
}
fifo->last = fifo->last->next; fifo->last = fifo->last->next;
fifo->last->buf = buf; fifo->last->buf = buf;
return 0;
}
static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
add_to_queue(inlink, buf);
} }
static void queue_pop(FifoContext *s) static void queue_pop(FifoContext *s)
@@ -210,15 +222,13 @@ static int return_audio_frame(AVFilterContext *ctx)
buf_out = s->buf_out; buf_out = s->buf_out;
s->buf_out = NULL; s->buf_out = NULL;
} }
ff_filter_samples(link, buf_out); return ff_filter_samples(link, buf_out);
return 0;
} }
static int request_frame(AVFilterLink *outlink) static int request_frame(AVFilterLink *outlink)
{ {
FifoContext *fifo = outlink->src->priv; FifoContext *fifo = outlink->src->priv;
int ret; int ret = 0;
if (!fifo->root.next) { if (!fifo->root.next) {
if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0) if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0)
@@ -238,7 +248,7 @@ static int request_frame(AVFilterLink *outlink)
if (outlink->request_samples) { if (outlink->request_samples) {
return return_audio_frame(outlink->src); return return_audio_frame(outlink->src);
} else { } else {
ff_filter_samples(outlink, fifo->root.next->buf); ret = ff_filter_samples(outlink, fifo->root.next->buf);
queue_pop(fifo); queue_pop(fifo);
} }
break; break;
@@ -246,7 +256,7 @@ static int request_frame(AVFilterLink *outlink)
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
return 0; return ret;
} }
AVFilter avfilter_vf_fifo = { AVFilter avfilter_vf_fifo = {
@@ -261,7 +271,7 @@ AVFilter avfilter_vf_fifo = {
.inputs = (AVFilterPad[]) {{ .name = "default", .inputs = (AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer= ff_null_get_video_buffer, .get_video_buffer= ff_null_get_video_buffer,
.start_frame = add_to_queue, .start_frame = start_frame,
.draw_slice = draw_slice, .draw_slice = draw_slice,
.end_frame = end_frame, .end_frame = end_frame,
.rej_perms = AV_PERM_REUSE2, }, .rej_perms = AV_PERM_REUSE2, },

View File

@@ -111,8 +111,12 @@ struct AVFilterPad {
* and should do its processing. * and should do its processing.
* *
* Input audio pads only. * Input audio pads only.
*
* @return >= 0 on success, a negative AVERROR on error. This function
* must ensure that samplesref is properly unreferenced on error if it
* hasn't been passed on to another filter.
*/ */
void (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref); int (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref);
/** /**
* Frame poll callback. This returns the number of immediately available * Frame poll callback. This returns the number of immediately available

View File

@@ -110,15 +110,19 @@ AVFilter avfilter_vf_split = {
.outputs = (AVFilterPad[]) {{ .name = NULL}}, .outputs = (AVFilterPad[]) {{ .name = NULL}},
}; };
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref) static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
{ {
AVFilterContext *ctx = inlink->dst; AVFilterContext *ctx = inlink->dst;
int i; int i, ret = 0;
for (i = 0; i < ctx->nb_outputs; i++) for (i = 0; i < ctx->nb_outputs; i++) {
ff_filter_samples(inlink->dst->outputs[i], ret = ff_filter_samples(inlink->dst->outputs[i],
avfilter_ref_buffer(samplesref, ~AV_PERM_WRITE)); avfilter_ref_buffer(samplesref, ~AV_PERM_WRITE));
if (ret < 0)
break;
}
avfilter_unref_buffer(samplesref); avfilter_unref_buffer(samplesref);
return ret;
} }
AVFilter avfilter_af_asplit = { AVFilter avfilter_af_asplit = {