1
0
mirror of https://github.com/FFmpeg/FFmpeg.git synced 2024-12-23 12:43:46 +02:00

avfilter/f_cue: use inlink fifo for queueing frames

Signed-off-by: Marton Balint <cus@passwd.hu>
This commit is contained in:
Marton Balint 2018-09-30 12:52:44 +02:00
parent 8f14170b9a
commit 7ca2ee059e

View File

@@ -22,7 +22,6 @@
#include "libavutil/time.h"
#include "avfilter.h"
#include "filters.h"
#include "framequeue.h"
#include "internal.h"
/* Per-filter private state for the cue/acue filters.
 * NOTE(review): the fields below are rendered from a scraped diff; the
 * line starting with "@" is a (garbled) unified-diff hunk header, not C. */
typedef struct CueContext {
@ -32,73 +31,49 @@ typedef struct CueContext {
int64_t preroll;
int64_t buffer;
int status;
/* NOTE(review): removed by this commit — the private frame queue is
 * superseded by the inlink fifo (per the commit message). */
FFFrameQueue queue;
} CueContext;
/* init(): DELETED by this commit. It initialized the filter's private
 * FFFrameQueue; no longer needed once queueing moves to the inlink fifo. */
static av_cold int init(AVFilterContext *ctx)
{
CueContext *s = ctx->priv;
/* Register s->queue with the graph-wide framequeue bookkeeping —
 * presumably for global frame accounting; see framequeue.h. */
ff_framequeue_init(&s->queue, &ctx->graph->internal->frame_queues);
return 0;
}
/* uninit(): DELETED by this commit. Released any frames still held in the
 * private queue; obsolete once the inlink fifo owns queued frames. */
static av_cold void uninit(AVFilterContext *ctx)
{
CueContext *s = ctx->priv;
ff_framequeue_free(&s->queue);
}
/* activate(): the filter's frame-driven callback.
 *
 * NOTE(review): this span is a scraped diff with the PRE-commit and
 * POST-commit bodies interleaved and no +/- markers — it is NOT valid C
 * as shown (note the duplicated status==2/3/4 branches and the missing
 * closing brace, elided by the diff). Do not compile or edit as-is;
 * recover each version from git instead. Comments below mark which
 * version each fragment appears to belong to — TODO confirm against the
 * actual commit 7ca2ee059e. */
static int activate(AVFilterContext *ctx)
{
AVFilterLink *inlink = ctx->inputs[0];
AVFilterLink *outlink = ctx->outputs[0];
CueContext *s = ctx->priv;
int64_t pts;
AVFrame *frame = NULL;
FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
/* pre-commit path (removed): eagerly consume a frame off the inlink
 * and rescale its pts to AV_TIME_BASE_Q. */
if (s->status < 3 || s->status == 5) {
int ret = ff_inlink_consume_frame(inlink, &frame);
if (ret < 0)
return ret;
if (frame)
pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
}
/* post-commit path (added): only PEEK the head frame; the inlink fifo
 * keeps ownership until the filter decides to forward. */
if (ff_inlink_queued_frames(inlink)) {
AVFrame *frame = ff_inlink_peek_frame(inlink, 0);
int64_t pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
/* pre-commit branches (removed) — note the "&& frame" guards that the
 * new peek-based code drops: */
if (!s->status && frame) {
s->first_pts = pts;
s->status++;
}
if (s->status == 1 && frame) {
if (pts - s->first_pts < s->preroll)
return ff_filter_frame(outlink, frame);
s->first_pts = pts;
s->status++;
}
/* pre-commit (removed): buffer the frame in the private FFFrameQueue. */
if (s->status == 2 && frame) {
int ret = ff_framequeue_add(&s->queue, frame);
if (ret < 0) {
av_frame_free(&frame);
return ret;
}
frame = NULL;
if (!(pts - s->first_pts < s->buffer && (av_gettime() - s->cue) < 0))
/* post-commit branches (added) — same state machine, but frames stay
 * queued on the inlink and are consumed only when forwarded: */
if (!s->status) {
s->first_pts = pts;
s->status++;
}
if (s->status == 1) {
if (pts - s->first_pts < s->preroll) {
ff_inlink_consume_frame(inlink, &frame);
return ff_filter_frame(outlink, frame);
}
s->first_pts = pts;
s->status++;
}
if (s->status == 2) {
/* peek the NEWEST queued frame to decide whether buffering is done */
frame = ff_inlink_peek_frame(inlink, ff_inlink_queued_frames(inlink) - 1);
pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
if (!(pts - s->first_pts < s->buffer && (av_gettime() - s->cue) < 0))
s->status++;
}
/* post-commit: busy-wait (sleeping in halves) until wall clock reaches
 * the cue time, then drain queued frames one per activation. */
if (s->status == 3) {
int64_t diff;
while ((diff = (av_gettime() - s->cue)) < 0)
av_usleep(av_clip(-diff / 2, 100, 1000000));
s->status++;
}
if (s->status == 4) {
ff_inlink_consume_frame(inlink, &frame);
return ff_filter_frame(outlink, frame);
}
}
/* pre-commit duplicates of the status==3/4 handling (removed), draining
 * the private FFFrameQueue instead of the inlink fifo: */
if (s->status == 3) {
int64_t diff;
while ((diff = (av_gettime() - s->cue)) < 0)
av_usleep(av_clip(-diff / 2, 100, 1000000));
s->status++;
}
if (s->status == 4) {
if (ff_framequeue_queued_frames(&s->queue))
return ff_filter_frame(outlink, ff_framequeue_take(&s->queue));
s->status++;
}
if (s->status == 5 && frame)
return ff_filter_frame(outlink, frame);
/* propagate EOF/status downstream and frame-want requests upstream */
FF_FILTER_FORWARD_STATUS(inlink, outlink);
FF_FILTER_FORWARD_WANTED(outlink, inlink);
@ -140,8 +115,6 @@ AVFilter ff_vf_cue = {
/* Video "cue" filter registration (diff fragment; struct is truncated here).
 * NOTE(review): the line above is a garbled unified-diff hunk header. */
.description = NULL_IF_CONFIG_SMALL("Delay filtering to match a cue."),
.priv_size = sizeof(CueContext),
.priv_class = &cue_class,
/* removed by this commit — no private queue, so no init/uninit needed: */
.init = init,
.uninit = uninit,
.inputs = cue_inputs,
.outputs = cue_outputs,
.activate = activate,
@ -173,8 +146,6 @@ AVFilter ff_af_acue = {
/* Audio "acue" filter registration (diff fragment; struct is truncated here).
 * NOTE(review): the line above is a garbled unified-diff hunk header. */
.description = NULL_IF_CONFIG_SMALL("Delay filtering to match a cue."),
.priv_size = sizeof(CueContext),
.priv_class = &acue_class,
/* removed by this commit — mirrors the ff_vf_cue change: */
.init = init,
.uninit = uninit,
.inputs = acue_inputs,
.outputs = acue_outputs,
.activate = activate,