
Implement a common get_filtered_video_frame(), shared between ffplay.c
and ffmpeg.c.

Originally committed as revision 25520 to svn://svn.ffmpeg.org/ffmpeg/trunk

Author: Stefano Sabatini, 2010-10-18 13:57:11 +00:00
parent 16b2691346
commit ff0652e503
4 changed files with 42 additions and 46 deletions
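
For orientation, here is a minimal caller sketch of the pattern the ffmpeg.c and ffplay.c hunks below converge on. It is not part of this commit: the function name drain_video_sink() is invented, and avcodec_get_frame_defaults() and avfilter_unref_buffer() are assumed to be the appropriate 2010-era init/release calls for the frame and the buffer reference.

```c
/* Hypothetical caller sketch (not in this commit): drain frames from an
 * already-configured ffsink context using the new shared helper. */
#include "libavcodec/avcodec.h"
#include "libavfilter/avfilter.h"
#include "cmdutils.h"

static void drain_video_sink(AVFilterContext *sink)
{
    AVFrame frame;
    AVFilterBufferRef *picref = NULL;
    AVRational tb;

    avcodec_get_frame_defaults(&frame);

    /* get_filtered_video_frame() returns 1 per extracted frame and a
     * negative AVERROR otherwise (ENOENT when the sink has no buffered
     * frame), so looping while it returns > 0 drains the sink. */
    while (get_filtered_video_frame(sink, &frame, &picref, &tb) > 0) {
        /* frame borrows picref's data/linesize planes; picref->pts is
         * expressed in the time base returned through tb. */

        /* ... display or encode `frame` here ... */

        avfilter_unref_buffer(picref);  /* assumed release call for the ref */
        picref = NULL;
    }
}
```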

cmdutils.c

@@ -787,4 +787,26 @@ AVFilter ffsink = {
    .outputs = (AVFilterPad[]) {{ .name = NULL }},
};

int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                             AVFilterBufferRef **picref_ptr, AVRational *tb)
{
    int ret;
    AVFilterBufferRef *picref;

    if ((ret = avfilter_request_frame(ctx->inputs[0])) < 0)
        return ret;
    if (!(picref = ctx->inputs[0]->cur_buf))
        return AVERROR(ENOENT);

    *picref_ptr = picref;
    ctx->inputs[0]->cur_buf = NULL;
    *tb = ctx->inputs[0]->time_base;

    memcpy(frame->data, picref->data, sizeof(frame->data));
    memcpy(frame->linesize, picref->linesize, sizeof(frame->linesize));
    frame->interlaced_frame = picref->video->interlaced;
    frame->top_field_first = picref->video->top_field_first;

    return 1;
}

#endif /* CONFIG_AVFILTER */

cmdutils.h

@@ -270,6 +270,15 @@ typedef struct {
extern AVFilter ffsink;

/**
 * Extract a frame from sink.
 *
 * @return a negative error in case of failure, 1 if one frame has
 * been extracted successfully.
 */
int get_filtered_video_frame(AVFilterContext *sink, AVFrame *frame,
                             AVFilterBufferRef **picref, AVRational *pts_tb);

#endif /* CONFIG_AVFILTER */
#endif /* FFMPEG_CMDUTILS_H */
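
The @return contract and the pts_tb out-parameter documented above are what callers use to rescale the extracted reference's pts, as the ffplay.c hunk further down does with av_cmp_q(). A minimal hedged sketch; the helper name filtered_pts_to_tb() is invented, while av_cmp_q() and av_rescale_q() are standard libavutil calls.

```c
#include "libavutil/rational.h"
#include "libavutil/mathematics.h"
#include "libavfilter/avfilter.h"

/* Convert a pts taken from picref->pts (expressed in the time base that
 * get_filtered_video_frame() reports through *pts_tb) into another time
 * base, e.g. a stream's. Illustrative only. */
static int64_t filtered_pts_to_tb(AVFilterBufferRef *picref,
                                  AVRational filter_tb, AVRational dst_tb)
{
    if (av_cmp_q(filter_tb, dst_tb))   /* non-zero when time bases differ */
        return av_rescale_q(picref->pts, filter_tb, dst_tb);
    return picref->pts;
}
```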

ffmpeg.c

@@ -324,29 +324,6 @@ static struct termios oldtty;
#if CONFIG_AVFILTER

static int get_filtered_video_pic(AVFilterContext *ctx,
                                  AVFilterBufferRef **picref, AVFrame *pic2,
                                  uint64_t *pts)
{
    AVFilterBufferRef *pic;

    if(avfilter_request_frame(ctx->inputs[0]))
        return -1;
    if(!(pic = ctx->inputs[0]->cur_buf))
        return -1;

    *picref = pic;
    ctx->inputs[0]->cur_buf = NULL;

    *pts = pic->pts;

    memcpy(pic2->data, pic->data, sizeof(pic->data));
    memcpy(pic2->linesize, pic->linesize, sizeof(pic->linesize));
    pic2->interlaced_frame = pic->video->interlaced;
    pic2->top_field_first = pic->video->top_field_first;

    return 1;
}

static int configure_filters(AVInputStream *ist, AVOutputStream *ost)
{
    AVFilterContext *last_filter, *filter;
@@ -1600,8 +1577,11 @@ static int output_packet(AVInputStream *ist, int ist_index,
        if (start_time == 0 || ist->pts >= start_time)
#if CONFIG_AVFILTER
        while (frame_available) {
            AVRational ist_pts_tb;
            if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ist->output_video_filter)
                get_filtered_video_pic(ist->output_video_filter, &ist->picref, &picture, &ist->pts);
                get_filtered_video_frame(ist->output_video_filter, &picture, &ist->picref, &ist_pts_tb);
            if (ist->picref)
                ist->pts = ist->picref->pts;
#endif
            for(i=0;i<nb_ostreams;i++) {
                int frame_size;

ffplay.c

@@ -1779,27 +1779,6 @@ static AVFilter input_filter =
                                    { .name = NULL }},
};

static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts, AVRational *tb, int64_t *pos)
{
    AVFilterBufferRef *pic;

    if(avfilter_request_frame(ctx->inputs[0]))
        return -1;
    if(!(pic = ctx->inputs[0]->cur_buf))
        return -1;

    ctx->inputs[0]->cur_buf = NULL;
    frame->opaque = pic;

    *pts = pic->pts;
    *pos = pic->pos;
    *tb = ctx->inputs[0]->time_base;

    memcpy(frame->data, pic->data, sizeof(frame->data));
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));

    return 1;
}

#endif /* CONFIG_AVFILTER */

static int video_thread(void *arg)
@@ -1859,12 +1838,18 @@ static int video_thread(void *arg)
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif

        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &tb, &pos);
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);

        if (picref) {
            pts_int = picref->pts;
            pos = picref->pos;
            frame->opaque = picref;
        }

        if (av_cmp_q(tb, is->video_st->time_base)) {
            int64_t pts1 = pts_int;