
lavfi: pass the hw frames context through the filter chain

Author: Anton Khirnov
Date:   2016-02-02 09:47:16 +01:00
Commit: b3dd30db0b
Parent: ad884d1002

7 changed files with 191 additions and 10 deletions

diff --git a/doc/APIchanges b/doc/APIchanges

@@ -13,6 +13,10 @@ libavutil: 2015-08-28
 API changes, most recent first:
 
+2016-xx-xx - lavfi 6.2.0 - avfilter.h
+  xxxxxxx avfilter.h - Add AVFilterLink.hw_frames_ctx.
+  xxxxxxx buffersrc.h - Add AVBufferSrcParameters and functions for handling it.
+
 2016-xx-xx - lavu 55.6.0
   xxxxxxx buffer.h - Add av_buffer_pool_init2().
   xxxxxxx hwcontext.h - Add a new installed header hwcontext.h with a new API
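
A minimal sketch of a compile-time guard for the additions listed above, using only the lavfi version macros; the HAVE_LAVFI_HW_FRAMES_CTX name is illustrative and not part of any header:

#include <libavfilter/version.h>

/* AVFilterLink.hw_frames_ctx and the AVBufferSrcParameters API first appear
 * in lavfi 6.2.0, so code that must also build against older releases can
 * fence them off behind a private macro (name chosen here for illustration). */
#if LIBAVFILTER_VERSION_INT >= AV_VERSION_INT(6, 2, 0)
#define HAVE_LAVFI_HW_FRAMES_CTX 1
#else
#define HAVE_LAVFI_HW_FRAMES_CTX 0
#endif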

diff --git a/doc/filters.texi b/doc/filters.texi

@@ -2864,6 +2864,10 @@ The time base used for input timestamps.
 @item sar
 The sample (pixel) aspect ratio of the input video.
 
+@item hw_frames_ctx
+When using a hardware pixel format, this should be a reference to an
+AVHWFramesContext describing input frames.
+
 @end table
 
 For example:

diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c

@@ -20,8 +20,10 @@
  */
 
 #include "libavutil/avstring.h"
+#include "libavutil/buffer.h"
 #include "libavutil/channel_layout.h"
 #include "libavutil/common.h"
+#include "libavutil/hwcontext.h"
 #include "libavutil/imgutils.h"
 #include "libavutil/internal.h"
 #include "libavutil/opt.h"
@@ -217,6 +219,17 @@ int avfilter_config_links(AVFilterContext *filter)
                     return ret;
                 }
 
+            if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
+                !link->hw_frames_ctx) {
+                AVHWFramesContext *input_ctx = (AVHWFramesContext*)link->src->inputs[0]->hw_frames_ctx->data;
+
+                if (input_ctx->format == link->format) {
+                    link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
+                    if (!link->hw_frames_ctx)
+                        return AVERROR(ENOMEM);
+                }
+            }
+
             link->init_state = AVLINK_INIT;
         }
     }
@@ -481,6 +494,8 @@ static void free_link(AVFilterLink *link)
     if (link->dst)
         link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
 
+    av_buffer_unref(&link->hw_frames_ctx);
+
     ff_formats_unref(&link->in_formats);
     ff_formats_unref(&link->out_formats);
     ff_formats_unref(&link->in_samplerates);
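
The propagation above forwards the source filter's input frames context to its output links only when the link format matches and the filter has not already set one itself. A filter that produces different hardware frames (for example a scaler that keeps the same pixel format but changes dimensions) would therefore install its own context from its config_props callback. A hedged sketch with purely illustrative names:

#include "libavutil/buffer.h"
#include "libavutil/hwcontext.h"
#include "avfilter.h"

/* Hypothetical config_props callback for a filter whose output hardware
 * frames differ from its input: the generic propagation only fills in
 * hw_frames_ctx when the filter has left it unset, so the filter derives a
 * new AVHWFramesContext on the same device and installs it on the output
 * link itself. */
static int example_config_output(AVFilterLink *outlink)
{
    AVFilterLink      *inlink    = outlink->src->inputs[0];
    AVHWFramesContext *in_frames = (AVHWFramesContext*)inlink->hw_frames_ctx->data;
    AVBufferRef       *out_ref;
    AVHWFramesContext *out_frames;
    int ret;

    /* Derive a new frames context on the same device as the input. */
    out_ref = av_hwframe_ctx_alloc(in_frames->device_ref);
    if (!out_ref)
        return AVERROR(ENOMEM);

    out_frames            = (AVHWFramesContext*)out_ref->data;
    out_frames->format    = in_frames->format;
    out_frames->sw_format = in_frames->sw_format;
    out_frames->width     = outlink->w;  /* assumed to be set already */
    out_frames->height    = outlink->h;

    ret = av_hwframe_ctx_init(out_ref);
    if (ret < 0) {
        av_buffer_unref(&out_ref);
        return ret;
    }

    outlink->hw_frames_ctx = out_ref; /* the link owns this reference now */
    return 0;
}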

diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h

@@ -35,6 +35,7 @@
 #include "libavutil/attributes.h"
 #include "libavutil/avutil.h"
+#include "libavutil/buffer.h"
 #include "libavutil/frame.h"
 #include "libavutil/log.h"
 #include "libavutil/samplefmt.h"
@@ -387,6 +388,12 @@ struct AVFilterLink {
      * Sinks can use it to set a default output frame rate.
      */
     AVRational frame_rate;
+
+    /**
+     * For hwaccel pixel formats, this should be a reference to the
+     * AVHWFramesContext describing the frames.
+     */
+    AVBufferRef *hw_frames_ctx;
 };
 
 /**
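
With hw_frames_ctx now part of the public AVFilterLink, a caller can read the frames context negotiated for any configured link, for example off the input link of its buffersink instance after avfilter_graph_config(), and pass it on to a hardware encoder or another consumer. A hedged sketch:

#include <libavfilter/avfilter.h>
#include <libavutil/buffer.h>

/* Sketch: return a new reference to the frames context that ends up on the
 * link feeding 'sink' (assumed to be the caller's buffersink instance) once
 * avfilter_graph_config() has run, or NULL for software formats. */
static AVBufferRef *get_output_frames_ctx(AVFilterContext *sink)
{
    AVFilterLink *link = sink->inputs[0];

    if (!link->hw_frames_ctx)
        return NULL;

    return av_buffer_ref(link->hw_frames_ctx); /* the link keeps its own ref */
}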

diff --git a/libavfilter/buffersrc.c b/libavfilter/buffersrc.c

@@ -52,6 +52,8 @@ typedef struct BufferSourceContext {
     char *pix_fmt_str;
     AVRational pixel_aspect;
 
+    AVBufferRef *hw_frames_ctx;
+
     /* audio only */
     int sample_rate;
     enum AVSampleFormat sample_fmt;
@@ -59,6 +61,7 @@
     uint64_t channel_layout;
     char *channel_layout_str;
 
+    int got_format_from_params;
     int eof;
 } BufferSourceContext;
@@ -75,6 +78,62 @@
         return AVERROR(EINVAL);\
     }
 
+AVBufferSrcParameters *av_buffersrc_parameters_alloc(void)
+{
+    AVBufferSrcParameters *par = av_mallocz(sizeof(*par));
+    if (!par)
+        return NULL;
+
+    par->format = -1;
+
+    return par;
+}
+
+int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
+{
+    BufferSourceContext *s = ctx->priv;
+
+    if (param->time_base.num > 0 && param->time_base.den > 0)
+        s->time_base = param->time_base;
+
+    switch (ctx->filter->outputs[0].type) {
+    case AVMEDIA_TYPE_VIDEO:
+        if (param->format != AV_PIX_FMT_NONE) {
+            s->got_format_from_params = 1;
+            s->pix_fmt = param->format;
+        }
+        if (param->width > 0)
+            s->w = param->width;
+        if (param->height > 0)
+            s->h = param->height;
+        if (param->sample_aspect_ratio.num > 0 && param->sample_aspect_ratio.den > 0)
+            s->pixel_aspect = param->sample_aspect_ratio;
+        if (param->frame_rate.num > 0 && param->frame_rate.den > 0)
+            s->frame_rate = param->frame_rate;
+        if (param->hw_frames_ctx) {
+            av_buffer_unref(&s->hw_frames_ctx);
+            s->hw_frames_ctx = av_buffer_ref(param->hw_frames_ctx);
+            if (!s->hw_frames_ctx)
+                return AVERROR(ENOMEM);
+        }
+        break;
+    case AVMEDIA_TYPE_AUDIO:
+        if (param->format != AV_SAMPLE_FMT_NONE) {
+            s->got_format_from_params = 1;
+            s->sample_fmt = param->format;
+        }
+        if (param->sample_rate > 0)
+            s->sample_rate = param->sample_rate;
+        if (param->channel_layout)
+            s->channel_layout = param->channel_layout;
+        break;
+    default:
+        return AVERROR_BUG;
+    }
+
+    return 0;
+}
+
 int attribute_align_arg av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame)
 {
     AVFrame *copy;
@@ -150,17 +209,20 @@ static av_cold int init_video(AVFilterContext *ctx)
 {
     BufferSourceContext *c = ctx->priv;
 
-    if (!c->pix_fmt_str || !c->w || !c->h || av_q2d(c->time_base) <= 0) {
+    if (!(c->pix_fmt_str || c->got_format_from_params) || !c->w || !c->h ||
+        av_q2d(c->time_base) <= 0) {
         av_log(ctx, AV_LOG_ERROR, "Invalid parameters provided.\n");
         return AVERROR(EINVAL);
     }
 
-    if ((c->pix_fmt = av_get_pix_fmt(c->pix_fmt_str)) == AV_PIX_FMT_NONE) {
-        char *tail;
-        c->pix_fmt = strtol(c->pix_fmt_str, &tail, 10);
-        if (*tail || c->pix_fmt < 0 || !av_pix_fmt_desc_get(c->pix_fmt)) {
-            av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", c->pix_fmt_str);
-            return AVERROR(EINVAL);
+    if (c->pix_fmt_str) {
+        if ((c->pix_fmt = av_get_pix_fmt(c->pix_fmt_str)) == AV_PIX_FMT_NONE) {
+            char *tail;
+            c->pix_fmt = strtol(c->pix_fmt_str, &tail, 10);
+            if (*tail || c->pix_fmt < 0 || !av_pix_fmt_desc_get(c->pix_fmt)) {
+                av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", c->pix_fmt_str);
+                return AVERROR(EINVAL);
+            }
         }
     }
 
@@ -223,14 +285,22 @@ static av_cold int init_audio(AVFilterContext *ctx)
     BufferSourceContext *s = ctx->priv;
     int ret = 0;
 
-    s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str);
+    if (!(s->sample_fmt_str || s->got_format_from_params)) {
+        av_log(ctx, AV_LOG_ERROR, "Sample format not provided\n");
+        return AVERROR(EINVAL);
+    }
+
+    if (s->sample_fmt_str)
+        s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str);
+
     if (s->sample_fmt == AV_SAMPLE_FMT_NONE) {
         av_log(ctx, AV_LOG_ERROR, "Invalid sample format %s.\n",
                s->sample_fmt_str);
         return AVERROR(EINVAL);
     }
 
-    s->channel_layout = av_get_channel_layout(s->channel_layout_str);
+    if (s->channel_layout_str)
+        s->channel_layout = av_get_channel_layout(s->channel_layout_str);
     if (!s->channel_layout) {
         av_log(ctx, AV_LOG_ERROR, "Invalid channel layout %s.\n",
                s->channel_layout_str);
@@ -258,6 +328,7 @@ static av_cold void uninit(AVFilterContext *ctx)
         av_fifo_generic_read(s->fifo, &frame, sizeof(frame), NULL);
         av_frame_free(&frame);
     }
+    av_buffer_unref(&s->hw_frames_ctx);
     av_fifo_free(s->fifo);
     s->fifo = NULL;
 }
@@ -300,6 +371,12 @@ static int config_props(AVFilterLink *link)
         link->w = c->w;
         link->h = c->h;
         link->sample_aspect_ratio = c->pixel_aspect;
+
+        if (c->hw_frames_ctx) {
+            link->hw_frames_ctx = av_buffer_ref(c->hw_frames_ctx);
+            if (!link->hw_frames_ctx)
+                return AVERROR(ENOMEM);
+        }
         break;
     case AVMEDIA_TYPE_AUDIO:
         link->channel_layout = c->channel_layout;
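
A hedged end-to-end sketch of the source side, assuming a build with CUDA support in libavutil's hwcontext (any other hwcontext-backed device type works the same way): the frames context is created first, then handed to an uninitialized buffer filter through AVBufferSrcParameters so that the relaxed init_video() checks above see the format. Names, dimensions and the 1/25 time base are illustrative; error-path cleanup is collapsed for brevity.

#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/hwcontext.h>
#include <libavutil/mem.h>

/* Sketch: create a CUDA frames context and attach it to a not-yet-initialized
 * "buffer" filter via AVBufferSrcParameters. Error paths leak the temporary
 * references to keep the example short. */
static int setup_hw_buffersrc(AVFilterGraph *graph, AVFilterContext **out_src)
{
    AVBufferRef *device_ref = NULL, *frames_ref = NULL;
    AVHWFramesContext *frames;
    AVBufferSrcParameters *par;
    AVFilterContext *src;
    int ret;

    ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_CUDA, NULL, NULL, 0);
    if (ret < 0)
        return ret;

    frames_ref = av_hwframe_ctx_alloc(device_ref);
    if (!frames_ref)
        return AVERROR(ENOMEM);
    frames            = (AVHWFramesContext*)frames_ref->data;
    frames->format    = AV_PIX_FMT_CUDA; /* hw format seen on the filter link */
    frames->sw_format = AV_PIX_FMT_NV12; /* layout of the underlying surfaces */
    frames->width     = 1920;
    frames->height    = 1080;
    ret = av_hwframe_ctx_init(frames_ref);
    if (ret < 0)
        return ret;

    /* Allocate the filter but do not initialize it yet, so init_video() sees
     * the parameters set below. */
    src = avfilter_graph_alloc_filter(graph, avfilter_get_by_name("buffer"), "src");
    if (!src)
        return AVERROR(ENOMEM);

    par = av_buffersrc_parameters_alloc();
    if (!par)
        return AVERROR(ENOMEM);
    par->format        = AV_PIX_FMT_CUDA;
    par->width         = frames->width;
    par->height        = frames->height;
    par->time_base     = (AVRational){ 1, 25 };
    par->hw_frames_ctx = frames_ref; /* the reference stays owned by us */
    ret = av_buffersrc_parameters_set(src, par);
    av_free(par);
    if (ret < 0)
        return ret;

    ret = avfilter_init_str(src, NULL);
    if (ret < 0)
        return ret;

    av_buffer_unref(&frames_ref); /* buffersrc now holds its own reference */
    av_buffer_unref(&device_ref); /* kept alive through the frames context */
    *out_src = src;
    return 0;
}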

diff --git a/libavfilter/buffersrc.h b/libavfilter/buffersrc.h

@@ -34,6 +34,80 @@
  * @{
  */
 
+/**
+ * This structure contains the parameters describing the frames that will be
+ * passed to this filter.
+ *
+ * It should be allocated with av_buffersrc_parameters_alloc() and freed with
+ * av_free(). All the allocated fields in it remain owned by the caller.
+ */
+typedef struct AVBufferSrcParameters {
+    /**
+     * video: the pixel format, value corresponds to enum AVPixelFormat
+     * audio: the sample format, value corresponds to enum AVSampleFormat
+     */
+    int format;
+    /**
+     * The timebase to be used for the timestamps on the input frames.
+     */
+    AVRational time_base;
+
+    /**
+     * Video only, the display dimensions of the input frames.
+     */
+    int width, height;
+
+    /**
+     * Video only, the sample (pixel) aspect ratio.
+     */
+    AVRational sample_aspect_ratio;
+
+    /**
+     * Video only, the frame rate of the input video. This field must only be
+     * set to a non-zero value if input stream has a known constant framerate
+     * and should be left at its initial value if the framerate is variable or
+     * unknown.
+     */
+    AVRational frame_rate;
+
+    /**
+     * Video with a hwaccel pixel format only. This should be a reference to an
+     * AVHWFramesContext instance describing the input frames.
+     */
+    AVBufferRef *hw_frames_ctx;
+
+    /**
+     * Audio only, the audio sampling rate in samples per second.
+     */
+    int sample_rate;
+
+    /**
+     * Audio only, the audio channel layout
+     */
+    uint64_t channel_layout;
+} AVBufferSrcParameters;
+
+/**
+ * Allocate a new AVBufferSrcParameters instance. It should be freed by the
+ * caller with av_free().
+ */
+AVBufferSrcParameters *av_buffersrc_parameters_alloc(void);
+
+/**
+ * Initialize the buffersrc or abuffersrc filter with the provided parameters.
+ * This function may be called multiple times, the later calls override the
+ * previous ones. Some of the parameters may also be set through AVOptions, then
+ * whatever method is used last takes precedence.
+ *
+ * @param ctx an instance of the buffersrc or abuffersrc filter
+ * @param param the stream parameters. The frames later passed to this filter
+ *              must conform to those parameters. All the allocated fields in
+ *              param remain owned by the caller, libavfilter will make internal
+ *              copies or references when necessary.
+ * @return 0 on success, a negative AVERROR code on failure.
+ */
+int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param);
+
 /**
  * Add a frame to the buffer source.
  *
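
Once the source has been initialized this way, frames are fed to it as ordinary AVFrames backed by the same frames context. A hedged sketch of the push side, using av_hwframe_get_buffer() to obtain a surface from the pool (names are illustrative):

#include <libavfilter/buffersrc.h>
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>

/* Sketch: allocate an empty hardware surface from hw_frames_ctx, stamp it
 * with a timestamp and push it into the (a)buffer source. A real program
 * would fill the surface first (upload, decoder output, GPU kernel, ...). */
static int push_hw_frame(AVFilterContext *src, AVBufferRef *hw_frames_ctx,
                         int64_t pts)
{
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);

    ret = av_hwframe_get_buffer(hw_frames_ctx, frame, 0);
    if (ret < 0) {
        av_frame_free(&frame);
        return ret;
    }
    frame->pts = pts;

    /* av_buffersrc_write_frame() keeps its own reference to the frame's
     * data, so we can release ours right away. */
    ret = av_buffersrc_write_frame(src, frame);
    av_frame_free(&frame);
    return ret;
}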

diff --git a/libavfilter/version.h b/libavfilter/version.h

@@ -30,7 +30,7 @@
 #include "libavutil/version.h"
 
 #define LIBAVFILTER_VERSION_MAJOR 6
-#define LIBAVFILTER_VERSION_MINOR 1
+#define LIBAVFILTER_VERSION_MINOR 2
 #define LIBAVFILTER_VERSION_MICRO 0
 
 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \