
avplay: switch to new refcounted frames API

Remove now unused cmdutils get_buffer() implementation.
Author: Anton Khirnov 2013-02-17 16:06:16 +01:00
parent 9b2dc29534
commit 354468fc12
3 changed files with 19 additions and 226 deletions
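For readers unfamiliar with the refcounted frames API this patch adopts, the decode-side pattern looks roughly like the sketch below. It is not part of the commit; the function name and error handling are illustrative only, and it assumes the "refcounted_frames" codec option was set to 1 before avcodec_open2(), as the patch does for video streams.

/* Illustrative sketch only (not from the patch): decoding with refcounted
 * frames.  Assumes avctx was opened with "refcounted_frames" set to 1. */
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

static int decode_one_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    AVFrame *frame = av_frame_alloc();   /* replaces avcodec_alloc_frame() */
    int got_frame = 0;
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);

    ret = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
    if (ret >= 0 && got_frame) {
        /* ... consume frame->data / frame->pts here ... */
        av_frame_unref(frame);           /* drop our reference when done */
    }

    av_frame_free(&frame);               /* replaces avcodec_free_frame() */
    return ret;
}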

avplay.c

@@ -108,9 +108,7 @@ typedef struct VideoPicture {
     int reallocate;
     enum AVPixelFormat pix_fmt;
 
-#if CONFIG_AVFILTER
-    AVFilterBufferRef *picref;
-#endif
+    AVRational sar;
 } VideoPicture;
 
 typedef struct SubPicture {
@@ -217,8 +215,6 @@ typedef struct VideoState {
 #if CONFIG_AVFILTER
     AVFilterContext *in_video_filter;   // the first filter in the video chain
     AVFilterContext *out_video_filter;  // the last filter in the video chain
-    int use_dr1;
-    FrameBuffer *buffer_pool;
 #endif
 
     float skip_frames;
@@ -656,10 +652,10 @@ static void video_image_display(VideoState *is)
     vp = &is->pictq[is->pictq_rindex];
     if (vp->bmp) {
 #if CONFIG_AVFILTER
-        if (vp->picref->video->pixel_aspect.num == 0)
+        if (!vp->sar.num)
             aspect_ratio = 0;
         else
-            aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
+            aspect_ratio = av_q2d(vp->sar);
 #else
 
         /* XXX: use variable in the frame */
@@ -1229,9 +1225,6 @@ static void stream_close(VideoState *is)
     /* free all pictures */
     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
         vp = &is->pictq[i];
-#if CONFIG_AVFILTER
-        avfilter_unref_bufferp(&vp->picref);
-#endif
         if (vp->bmp) {
             SDL_FreeYUVOverlay(vp->bmp);
             vp->bmp = NULL;
@@ -1279,8 +1272,6 @@ static void alloc_picture(void *opaque)
         SDL_FreeYUVOverlay(vp->bmp);
 
 #if CONFIG_AVFILTER
-    avfilter_unref_bufferp(&vp->picref);
-
     vp->width   = is->out_video_filter->inputs[0]->w;
     vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
@@ -1369,10 +1360,6 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
     /* if the frame is not skipped, then display it */
     if (vp->bmp) {
         AVPicture pict = { { 0 } };
-#if CONFIG_AVFILTER
-        avfilter_unref_bufferp(&vp->picref);
-        vp->picref = src_frame->opaque;
-#endif
 
         /* get a pointer on the bitmap */
         SDL_LockYUVOverlay (vp->bmp);
@@ -1432,6 +1419,7 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
 {
     double frame_delay, pts;
+    int ret;
 
     pts = pts1;
@@ -1448,7 +1436,9 @@ static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
     is->video_clock += frame_delay;
 
-    return queue_picture(is, src_frame, pts, pos);
+    ret = queue_picture(is, src_frame, pts, pos);
+    av_frame_unref(src_frame);
+    return ret;
 }
 
 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
@@ -1504,7 +1494,7 @@ static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
             return 1;
         }
+        av_frame_unref(frame);
     }
     return 0;
 }
@@ -1572,13 +1562,6 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
     is->in_video_filter  = filt_src;
     is->out_video_filter = filt_out;
 
-    if (codec->codec->capabilities & CODEC_CAP_DR1) {
-        is->use_dr1 = 1;
-        codec->get_buffer     = codec_get_buffer;
-        codec->release_buffer = codec_release_buffer;
-        codec->opaque         = &is->buffer_pool;
-    }
-
     return ret;
 }
@@ -1588,7 +1571,7 @@
 {
     AVPacket pkt = { 0 };
     VideoState *is = arg;
-    AVFrame *frame = avcodec_alloc_frame();
+    AVFrame *frame = av_frame_alloc();
     int64_t pts_int;
     double pts;
     int ret;
@@ -1596,7 +1579,6 @@ static int video_thread(void *arg)
 #if CONFIG_AVFILTER
     AVFilterGraph *graph = avfilter_graph_alloc();
     AVFilterContext *filt_out = NULL, *filt_in = NULL;
-    int64_t pos;
     int last_w = is->video_st->codec->width;
     int last_h = is->video_st->codec->height;
@@ -1608,7 +1590,6 @@ static int video_thread(void *arg)
     for (;;) {
 #if CONFIG_AVFILTER
-        AVFilterBufferRef *picref;
         AVRational tb;
 #endif
         while (is->paused && !is->videoq.abort_request)
@@ -1639,38 +1620,19 @@ static int video_thread(void *arg)
         }
 
         frame->pts = pts_int;
-        if (is->use_dr1) {
-            FrameBuffer      *buf = frame->opaque;
-            AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
-                                        frame->data, frame->linesize,
-                                        AV_PERM_READ | AV_PERM_PRESERVE,
-                                        frame->width, frame->height,
-                                        frame->format);
-
-            avfilter_copy_frame_props(fb, frame);
-            fb->buf->priv = buf;
-            fb->buf->free = filter_release_buffer;
-
-            buf->refcount++;
-            av_buffersrc_buffer(filt_in, fb);
-        } else
-            av_buffersrc_write_frame(filt_in, frame);
+        ret = av_buffersrc_add_frame(filt_in, frame);
+        if (ret < 0)
+            goto the_end;
 
         while (ret >= 0) {
-            ret = av_buffersink_read(filt_out, &picref);
+            ret = av_buffersink_get_frame(filt_out, frame);
             if (ret < 0) {
                 ret = 0;
                 break;
             }
 
-            avfilter_copy_buf_props(frame, picref);
-            pts_int = picref->pts;
+            pts_int = frame->pts;
             tb      = filt_out->inputs[0]->time_base;
-            pos     = picref->pos;
-            frame->opaque = picref;
             if (av_cmp_q(tb, is->video_st->time_base)) {
                 av_unused int64_t pts1 = pts_int;
                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
@@ -1680,7 +1642,7 @@ static int video_thread(void *arg)
                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
             }
             pts = pts_int * av_q2d(is->video_st->time_base);
-            ret = output_picture2(is, frame, pts, pos);
+            ret = output_picture2(is, frame, pts, 0);
         }
 #else
         pts = pts_int * av_q2d(is->video_st->time_base);
@@ -1690,6 +1652,7 @@ static int video_thread(void *arg)
         if (ret < 0)
             goto the_end;
 
         if (step)
             if (cur_stream)
                 stream_pause(cur_stream);
@@ -1700,7 +1663,7 @@ static int video_thread(void *arg)
     avfilter_graph_free(&graph);
 #endif
     av_free_packet(&pkt);
-    avcodec_free_frame(&frame);
+    av_frame_free(&frame);
     return 0;
 }
@@ -2090,6 +2053,8 @@ static int stream_component_open(VideoState *is, int stream_index)
 
     if (!av_dict_get(opts, "threads", NULL, 0))
         av_dict_set(&opts, "threads", "auto", 0);
+    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
+        av_dict_set(&opts, "refcounted_frames", "1", 0);
     if (!codec ||
         avcodec_open2(avctx, codec, &opts) < 0)
         return -1;
@@ -2235,9 +2200,6 @@ static void stream_component_close(VideoState *is, int stream_index)
     ic->streams[stream_index]->discard = AVDISCARD_ALL;
     avcodec_close(avctx);
-#if CONFIG_AVFILTER
-    free_buffer_pool(&is->buffer_pool);
-#endif
     switch (avctx->codec_type) {
     case AVMEDIA_TYPE_AUDIO:
         is->audio_st = NULL;
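The avplay.c changes above boil down to the plain push/pull filtergraph pattern sketched below. This is not code from the patch; the names src and sink are assumed stand-ins for the configured buffersrc and buffersink contexts, and a real caller distinguishes AVERROR(EAGAIN)/AVERROR_EOF from hard errors much as the new video_thread loop does.

/* Illustrative sketch only: feeding a decoded, refcounted frame through a
 * configured filtergraph and pulling the filtered frames back out. */
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

static int run_filters(AVFilterContext *src, AVFilterContext *sink, AVFrame *frame)
{
    int ret = av_buffersrc_add_frame(src, frame);  /* the graph takes the frame's refs */
    if (ret < 0)
        return ret;

    /* Pull every frame the graph has ready; EAGAIN/EOF just mean "no more". */
    while ((ret = av_buffersink_get_frame(sink, frame)) >= 0) {
        /* ... hand the filtered frame to the display queue ... */
        av_frame_unref(frame);                     /* release before reusing */
    }

    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}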

cmdutils.c

@@ -1543,129 +1543,3 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
     }
     return array;
 }
-
-static int alloc_buffer(FrameBuffer **pool, AVCodecContext *s, FrameBuffer **pbuf)
-{
-    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->pix_fmt);
-    FrameBuffer *buf;
-    int i, ret;
-    int pixel_size;
-    int h_chroma_shift, v_chroma_shift;
-    int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
-    int w = s->width, h = s->height;
-
-    if (!desc)
-        return AVERROR(EINVAL);
-    pixel_size = desc->comp[0].step_minus1 + 1;
-
-    buf = av_mallocz(sizeof(*buf));
-    if (!buf)
-        return AVERROR(ENOMEM);
-
-    if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
-        w += 2*edge;
-        h += 2*edge;
-    }
-
-    avcodec_align_dimensions(s, &w, &h);
-    if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
-                              s->pix_fmt, 32)) < 0) {
-        av_freep(&buf);
-        return ret;
-    }
-
-    av_pix_fmt_get_chroma_sub_sample(s->pix_fmt,
-                                     &h_chroma_shift, &v_chroma_shift);
-
-    for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
-        const int h_shift = i==0 ? 0 : h_chroma_shift;
-        const int v_shift = i==0 ? 0 : v_chroma_shift;
-
-        if (s->flags & CODEC_FLAG_EMU_EDGE)
-            buf->data[i] = buf->base[i];
-        else if (buf->base[i])
-            buf->data[i] = buf->base[i] +
-                           FFALIGN((buf->linesize[i]*edge >> v_shift) +
-                                   (pixel_size*edge >> h_shift), 32);
-    }
-    buf->w       = s->width;
-    buf->h       = s->height;
-    buf->pix_fmt = s->pix_fmt;
-    buf->pool    = pool;
-
-    *pbuf = buf;
-    return 0;
-}
-
-int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
-{
-    FrameBuffer **pool = s->opaque;
-    FrameBuffer *buf;
-    int ret, i;
-
-    if (!*pool && (ret = alloc_buffer(pool, s, pool)) < 0)
-        return ret;
-
-    buf       = *pool;
-    *pool     = buf->next;
-    buf->next = NULL;
-    if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
-        av_freep(&buf->base[0]);
-        av_free(buf);
-        if ((ret = alloc_buffer(pool, s, &buf)) < 0)
-            return ret;
-    }
-    buf->refcount++;
-
-    frame->opaque        = buf;
-    frame->type          = FF_BUFFER_TYPE_USER;
-    frame->extended_data = frame->data;
-
-    for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
-        frame->base[i]     = buf->base[i];  // XXX h264.c uses base though it shouldn't
-        frame->data[i]     = buf->data[i];
-        frame->linesize[i] = buf->linesize[i];
-    }
-
-    return 0;
-}
-
-static void unref_buffer(FrameBuffer *buf)
-{
-    FrameBuffer **pool = buf->pool;
-
-    av_assert0(buf->refcount);
-    buf->refcount--;
-    if (!buf->refcount) {
-        buf->next = *pool;
-        *pool     = buf;
-    }
-}
-
-void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
-{
-    FrameBuffer *buf = frame->opaque;
-    int i;
-
-    for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
-        frame->data[i] = NULL;
-
-    unref_buffer(buf);
-}
-
-void filter_release_buffer(AVFilterBuffer *fb)
-{
-    FrameBuffer *buf = fb->priv;
-    av_free(fb);
-    unref_buffer(buf);
-}
-
-void free_buffer_pool(FrameBuffer **pool)
-{
-    FrameBuffer *buf = *pool;
-    while (buf) {
-        *pool = buf->next;
-        av_freep(&buf->base[0]);
-        av_free(buf);
-        buf = *pool;
-    }
-}
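The FrameBuffer pool deleted above existed only because the old get_buffer()/release_buffer() callbacks did not refcount. With the refcounted API, libavcodec's own buffer pool takes over that job; an application that still wants to hook allocation would override get_buffer2() instead, roughly as sketched below. The sketch is illustrative only and not part of this commit; the helper names are assumed.

/* Illustrative sketch only: the refcounted counterpart of the removed
 * codec_get_buffer()/codec_release_buffer() pair is AVCodecContext.get_buffer2,
 * which hands out AVBufferRef-backed frames.  Deferring to the default
 * implementation is usually all an application needs. */
#include <libavcodec/avcodec.h>

static int refcounted_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
{
    /* A custom allocator would wrap its own memory with av_buffer_create()
     * here; the default already pools and refcounts buffers for us. */
    return avcodec_default_get_buffer2(s, frame, flags);
}

static void install_get_buffer(AVCodecContext *avctx)
{
    avctx->get_buffer2 = refcounted_get_buffer;
}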

cmdutils.h

@@ -510,49 +510,6 @@ void *grow_array(void *array, int elem_size, int *size, int new_size);
 #define GROW_ARRAY(array, nb_elems)\
     array = grow_array(array, sizeof(*array), &nb_elems, nb_elems + 1)
 
-typedef struct FrameBuffer {
-    uint8_t *base[4];
-    uint8_t *data[4];
-    int  linesize[4];
-    int  h, w;
-    enum AVPixelFormat pix_fmt;
-
-    int refcount;
-    struct FrameBuffer **pool;  ///< head of the buffer pool
-    struct FrameBuffer *next;
-} FrameBuffer;
-
-/**
- * Get a frame from the pool. This is intended to be used as a callback for
- * AVCodecContext.get_buffer.
- *
- * @param s codec context. s->opaque must be a pointer to the head of the
- *          buffer pool.
- * @param frame frame->opaque will be set to point to the FrameBuffer
- *              containing the frame data.
- */
-int codec_get_buffer(AVCodecContext *s, AVFrame *frame);
-
-/**
- * A callback to be used for AVCodecContext.release_buffer along with
- * codec_get_buffer().
- */
-void codec_release_buffer(AVCodecContext *s, AVFrame *frame);
-
-/**
- * A callback to be used for AVFilterBuffer.free.
- * @param fb buffer to free. fb->priv must be a pointer to the FrameBuffer
- *           containing the buffer data.
- */
-void filter_release_buffer(AVFilterBuffer *fb);
-
-/**
- * Free all the buffers in the pool. This must be called after all the
- * buffers have been released.
- */
-void free_buffer_pool(FrameBuffer **pool);
-
 #define GET_PIX_FMT_NAME(pix_fmt)\
     const char *name = av_get_pix_fmt_name(pix_fmt);