1
0
mirror of https://github.com/FFmpeg/FFmpeg.git synced 2024-12-23 12:43:46 +02:00

Move output_example.c and ffplay.c to the swscale interface

Originally committed as revision 5923 to svn://svn.ffmpeg.org/ffmpeg/trunk
This commit is contained in:
Luca Abeni 2006-08-03 16:55:36 +00:00
parent dfeb80a5a9
commit 03ae87a3e8
2 changed files with 33 additions and 6 deletions

View File: ffplay.c

@@ -18,6 +18,7 @@
  */
 #define HAVE_AV_CONFIG_H
 #include "avformat.h"
+#include "swscale.h"

 #include "version.h"
 #include "cmdutils.h"
@@ -70,6 +71,8 @@
 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
 #define SAMPLE_ARRAY_SIZE (2*65536)

+static int sws_flags = SWS_BICUBIC;
+
 typedef struct PacketQueue {
     AVPacketList *first_pkt, *last_pkt;
     int nb_packets;
@@ -1143,6 +1146,7 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
     VideoPicture *vp;
     int dst_pix_fmt;
     AVPicture pict;
+    static struct SwsContext *img_convert_ctx;

     /* wait until we have space to put a new picture */
     SDL_LockMutex(is->pictq_mutex);
@@ -1195,9 +1199,18 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
         pict.linesize[0] = vp->bmp->pitches[0];
         pict.linesize[1] = vp->bmp->pitches[2];
         pict.linesize[2] = vp->bmp->pitches[1];
-        img_convert(&pict, dst_pix_fmt,
-                    (AVPicture *)src_frame, is->video_st->codec->pix_fmt,
-                    is->video_st->codec->width, is->video_st->codec->height);
+        if (img_convert_ctx == NULL) {
+            img_convert_ctx = sws_getContext(is->video_st->codec->width,
+                    is->video_st->codec->height, is->video_st->codec->pix_fmt,
+                    is->video_st->codec->width, is->video_st->codec->height,
+                    dst_pix_fmt, sws_flags, NULL, NULL, NULL);
+            if (img_convert_ctx == NULL) {
+                fprintf(stderr, "Cannot initialize the conversion context\n");
+                exit(1);
+            }
+        }
+        sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
+                  0, is->video_st->codec->height, pict.data, pict.linesize);

         /* update the bitmap content */
         SDL_UnlockYUVOverlay(vp->bmp);

View File: output_example.c

@@ -32,6 +32,7 @@
 #endif

 #include "avformat.h"
+#include "swscale.h"

 /* 5 seconds stream duration */
 #define STREAM_DURATION 5.0
@@ -39,6 +40,8 @@
 #define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
 #define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */

+static int sws_flags = SWS_BICUBIC;
+
 /**************************************************************/
 /* audio output */
@@ -319,6 +322,7 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
 {
     int out_size, ret;
     AVCodecContext *c;
+    static struct SwsContext *img_convert_ctx;

     c = st->codec;
@@ -330,10 +334,20 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
     if (c->pix_fmt != PIX_FMT_YUV420P) {
        /* as we only generate a YUV420P picture, we must convert it
           to the codec pixel format if needed */
+        if (img_convert_ctx == NULL) {
+            img_convert_ctx = sws_getContext(c->width, c->height,
+                                             PIX_FMT_YUV420P,
+                                             c->width, c->height,
+                                             c->pix_fmt,
+                                             sws_flags, NULL, NULL, NULL);
+            if (img_convert_ctx == NULL) {
+                fprintf(stderr, "Cannot initialize the conversion context\n");
+                exit(1);
+            }
+        }
        fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
-        img_convert((AVPicture *)picture, c->pix_fmt,
-                    (AVPicture *)tmp_picture, PIX_FMT_YUV420P,
-                    c->width, c->height);
+        sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
+                  0, c->height, picture->data, picture->linesize);
     } else {
        fill_yuv_image(picture, frame_count, c->width, c->height);
     }