
COSMETICS: Remove all trailing whitespace.

Originally committed as revision 4749 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diego Biurrun
2005-12-17 18:14:38 +00:00
parent d76319b1ab
commit 115329f160
340 changed files with 13115 additions and 13116 deletions

View File

@@ -11,14 +11,14 @@ HOOKS=null$(SLIBSUF) fish$(SLIBSUF) ppm$(SLIBSUF) watermark$(SLIBSUF)
ifeq ($(HAVE_IMLIB2),yes)
HOOKS += imlib2$(SLIBSUF)
endif
ifeq ($(HAVE_FREETYPE2),yes)
HOOKS += drawtext$(SLIBSUF)
CFLAGS += `freetype-config --cflags`
endif
all: $(HOOKS)
SRCS := $(HOOKS:$(SLIBSUF)=.c)

View File

@@ -9,8 +9,8 @@
* -x <pos> x position ( >= 0) [default 0]
* -y <pos> y position ( >= 0) [default 0]
* -t <text> text to print (will be passed to strftime())
* MANDATORY: will be used even when -T is used.
* in this case, -t will be used if some error
* occurs
* -T <filename> file with the text (re-read every frame)
* -c <#RRGGBB> foreground color ('internet' way) [default #ffffff]
@@ -97,7 +97,7 @@ typedef struct {
unsigned char fgcolor[3]; /* YUV */
FT_Library library;
FT_Face face;
FT_Glyph glyphs[ 255 ];
FT_Bitmap bitmaps[ 255 ];
int advance[ 255 ];
int bitmap_left[ 255 ];
@@ -213,7 +213,7 @@ int Configure(void **ctxp, int argc, char *argv[])
}
}
if (!ci->text)
{
fprintf(stderr,"ERROR: No text provided (-t text)\n");
return -1;
@@ -249,7 +249,7 @@ int Configure(void **ctxp, int argc, char *argv[])
fprintf(stderr,"ERROR: Could not load face: %s (error# %d)\n",font, error);
return -1;
}
if ((error = FT_Set_Pixel_Sizes( ci->face, 0, size)) != 0)
{
fprintf(stderr,"ERROR: Could not set font size to %d pixels (error# %d)\n",size, error);
@@ -266,7 +266,7 @@ int Configure(void **ctxp, int argc, char *argv[])
/* Load char */
error = FT_Load_Char( ci->face, (unsigned char) c, FT_LOAD_RENDER | FT_LOAD_MONOCHROME );
if (error) continue; /* ignore errors */
/* Save bitmap */
ci->bitmaps[c] = ci->face->glyph->bitmap;
/* Save bitmap left */
@@ -280,7 +280,7 @@ int Configure(void **ctxp, int argc, char *argv[])
/* Save glyph */
error = FT_Get_Glyph( ci->face->glyph, &(ci->glyphs[c]) );
/* Save glyph index */
ci->glyphs_index[c] = FT_Get_Char_Index( ci->face, (unsigned char) c );
/* Measure text height to calculate text_height (or the maximum text height) */
FT_Glyph_Get_CBox( ci->glyphs[ c ], ft_glyph_bbox_pixels, &bbox );
@@ -288,7 +288,7 @@ int Configure(void **ctxp, int argc, char *argv[])
yMax = bbox.yMax;
if (bbox.yMin < yMin)
yMin = bbox.yMin;
}
ci->text_height = yMax - yMin;
@@ -316,14 +316,14 @@ inline void draw_glyph(AVPicture *picture, FT_Bitmap *bitmap, unsigned int x, un
GET_PIXEL(picture, dpixel, (c+x), (y+r));
/* pixel in the glyph bitmap (source) */
spixel = bitmap->buffer[r*bitmap->pitch +c/8] & (0x80>>(c%8));
if (spixel)
COPY_3(dpixel, yuv_fgcolor);
if (outline)
{
/* border detection: */
if ( (!in_glyph) && (spixel) )
/* left border detected */
{
@@ -339,8 +339,8 @@ inline void draw_glyph(AVPicture *picture, FT_Bitmap *bitmap, unsigned int x, un
/* 'draw' right pixel border */
COPY_3(dpixel, yuv_bgcolor);
}
if (in_glyph)
/* see if we have a top/bottom border */
{
/* top */
@@ -352,10 +352,10 @@ inline void draw_glyph(AVPicture *picture, FT_Bitmap *bitmap, unsigned int x, un
if ( (r+1 < height) && (! bitmap->buffer[(r+1)*bitmap->pitch +c/8] & (0x80>>(c%8))) )
/* we have a bottom border */
SET_PIXEL(picture, yuv_bgcolor, (c+x), (y+r+1));
}
}
SET_PIXEL(picture, dpixel, (c+x), (y+r));
}
}
@@ -368,11 +368,11 @@ inline void draw_box(AVPicture *picture, unsigned int x, unsigned int y, unsigne
int i, j;
for (j = 0; (j < height); j++)
for (i = 0; (i < width); i++)
{
SET_PIXEL(picture, yuv_color, (i+x), (y+j));
}
}
@@ -382,7 +382,7 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
{
ContextInfo *ci = (ContextInfo *) ctx;
FT_Face face = ci->face;
FT_GlyphSlot slot = face->glyph;
unsigned char *text = ci->text;
unsigned char c;
int x = 0, y = 0, i=0, size=0;
@@ -390,28 +390,28 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
unsigned char tbuff[MAXSIZE_TEXT];
time_t now = time(0);
int str_w, str_w_max;
FT_Vector pos[MAXSIZE_TEXT];
FT_Vector delta;
if (ci->file)
{
int fd = open(ci->file, O_RDONLY);
if (fd < 0)
{
text = ci->text;
perror("WARNING: the file could not be opened. Using text provided with -t switch. ");
}
else
{
int l = read(fd, tbuff, sizeof(tbuff) - 1);
if (l >= 0)
{
tbuff[l] = 0;
text = tbuff;
}
else
{
text = ci->text;
perror("WARNING: the file could not be opened. Using text provided with -t switch. ");
@@ -429,13 +429,13 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
text = buff;
size = strlen(text);
/* measure string size and save glyphs position*/
str_w = str_w_max = 0;
x = ci->x;
y = ci->y;
for (i=0; i < size; i++)
{
@@ -444,15 +444,15 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
/* kerning */
if ( (ci->use_kerning) && (i > 0) && (ci->glyphs_index[c]) )
{
FT_Get_Kerning( ci->face,
ci->glyphs_index[ text[i-1] ],
ci->glyphs_index[c],
ft_kerning_default,
&delta );
x += delta.x >> 6;
}
if (( (x + ci->advance[ c ]) >= width ) || ( c == '\n' ))
{
str_w = width - ci->x - 1;
@@ -475,7 +475,7 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
}
if (ci->bg)
@@ -487,7 +487,7 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
y = height - 1 - 2*ci->y;
/* Draw Background */
draw_box( picture, ci->x, ci->y, str_w_max, y - ci->y, ci->bgcolor );
}
@@ -498,24 +498,24 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
c = text[i];
if (
( (c == '_') && (text == ci->text) ) || /* skip '_' (consider as space)
IF text was specified in cmd line
(which doesn't like neasted quotes) */
( c == '\n' ) /* Skip new line char, just go to new line */
)
continue;
/* now, draw to our target surface */
draw_glyph( picture,
&(ci->bitmaps[ c ]),
pos[i].x,
pos[i].y,
width,
height,
ci->fgcolor,
ci->bgcolor,
ci->outline );
/* increment pen position */
x += slot->advance.x >> 6;
}
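
A note on the bitmap addressing used in draw_glyph() above: with FT_LOAD_RENDER | FT_LOAD_MONOCHROME, FreeType hands back a 1-bit-per-pixel bitmap whose rows occupy bitmap->pitch bytes, most significant bit first. A minimal standalone sketch of that per-pixel test (the helper name is invented for illustration and is not part of the hook):

    /* Returns nonzero if pixel (c, r) is set in a 1 bpp FreeType bitmap
     * (FT_PIXEL_MODE_MONO). Each row occupies bitmap->pitch bytes and
     * bit 7 of every byte is the leftmost of its eight pixels. */
    static int mono_pixel_set(const FT_Bitmap *bitmap, int c, int r)
    {
        unsigned char byte = bitmap->buffer[r * bitmap->pitch + c / 8];
        return byte & (0x80 >> (c % 8));
    }

This is the same expression the hook evaluates inline when it computes spixel.
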

View File

@@ -2,7 +2,7 @@
* Fish Detector Hook
* Copyright (c) 2002 Philip Gladstone
*
* This file implements a fish detector. It is used to see when a
* goldfish passes in front of the camera. It does this by counting
* the number of input pixels that fall within a particular HSV
* range.
@@ -69,14 +69,14 @@
}
typedef struct {
int h; /* 0 .. 360 */
int s; /* 0 .. 255 */
int v; /* 0 .. 255 */
} HSV;
typedef struct {
int zapping;
int threshold;
@@ -180,21 +180,21 @@ int Configure(void **ctxp, int argc, char *argv[])
static void get_hsv(HSV *hsv, int r, int g, int b)
{
int i, v, x, f;
x = (r < g) ? r : g;
if (b < x)
x = b;
v = (r > g) ? r : g;
if (b > v)
v = b;
if (v == x) {
hsv->h = 0;
hsv->s = 0;
hsv->v = v;
return;
}
if (r == v) {
f = g - b;
i = 0;
@@ -205,21 +205,21 @@ static void get_hsv(HSV *hsv, int r, int g, int b)
f = r - g;
i = 4 * 60;
}
hsv->h = i + (60 * f) / (v - x);
if (hsv->h < 0)
hsv->h += 360;
hsv->s = (255 * (v - x)) / v;
hsv->v = v;
return;
}
void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts)
{
ContextInfo *ci = (ContextInfo *) ctx;
uint8_t *cm = cropTbl + MAX_NEG_CROP;
int rowsize = picture->linesize[0];
#if 0
@@ -233,7 +233,7 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
if (width < ci->min_width)
return;
ci->next_pts = pts + 1000000;
if (pix_fmt == PIX_FMT_YUV420P) {
uint8_t *y, *u, *v;
@@ -269,14 +269,14 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
get_hsv(&hsv, r, g, b);
if (ci->debug > 1)
fprintf(stderr, "(%d,%d,%d) -> (%d,%d,%d)\n",
r,g,b,hsv.h,hsv.s,hsv.v);
if (hsv.h >= ci->dark.h && hsv.h <= ci->bright.h &&
hsv.s >= ci->dark.s && hsv.s <= ci->bright.s &&
hsv.v >= ci->dark.v && hsv.v <= ci->bright.v) {
inrange++;
} else if (ci->zapping) {
y[0] = y[1] = y[rowsize] = y[rowsize + 1] = 16;
@@ -294,7 +294,7 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
v += picture->linesize[2] - (w_start - w_end);
}
if (ci->debug)
fprintf(stderr, "Fish: Inrange=%d of %d = %d threshold\n", inrange, pixcnt, 1000 * inrange / pixcnt);
if (inrange * 1000 / pixcnt >= ci->threshold) {
@@ -331,7 +331,7 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
buf = av_malloc(size);
avpicture_fill(&picture1, buf, PIX_FMT_RGB24, width, height);
if (img_convert(&picture1, PIX_FMT_RGB24,
picture, pix_fmt, width, height) >= 0) {
/* Write out the PPM file */
@@ -348,7 +348,7 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
}
av_free(buf);
ci->next_pts = pts + ci->min_interval;
}
}
}
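
For readers following the detection logic: the hook converts each sampled pixel to integer HSV and counts how many fall inside the [dark, bright] box, firing when the count reaches the per-mille threshold. A self-contained sketch of that flow, using the same integer math as get_hsv() above; the g == v branch, the sample pixel and the range/threshold values are assumptions added for the example:

    #include <stdio.h>

    typedef struct { int h, s, v; } HSV;   /* h: 0..360, s and v: 0..255 */

    /* Mirrors the hook's get_hsv(); the g == v branch is reconstructed. */
    static void rgb_to_hsv(HSV *hsv, int r, int g, int b)
    {
        int i, v, x, f;
        x = (r < g) ? r : g;  if (b < x) x = b;   /* min component */
        v = (r > g) ? r : g;  if (b > v) v = b;   /* max component */
        if (v == x) { hsv->h = 0; hsv->s = 0; hsv->v = v; return; }
        if (r == v)      { f = g - b; i = 0; }
        else if (g == v) { f = b - r; i = 2 * 60; }
        else             { f = r - g; i = 4 * 60; }
        hsv->h = i + (60 * f) / (v - x);
        if (hsv->h < 0) hsv->h += 360;
        hsv->s = (255 * (v - x)) / v;
        hsv->v = v;
    }

    int main(void)
    {
        HSV dark = { 20, 100, 100 }, bright = { 50, 255, 255 };  /* example range */
        int inrange = 0, pixcnt = 1, threshold = 100;            /* per-mille */
        HSV hsv;

        rgb_to_hsv(&hsv, 200, 120, 40);          /* one orange-ish sample pixel */
        if (hsv.h >= dark.h && hsv.h <= bright.h &&
            hsv.s >= dark.s && hsv.s <= bright.s &&
            hsv.v >= dark.v && hsv.v <= bright.v)
            inrange++;

        /* The hook "sees a fish" when inrange/pixcnt >= threshold/1000. */
        printf("h=%d s=%d v=%d fires=%d\n",
               hsv.h, hsv.s, hsv.v, inrange * 1000 / pixcnt >= threshold);
        return 0;
    }
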

View File

@@ -1,7 +1,7 @@
/*
* imlib2 based hook
* Copyright (c) 2002 Philip Gladstone
*
* This module implements a text overlay for a video image. Currently it
* supports a fixed overlay or reading the text from a file. The string
* is passed through strftime so that it is easy to imprint the date and
@@ -19,13 +19,13 @@
* This module is very much intended as an example of what could be done.
* For example, you could overlay an image (even semi-transparent) like
* TV stations do. You can manipulate the image using imlib2 functions
* in any way.
*
* One caution is that this is an expensive process -- in particular the
* conversion of the image into RGB and back is time consuming. For some
* special cases -- e.g. painting black text -- it would be faster to paint
* the text into a bitmap and then combine it directly into the YUV
* image. However, this code is fast enough to handle 10 fps of 320x240 on a
* 900MHz Duron in maybe 15% of the CPU.
*
* This library is free software; you can redistribute it and/or
@@ -55,7 +55,7 @@
#include <sys/time.h>
#include <time.h>
#include <X11/Xlib.h>
#include <Imlib2.h>
typedef struct {
int dummy;
@@ -138,7 +138,7 @@ int Configure(void **ctxp, int argc, char *argv[])
return -1;
}
imlib_context_set_font(ci->fn);
imlib_context_set_direction(IMLIB_TEXT_TO_RIGHT);
if (color) {
char buff[256];
@@ -215,18 +215,18 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
avpicture_fill(&picture1, (uint8_t *) data, PIX_FMT_RGBA32, width, height);
if (pix_fmt != PIX_FMT_RGBA32) {
if (img_convert(&picture1, PIX_FMT_RGBA32,
picture, pix_fmt, width, height) < 0) {
goto done;
}
} else {
img_copy(&picture1, picture, PIX_FMT_RGBA32, width, height);
}
imlib_image_set_has_alpha(0);
{
int wid, hig, h_a, v_a;
char buff[1000];
char tbuff[1000];
char *tbp = ci->text;
@@ -268,11 +268,11 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
}
if (pix_fmt != PIX_FMT_RGBA32) {
if (img_convert(picture, pix_fmt,
&picture1, PIX_FMT_RGBA32, width, height) < 0) {
}
} else {
img_copy(picture, &picture1, PIX_FMT_RGBA32, width, height);
}
done:

View File

@@ -1,5 +1,5 @@
/*
* Null Video Hook
* Copyright (c) 2002 Philip Gladstone
*
* This library is free software; you can redistribute it and/or
@@ -57,7 +57,7 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
buf = av_malloc(size);
avpicture_fill(&picture1, buf, PIX_FMT_RGB24, width, height);
if (img_convert(&picture1, PIX_FMT_RGB24,
picture, pix_fmt, width, height) < 0) {
av_free(buf);
return;
@@ -68,7 +68,7 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
/* Insert filter code here */
if (pix_fmt != PIX_FMT_RGB24) {
if (img_convert(picture, pix_fmt,
&picture1, PIX_FMT_RGB24, width, height) < 0) {
}
}
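
The null hook shows the template the other hooks follow: convert the frame to a packed RGB copy, do the work where the "Insert filter code here" comment sits, then convert back if needed. As a hedged illustration of what could go at that point (not part of the hook), a trivial colour inversion over the RGB24 copy, honouring the row stride and assuming the AVPicture layout with data[] and linesize[]:

    /* Illustration only: invert every RGB24 sample in pic in place.
     * linesize[0] may be larger than 3*width, so walk row by row. */
    static void invert_rgb24(AVPicture *pic, int width, int height)
    {
        int x, y;
        for (y = 0; y < height; y++) {
            uint8_t *row = pic->data[0] + y * pic->linesize[0];
            for (x = 0; x < 3 * width; x++)
                row[x] = 255 - row[x];
        }
    }

Called on &picture1 between the two img_convert() calls, this would simply invert the frame.
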

View File

@@ -1,5 +1,5 @@
/*
* PPM Video Hook
* Copyright (c) 2003 Charles Yates
*
* This library is free software; you can redistribute it and/or
@@ -122,7 +122,7 @@ int rwpipe_read_number( rwpipe *rw )
int c = 0;
FILE *in = rwpipe_reader( rw );
do
{
c = fgetc( in );
@@ -181,14 +181,14 @@ void rwpipe_close( rwpipe *this )
/** Context info for this vhook - stores the pipe and image buffers.
*/
typedef struct
{
rwpipe *rw;
int size1;
char *buf1;
int size2;
char *buf2;
}
ContextInfo;
/** Initialise the context info for this vhook.
@@ -231,7 +231,7 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
err = 1;
/* Convert to RGB24 if necessary */
if ( !err && pix_fmt != PIX_FMT_RGB24 )
{
int size = avpicture_get_size(PIX_FMT_RGB24, width, height);
@@ -295,9 +295,9 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
if ( !err )
{
/* Actually, this is wrong, since the out_width/out_height returned from the
* filter won't necessarily be the same as width and height - img_resample
* won't scale rgb24, so the only way out of this is to convert to something
* that img_resample does like [which may or may not be pix_fmt], rescale
* and finally convert to pix_fmt... slow, but would provide the most flexibility.
*
* Currently, we take the upper left width/height pixels from the filtered image,
@@ -307,7 +307,7 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width,
* are gracefully ignored and the original image is returned - in this case, a
* failure may corrupt the input.
*/
if (img_convert(picture, pix_fmt, &picture2, PIX_FMT_RGB24, width, height) < 0)
{
}
}

View File

@@ -1,5 +1,5 @@
/*
* Watermark Hook
* Copyright (c) 2005 Marcus Engene myfirstname(at)mylastname.se
*
* The watermarkpicture works like this. (Assuming colorintencities 0..0xff)
@@ -11,7 +11,7 @@
* > 0xff, result = 0xff
*
* This way a mask that is visible both in light pictures and in dark can be
* made (fex by using a picture generated by gimp and the bump map tool).
*
* An example watermark file is at
* http://engene.se/ffmpeg_watermark.gif
@@ -41,7 +41,7 @@
//#include <fcntl.h>
#include <unistd.h>
#include <stdarg.h>
#include "common.h"
#include "avformat.h"
@@ -57,8 +57,8 @@ typedef struct {
const char *p_ext;
int videoStream;
int frameFinished;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
AVFrame *pFrame;
AVPacket packet;
int numBytes;
@@ -66,7 +66,7 @@ typedef struct {
int i;
AVInputFormat *file_iformat;
AVStream *st;
int is_done;
AVFrame *pFrameRGB;
} ContextInfo;
@@ -74,7 +74,7 @@ int get_watermark_picture(ContextInfo *ci, int cleanup);
/****************************************************************************
*
****************************************************************************/
void Release(void *ctx)
{
@@ -82,14 +82,14 @@ void Release(void *ctx)
ci = (ContextInfo *) ctx;
if (ci) get_watermark_picture(ci, 1);
if (ctx)
av_free(ctx);
}
/****************************************************************************
*
****************************************************************************/
int Configure(void **ctxp, int argc, char *argv[])
{
@@ -100,9 +100,9 @@ int Configure(void **ctxp, int argc, char *argv[])
ci = (ContextInfo *) *ctxp;
optind = 1;
// Struct is mallocz:ed so no need to reset.
while ((c = getopt(argc, argv, "f:")) > 0) {
switch (c) {
case 'f':
@@ -114,13 +114,13 @@ int Configure(void **ctxp, int argc, char *argv[])
return -1;
}
}
//
if (0 == ci->filename[0]) {
av_log(NULL, AV_LOG_ERROR, "Watermark: There is no filename specified.\n");
return -1;
}
av_register_all();
return get_watermark_picture(ci, 0);
}
@@ -129,18 +129,18 @@ int Configure(void **ctxp, int argc, char *argv[])
/****************************************************************************
* Why is this a void returning functions? I want to be able to go wrong!
****************************************************************************/
void Process(void *ctx,
AVPicture *picture,
enum PixelFormat pix_fmt,
int src_width,
int src_height,
int64_t pts)
{
ContextInfo *ci = (ContextInfo *) ctx;
char *buf = 0;
AVPicture picture1;
AVPicture *pict = picture;
AVFrame *pFrameRGB;
int xm_size;
int ym_size;
@@ -154,9 +154,9 @@ void Process(void *ctx,
uint32_t pixel_meck;
uint32_t pixel;
uint32_t pixelm;
int tmp;
//?? (void) ci;
if (pix_fmt != PIX_FMT_RGBA32) {
@@ -166,7 +166,7 @@ void Process(void *ctx,
buf = av_malloc(size);
avpicture_fill(&picture1, buf, PIX_FMT_RGBA32, src_width, src_height);
if (img_convert(&picture1, PIX_FMT_RGBA32,
picture, pix_fmt, src_width, src_height) < 0) {
av_free(buf);
return;
@@ -176,15 +176,15 @@ void Process(void *ctx,
/* Insert filter code here */ /* ok */
// Get me next frame
if (0 > get_watermark_picture(ci, 0)) {
return;
}
// These are the three original static variables in the ffmpeg hack.
pFrameRGB = ci->pFrameRGB;
xm_size = ci->x_size;
ym_size = ci->y_size;
// I'll do the *4 => <<2 crap later. Most compilers understand that anyway.
// According to avcodec.h PIX_FMT_RGBA32 is handled in endian specific manner.
for (y=0; y<src_height; y++) {
@@ -214,23 +214,23 @@ void Process(void *ctx,
if (tmp > 255) tmp = 255;
if (tmp < 0) tmp = 0;
pixel_meck |= (tmp << 0) & 0xff;
// test:
//pixel_meck = pixel & 0xff000000;
//pixel_meck |= (pixelm & 0x00ffffff);
*p_pixel = pixel_meck;
offs += 4;
} // foreach X
} // foreach Y
if (pix_fmt != PIX_FMT_RGBA32) {
if (img_convert(picture, pix_fmt,
&picture1, PIX_FMT_RGBA32, src_width, src_height) < 0) {
}
}
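
As an aside, the per-channel arithmetic in the loop above reduces to: combine one frame channel with one watermark channel, then clamp to 0..255 exactly as the clipped tmp shows. A minimal standalone sketch, assuming an additive combine re-centred on mid-grey (0x80) as the file's header comment suggests; the 0x80 offset is an assumption for illustration, only the clamping is visible in the hunk above, and the helper name is invented:

    /* Illustration only: blend one 0..255 frame channel with one 0..255
     * watermark channel. With the assumed 0x80 bias a mid-grey mask value
     * leaves the frame untouched; brighter lightens, darker darkens. */
    static int blend_channel(int frame, int mask)
    {
        int tmp = frame + mask - 0x80;   /* assumed bias */
        if (tmp > 255) tmp = 255;        /* same clamp as the hook */
        if (tmp < 0)   tmp = 0;
        return tmp;
    }
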
@@ -243,7 +243,7 @@ void Process(void *ctx,
* When cleanup == 0, we try to get the next frame. If no next frame, nothing
* is done.
*
* This code follows the example on
* http://www.inb.uni-luebeck.de/~boehme/using_libavcodec.html
*
* 0 = ok, -1 = error
@@ -254,11 +254,11 @@ int get_watermark_picture(ContextInfo *ci, int cleanup)
// Yes, *pFrameRGB arguments must be null the first time otherwise it's not good..
// This block is only executed the first time we enter this function.
if (0 == ci->pFrameRGB &&
0 == cleanup)
{
/*
* The last three parameters specify the file format, buffer size and format
* parameters; by simply specifying NULL or 0 we ask libavformat to auto-detect
* the format and use a default buffer size. (Didn't work!)
@@ -285,24 +285,24 @@ int get_watermark_picture(ContextInfo *ci, int cleanup)
av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Really failed to find iformat [%s]\n", ci->p_ext);
return -1;
}
// now continues the Martin template.
if (av_open_input_file(&ci->pFormatCtx, ci->filename, ci->file_iformat, 0, NULL)!=0) {
av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to open input file [%s]\n", ci->filename);
return -1;
}
}
/*
* This fills the streams field of the AVFormatContext with valid information.
*/
if(av_find_stream_info(ci->pFormatCtx)<0) {
av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to find stream info\n");
return -1;
}
/*
* As mentioned in the introduction, we'll handle only video streams, not audio
* streams. To make things nice and easy, we simply use the first video stream we
* find.
*/
@@ -317,48 +317,48 @@ int get_watermark_picture(ContextInfo *ci, int cleanup)
av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to find any video stream\n");
return -1;
}
ci->st = ci->pFormatCtx->streams[ci->videoStream];
ci->x_size = ci->st->codec->width;
ci->y_size = ci->st->codec->height;
// Get a pointer to the codec context for the video stream
ci->pCodecCtx = ci->pFormatCtx->streams[ci->videoStream]->codec;
/*
* OK, so now we've got a pointer to the so-called codec context for our video
* stream, but we still have to find the actual codec and open it.
*/
// Find the decoder for the video stream
ci->pCodec = avcodec_find_decoder(ci->pCodecCtx->codec_id);
if(ci->pCodec == NULL) {
av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to find any codec\n");
return -1;
}
// Inform the codec that we can handle truncated bitstreams -- i.e.,
// bitstreams where frame boundaries can fall in the middle of packets
if (ci->pCodec->capabilities & CODEC_CAP_TRUNCATED)
ci->pCodecCtx->flags|=CODEC_FLAG_TRUNCATED;
// Open codec
if(avcodec_open(ci->pCodecCtx, ci->pCodec)<0) {
av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to open codec\n");
return -1;
}
// Hack to correct wrong frame rates that seem to be generated by some
// codecs
if (ci->pCodecCtx->time_base.den>1000 && ci->pCodecCtx->time_base.num==1)
ci->pCodecCtx->time_base.num=1000;
/*
* Allocate a video frame to store the decoded images in.
*/
ci->pFrame = avcodec_alloc_frame();
/*
* The RGB image pFrameRGB (of type AVFrame *) is allocated like this:
*/
@@ -368,19 +368,19 @@ int get_watermark_picture(ContextInfo *ci, int cleanup)
av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to alloc pFrameRGB\n");
return -1;
}
// Determine required buffer size and allocate buffer
ci->numBytes = avpicture_get_size(PIX_FMT_RGBA32, ci->pCodecCtx->width,
ci->pCodecCtx->height);
ci->buffer = av_malloc(ci->numBytes);
// Assign appropriate parts of buffer to image planes in pFrameRGB
avpicture_fill((AVPicture *)ci->pFrameRGB, ci->buffer, PIX_FMT_RGBA32,
ci->pCodecCtx->width, ci->pCodecCtx->height);
}
// TODO loop, pingpong etc?
if (0 == cleanup)
{
// av_log(NULL, AV_LOG_DEBUG, "get_watermark_picture() Get a frame\n");
while(av_read_frame(ci->pFormatCtx, &ci->packet)>=0)
{
@@ -388,32 +388,32 @@ int get_watermark_picture(ContextInfo *ci, int cleanup)
if(ci->packet.stream_index == ci->videoStream)
{
// Decode video frame
avcodec_decode_video(ci->pCodecCtx, ci->pFrame, &ci->frameFinished,
ci->packet.data, ci->packet.size);
// Did we get a video frame?
if(ci->frameFinished)
{
// Convert the image from its native format to RGBA32
img_convert((AVPicture *)ci->pFrameRGB, PIX_FMT_RGBA32,
(AVPicture*)(ci->pFrame), ci->pCodecCtx->pix_fmt, ci->pCodecCtx->width,
ci->pCodecCtx->height);
// Process the video frame (save to disk etc.)
//fprintf(stderr,"banan() New frame!\n");
//DoSomethingWithTheImage(ci->pFrameRGB);
return 0;
}
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&ci->packet);
}
ci->is_done = 1;
return 0;
} // if 0 != cleanup
if (0 != cleanup)
{
// Free the RGB image
if (0 != ci->buffer) {
@@ -423,20 +423,20 @@ int get_watermark_picture(ContextInfo *ci, int cleanup)
if (0 != ci->pFrameRGB) {
av_free(ci->pFrameRGB);
ci->pFrameRGB = 0;
}
// Close the codec
if (0 != ci->pCodecCtx) {
avcodec_close(ci->pCodecCtx);
ci->pCodecCtx = 0;
}
// Close the video file
if (0 != ci->pFormatCtx) {
av_close_input_file(ci->pFormatCtx);
ci->pFormatCtx = 0;
}
ci->is_done = 0;
}
return 0;