Mirror of https://github.com/FFmpeg/FFmpeg.git

doxygen: Make sure parameter names match between .c and .h files.

Diego Biurrun 2011-07-02 14:47:34 +02:00
parent 806212498a
commit 96c1e6d40d
10 changed files with 41 additions and 35 deletions

View File

@@ -149,10 +149,8 @@ static int estimate_best_order(double *ref, int min_order, int max_order)
/**
* Calculate LPC coefficients for multiple orders
*
* @param lpc_type LPC method for determining coefficients
* 0 = LPC with fixed pre-defined coeffs
* 1 = LPC with coeffs determined by Levinson-Durbin recursion
* 2+ = LPC with coeffs determined by Cholesky factorization using (use_lpc-1) passes.
* @param lpc_type LPC method for determining coefficients,
* see #FFLPCType for details
*/
int ff_lpc_calc_coefs(LPCContext *s,
const int32_t *samples, int blocksize, int min_order,

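For orientation (not part of the diff): the new wording defers to #FFLPCType instead of spelling out magic numbers. As best I recall, the enum declared in lpc.h around this time looks roughly like the sketch below; treat the exact names and values as an assumption to verify against the tree rather than as quoted source.

/* Sketch of FFLPCType, reconstructed from memory of libavcodec/lpc.h. */
enum FFLPCType {
    FF_LPC_TYPE_DEFAULT  = -1, /* use the codec's default LPC type        */
    FF_LPC_TYPE_NONE     =  0, /* no LPC prediction / all-zero coeffs     */
    FF_LPC_TYPE_FIXED    =  1, /* fixed LPC coefficients                  */
    FF_LPC_TYPE_LEVINSON =  2, /* Levinson-Durbin recursion               */
    FF_LPC_TYPE_CHOLESKY =  3, /* Cholesky factorization                  */
    FF_LPC_TYPE_NB             /* number of LPC types                     */
};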
View File

@@ -141,6 +141,8 @@ static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int ind
* corresponding filter instance which is added to graph with
* create_filter().
*
* @param filt_ctx Pointer that is set to the created and configured filter
* context on success, set to NULL on failure.
* @param filt_ctx put here a pointer to the created filter context on
* success, NULL otherwise
* @param buf pointer to the buffer to parse, *buf will be updated to

View File

@@ -80,16 +80,16 @@ void ff_rdt_subscribe_rule(char *cmd, int size,
*
* @param buf input buffer
* @param len length of input buffer
* @param set_id will be set to the set ID this packet belongs to
* @param seq_no will be set to the sequence number of the packet
* @param stream_id will be set to the stream ID this packet belongs to
* @param is_keyframe will be whether this packet belongs to a keyframe
* @param timestamp will be set to the timestamp of the packet
* @param pset_id will be set to the set ID this packet belongs to
* @param pseq_no will be set to the sequence number of the packet
* @param pstream_id will be set to the stream ID this packet belongs to
* @param pis_keyframe will be whether this packet belongs to a keyframe
* @param ptimestamp will be set to the timestamp of the packet
* @return the amount of bytes consumed, or negative on error
*/
int ff_rdt_parse_header(const uint8_t *buf, int len,
int *set_id, int *seq_no, int *stream_id,
int *is_keyframe, uint32_t *timestamp);
int *pset_id, int *pseq_no, int *pstream_id,
int *pis_keyframe, uint32_t *ptimestamp);
/**
* Parse RDT-style packet data (header + media data).

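To make the renamed out-parameters concrete, here is a minimal hypothetical caller. ff_rdt_parse_header() is an internal (ff_-prefixed) libavformat function, so this only illustrates the calling convention documented above; "rdt.h" is only available inside libavformat.

#include <stdint.h>
#include "rdt.h"   /* internal libavformat header */

/* Hypothetical helper: every field comes back through a pointer argument,
 * and the return value is the number of bytes consumed from buf, or a
 * negative error code. */
static int demo_parse_rdt_header(const uint8_t *buf, int len)
{
    int set_id, seq_no, stream_id, is_keyframe;
    uint32_t timestamp;
    int consumed = ff_rdt_parse_header(buf, len, &set_id, &seq_no, &stream_id,
                                       &is_keyframe, &timestamp);
    if (consumed < 0)
        return consumed;            /* header could not be parsed */
    /* media data starts at buf + consumed */
    return consumed;
}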
View File

@@ -488,9 +488,9 @@ void ff_rtsp_close_streams(AVFormatContext *s);
/**
* Close all connection handles within the RTSP (de)muxer
*
* @param rt RTSP (de)muxer context
* @param s RTSP (de)muxer context
*/
void ff_rtsp_close_connections(AVFormatContext *rt);
void ff_rtsp_close_connections(AVFormatContext *s);
/**
* Get the description of the stream and set up the RTSPStream child

View File

@@ -74,12 +74,12 @@ typedef struct URLProtocol {
* @return 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure
*/
int ffurl_alloc(URLContext **h, const char *url, int flags);
int ffurl_alloc(URLContext **puc, const char *filename, int flags);
/**
* Connect an URLContext that has been allocated by ffurl_alloc
*/
int ffurl_connect(URLContext *h);
int ffurl_connect(URLContext *uc);
/**
* Create an URLContext for accessing to the resource indicated by
@@ -92,7 +92,7 @@ int ffurl_connect(URLContext *h);
* @return 0 in case of success, a negative value corresponding to an
* AVERROR code in case of failure
*/
int ffurl_open(URLContext **h, const char *url, int flags);
int ffurl_open(URLContext **puc, const char *filename, int flags);
/**
* Read up to size bytes from the resource accessed by h, and store

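For context, a hedged sketch of how the one-shot ffurl_open() relates to the two-step ffurl_alloc()/ffurl_connect() pair documented above. These are internal helpers, and ffurl_close() plus AVIO_FLAG_READ are assumptions about this era of the tree rather than anything shown in the hunk.

#include "url.h"   /* internal libavformat header declaring ffurl_*() */

/* Sketch only: open a resource either in one call or in two steps.
 * Returns 0 on success, a negative AVERROR code on failure. */
static int demo_open(const char *filename)
{
    URLContext *uc = NULL;
    int ret;

    /* one-shot: allocate and connect */
    ret = ffurl_open(&uc, filename, AVIO_FLAG_READ);
    if (ret < 0)
        return ret;
    ffurl_close(uc);

    /* equivalent two-step form */
    ret = ffurl_alloc(&uc, filename, AVIO_FLAG_READ);
    if (ret < 0)
        return ret;
    ret = ffurl_connect(uc);
    ffurl_close(uc);
    return ret;
}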
View File

@@ -484,7 +484,7 @@ static time_t mktimegm(struct tm *tm)
return t;
}
int av_parse_time(int64_t *timeval, const char *datestr, int duration)
int av_parse_time(int64_t *timeval, const char *timestr, int duration)
{
const char *p;
int64_t t;
@@ -506,19 +506,19 @@ int av_parse_time(int64_t *timeval, const char *datestr, int duration)
#undef time
time_t now = time(0);
len = strlen(datestr);
len = strlen(timestr);
if (len > 0)
lastch = datestr[len - 1];
lastch = timestr[len - 1];
else
lastch = '\0';
is_utc = (lastch == 'z' || lastch == 'Z');
memset(&dt, 0, sizeof(dt));
p = datestr;
p = timestr;
q = NULL;
if (!duration) {
if (!strncasecmp(datestr, "now", len)) {
if (!strncasecmp(timestr, "now", len)) {
*timeval = (int64_t) now * 1000000;
return 0;
}
@@ -555,15 +555,15 @@ int av_parse_time(int64_t *timeval, const char *datestr, int duration)
}
}
} else {
/* parse datestr as a duration */
/* parse timestr as a duration */
if (p[0] == '-') {
negative = 1;
++p;
}
/* parse datestr as HH:MM:SS */
/* parse timestr as HH:MM:SS */
q = small_strptime(p, time_fmt[0], &dt);
if (!q) {
/* parse datestr as S+ */
/* parse timestr as S+ */
dt.tm_sec = strtol(p, (char **)&q, 10);
if (q == p) {
/* the parsing didn't succeed */

View File

@@ -83,7 +83,7 @@ int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen,
* January, 1970 up to the time of the parsed date. If timestr cannot
* be successfully parsed, set *time to INT64_MIN.
* @param datestr a string representing a date or a duration.
* @param timestr a string representing a date or a duration.
* - If a date the syntax is:
* @code
* [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH[:MM[:SS[.m...]]]}|{HH[MM[SS[.m...]]]}}[Z]

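Since av_parse_time() is the public entry point affected by this rename, a small self-contained usage sketch may help; the input strings are illustrative only.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <libavutil/parseutils.h>

int main(void)
{
    int64_t t;

    /* Date mode (last argument 0): parses an absolute date and returns
     * microseconds since the Unix epoch through *timeval. */
    if (av_parse_time(&t, "2011-07-02T14:47:34", 0) >= 0)
        printf("date:     %" PRId64 " us\n", t);

    /* Duration mode (last argument 1): "HH:MM:SS" or a plain number of
     * seconds, optionally preceded by '-' for a negative duration. */
    if (av_parse_time(&t, "-01:30:00", 1) >= 0)
        printf("duration: %" PRId64 " us\n", t);

    return 0;
}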
View File

@@ -215,7 +215,7 @@ struct SwsContext *sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat
* top-bottom or bottom-top order. If slices are provided in
* non-sequential order the behavior of the function is undefined.
*
* @param context the scaling context previously created with
* @param c the scaling context previously created with
* sws_getContext()
* @param srcSlice the array containing the pointers to the planes of
* the source slice
@@ -232,8 +232,9 @@ struct SwsContext *sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat
* the destination image
* @return the height of the output slice
*/
int sws_scale(struct SwsContext *context, const uint8_t* const srcSlice[], const int srcStride[],
int srcSliceY, int srcSliceH, uint8_t* const dst[], const int dstStride[]);
int sws_scale(struct SwsContext *c, const uint8_t* const srcSlice[],
const int srcStride[], int srcSliceY, int srcSliceH,
uint8_t* const dst[], const int dstStride[]);
/**
* @param inv_table the yuv2rgb coefficients, normally ff_yuv2rgb_coeffs[x]

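Below is a hedged, self-contained sketch of the call pattern these comments describe: build a context with sws_getContext(), then pass the whole frame to sws_scale() as a single slice (srcSliceY = 0, srcSliceH = full height). The un-prefixed PIX_FMT_* names match this era of the tree; allocation failures are not checked, to keep the sketch short.

#include <stdint.h>
#include <stdlib.h>
#include <libavutil/pixfmt.h>
#include <libswscale/swscale.h>

int main(void)
{
    const int src_w = 320, src_h = 240, dst_w = 640, dst_h = 480;

    /* One reusable scaling context: 320x240 YUV420P -> 640x480 RGB24. */
    struct SwsContext *c = sws_getContext(src_w, src_h, PIX_FMT_YUV420P,
                                          dst_w, dst_h, PIX_FMT_RGB24,
                                          SWS_BILINEAR, NULL, NULL, NULL);
    if (!c)
        return 1;

    /* Planar YUV420P source: full-size luma plane plus two half-width,
     * half-height chroma planes; strides are bytes per line of each plane. */
    uint8_t *src[4]        = { calloc(src_w * src_h, 1),
                               calloc((src_w / 2) * (src_h / 2), 1),
                               calloc((src_w / 2) * (src_h / 2), 1), NULL };
    int      src_stride[4] = { src_w, src_w / 2, src_w / 2, 0 };

    /* Packed RGB24 destination: a single plane, 3 bytes per pixel. */
    uint8_t *dst[4]        = { calloc(dst_w * dst_h, 3), NULL, NULL, NULL };
    int      dst_stride[4] = { dst_w * 3, 0, 0, 0 };

    /* Whole image as one slice; the return value is the output height. */
    int out_h = sws_scale(c, (const uint8_t * const *)src, src_stride,
                          0, src_h, dst, dst_stride);

    sws_freeContext(c);
    free(src[0]); free(src[1]); free(src[2]); free(dst[0]);
    return out_h == dst_h ? 0 : 1;
}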
View File

@@ -804,18 +804,19 @@ static int check_image_pointers(uint8_t *data[4], enum PixelFormat pix_fmt,
* swscale wrapper, so we don't need to export the SwsContext.
* Assumes planar YUV to be in YUV order instead of YVU.
*/
int sws_scale(SwsContext *c, const uint8_t* const src[], const int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* const dst[], const int dstStride[])
int sws_scale(struct SwsContext *c, const uint8_t* const srcSlice[],
const int srcStride[], int srcSliceY, int srcSliceH,
uint8_t* const dst[], const int dstStride[])
{
int i;
const uint8_t* src2[4]= {src[0], src[1], src[2], src[3]};
const uint8_t* src2[4]= {srcSlice[0], srcSlice[1], srcSlice[2], srcSlice[3]};
uint8_t* dst2[4]= {dst[0], dst[1], dst[2], dst[3]};
// do not mess up sliceDir if we have a "trailing" 0-size slice
if (srcSliceH == 0)
return 0;
if (!check_image_pointers(src, c->srcFormat, srcStride)) {
if (!check_image_pointers(srcSlice, c->srcFormat, srcStride)) {
av_log(c, AV_LOG_ERROR, "bad src image pointers\n");
return 0;
}
@@ -836,7 +837,7 @@ int sws_scale(SwsContext *c, const uint8_t* const src[], const int srcStride[],
for (i=0; i<256; i++) {
int p, r, g, b,y,u,v;
if(c->srcFormat == PIX_FMT_PAL8) {
p=((const uint32_t*)(src[1]))[i];
p=((const uint32_t*)(srcSlice[1]))[i];
r= (p>>16)&0xFF;
g= (p>> 8)&0xFF;
b= p &0xFF;

View File

@@ -711,7 +711,9 @@ static void getSubSampleFactors(int *h, int *v, enum PixelFormat format)
*v = av_pix_fmt_descriptors[format].log2_chroma_h;
}
int sws_setColorspaceDetails(SwsContext *c, const int inv_table[4], int srcRange, const int table[4], int dstRange, int brightness, int contrast, int saturation)
int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4],
int srcRange, const int table[4], int dstRange,
int brightness, int contrast, int saturation)
{
memcpy(c->srcColorspaceTable, inv_table, sizeof(int)*4);
memcpy(c->dstColorspaceTable, table, sizeof(int)*4);
@@ -734,7 +736,9 @@ int sws_setColorspaceDetails(SwsContext *c, const int inv_table[4], int srcRange
return 0;
}
int sws_getColorspaceDetails(SwsContext *c, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation)
int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table,
int *srcRange, int **table, int *dstRange,
int *brightness, int *contrast, int *saturation)
{
if (isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1;
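To close, a hypothetical fragment showing how the two colorspace-detail calls are usually paired: read the current settings back, then override only the piece you care about. sws_getCoefficients() and SWS_CS_ITU709 are assumed to be available in swscale.h of this tree; they are not shown in the hunk.

#include <libswscale/swscale.h>

/* Hypothetical: switch an existing context to BT.709 input coefficients
 * while keeping the current range and brightness/contrast/saturation. */
static int demo_use_bt709_input(struct SwsContext *c)
{
    int *inv_table, *table;
    int src_range, dst_range, brightness, contrast, saturation;

    /* Fails when the destination format is YUV or gray, per the check
     * visible at the top of sws_getColorspaceDetails() above. */
    if (sws_getColorspaceDetails(c, &inv_table, &src_range, &table, &dst_range,
                                 &brightness, &contrast, &saturation) < 0)
        return -1;

    return sws_setColorspaceDetails(c, sws_getCoefficients(SWS_CS_ITU709),
                                    src_range, table, dst_range,
                                    brightness, contrast, saturation);
}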