kill a bunch of compiler warnings
Originally committed as revision 4522 to svn://svn.ffmpeg.org/ffmpeg/trunk
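For context (not part of the commit itself): the hunks below each silence one of a few recurring warning classes: comparison functions stored in a generic function-pointer table now take an opaque void * and cast it back internally; conditions mixing && and || gain explicit parentheses; variables that some code paths never use are dropped or tagged attribute_unused; and PRINT format specifiers are matched to their argument types. A minimal standalone sketch of those patterns, with every name invented for illustration:

/* Illustrative only -- none of these names exist in FFmpeg. */
#include <stdio.h>
#include <stdint.h>

#ifdef __GNUC__
#define attribute_unused __attribute__((unused))   /* same idea as FFmpeg's macro */
#else
#define attribute_unused
#endif

typedef struct Ctx { int threshold; } Ctx;

/* Pattern 1: a compare function kept in a generic function-pointer table
 * takes void * and casts back to the real context type inside, instead of
 * declaring a prototype that mismatches the table's element type. */
static int cmp_generic(void *v, const uint8_t *a, const uint8_t *b, int n)
{
    Ctx *c = v;
    int i, score = 0;
    for (i = 0; i < n; i++)
        score += a[i] > b[i] + c->threshold;
    return score;
}

typedef int (*cmp_func)(void *v, const uint8_t *a, const uint8_t *b, int n);

int main(void)
{
    Ctx ctx = { 1 };
    cmp_func f = cmp_generic;              /* no incompatible-pointer warning */
    uint8_t x[4] = { 1, 2, 3, 4 }, y[4] = { 0, 0, 9, 9 };

    /* Pattern 2: a variable some code paths never touch is tagged unused
     * rather than deleted, keeping -Wunused-variable quiet. */
    int spare attribute_unused = 0;

    /* Pattern 3: explicit parentheses make the && / || grouping obvious
     * and silence -Wparentheses (the pred_direct_motion changes). */
    int a = 1, b = 0, d = 1;
    if ((a == 1 && d <= 1) || (b < 0 && d == 0))
        puts("condition holds");

    /* Pattern 4: the format specifier matches the argument type (the NSV
     * demuxer PRINT changes: unsigned int -> %u). */
    unsigned size = 1024;
    printf("chunk_size %u, score %d\n", size, f(&ctx, x, y, 4));
    return 0;
}

Built with gcc -Wall, the sketch should stay warning-free, which is the goal of each hunk in the diff.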
parent 72ce053b9c
commit bf4e3bd2d0
@@ -2988,7 +2988,8 @@ static int pix_abs8_xy2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size,
     return s;
 }
 
-static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h){
+static int nsse16_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
+    MpegEncContext *c = v;
     int score1=0;
     int score2=0;
     int x,y;
@@ -3013,7 +3014,8 @@ static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int
     else return score1 + ABS(score2)*8;
 }
 
-static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h){
+static int nsse8_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
+    MpegEncContext *c = v;
     int score1=0;
     int score2=0;
     int x,y;
@@ -1277,9 +1277,9 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){
         fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref[0], 1);
         fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, ref[1], 1);
         if(!IS_INTRA(mb_type_col)
-           && (   l1ref0[0] == 0 && ABS(l1mv0[0][0]) <= 1 && ABS(l1mv0[0][1]) <= 1
-               || l1ref0[0] < 0 && l1ref1[0] == 0 && ABS(l1mv1[0][0]) <= 1 && ABS(l1mv1[0][1]) <= 1
-                  && (h->x264_build>33 || !h->x264_build))){
+           && (   (l1ref0[0] == 0 && ABS(l1mv0[0][0]) <= 1 && ABS(l1mv0[0][1]) <= 1)
+               || (l1ref0[0] < 0 && l1ref1[0] == 0 && ABS(l1mv1[0][0]) <= 1 && ABS(l1mv1[0][1]) <= 1
+                  && (h->x264_build>33 || !h->x264_build)))){
             if(ref[0] > 0)
                 fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv[0][0],mv[0][1]), 4);
             else
@@ -1308,8 +1308,8 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){
 
             /* col_zero_flag */
             if(!IS_INTRA(mb_type_col) && (   l1ref0[x8 + y8*h->b8_stride] == 0
-                                          || l1ref0[x8 + y8*h->b8_stride] < 0 && l1ref1[x8 + y8*h->b8_stride] == 0
-                                             && (h->x264_build>33 || !h->x264_build))){
+                                          || (l1ref0[x8 + y8*h->b8_stride] < 0 && l1ref1[x8 + y8*h->b8_stride] == 0
+                                             && (h->x264_build>33 || !h->x264_build)))){
                 const int16_t (*l1mv)[2]= l1ref0[x8 + y8*h->b8_stride] == 0 ? l1mv0 : l1mv1;
                 for(i4=0; i4<4; i4++){
                     const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*h->b_stride];
@@ -1347,7 +1347,7 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){
             const int x8 = i8&1;
             const int y8 = i8>>1;
             int ref0, dist_scale_factor;
-            int16_t (*l1mv)[2]= l1mv0;
+            const int16_t (*l1mv)[2]= l1mv0;
 
             if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                 continue;
@@ -2327,7 +2327,7 @@ static void pred8x8_plane_c(uint8_t *src, int stride){
     const int l0 = ((has_topleft ? SRC(-1,-1) : SRC(-1,0)) \
                     + 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2; \
     PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) \
-    const int l7 = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2
+    const int l7 attribute_unused = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2
 
 #define PT(x) \
     const int t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
@@ -2335,7 +2335,7 @@ static void pred8x8_plane_c(uint8_t *src, int stride){
     const int t0 = ((has_topleft ? SRC(-1,-1) : SRC(0,-1)) \
                     + 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2; \
     PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) \
-    const int t7 = ((has_topright ? SRC(8,-1) : SRC(7,-1)) \
+    const int t7 attribute_unused = ((has_topright ? SRC(8,-1) : SRC(7,-1)) \
                     + 2*SRC(7,-1) + SRC(6,-1) + 2) >> 2
 
 #define PTR(x) \
@@ -6188,7 +6188,7 @@ static void filter_mb_edgev( H264Context *h, uint8_t *pix, int stride, int bS[4]
     }
 }
 static void filter_mb_edgecv( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp ) {
-    int i, d;
+    int i;
     const int index_a = clip( qp + h->slice_alpha_c0_offset, 0, 51 );
     const int alpha = alpha_table[index_a];
     const int beta = beta_table[clip( qp + h->slice_beta_offset, 0, 51 )];
@@ -6418,7 +6418,7 @@ static void filter_mb_edgeh( H264Context *h, uint8_t *pix, int stride, int bS[4]
 }
 
 static void filter_mb_edgech( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp ) {
-    int i, d;
+    int i;
     const int index_a = clip( qp + h->slice_alpha_c0_offset, 0, 51 );
     const int alpha = alpha_table[index_a];
     const int beta = beta_table[clip( qp + h->slice_beta_offset, 0, 51 )];
@@ -799,7 +799,6 @@ static const unsigned short __align16 SSE2_idct_data[7 * 8] =
 void ff_vp3_idct_sse2(int16_t *input_data)
 {
     unsigned char *input_bytes = (unsigned char *)input_data;
-    unsigned char *dequant_const_bytes = (unsigned char *)SSE2_dequant_const;
     unsigned char *output_data_bytes = (unsigned char *)input_data;
     unsigned char *idct_data_bytes = (unsigned char *)SSE2_idct_data;
     unsigned char *Eight = (unsigned char *)eight_data;
@@ -141,7 +141,6 @@ static int ir2_decode_frame(AVCodecContext *avctx,
     AVFrame *picture = data;
     AVFrame * const p= (AVFrame*)&s->picture;
     int start;
-    int i;
 
     if(p->data[0])
         avctx->release_buffer(avctx, p);
@@ -175,7 +175,7 @@ static always_inline int cmp(MpegEncContext *s, const int x, const int y, const
         }else
             d= 256*256*256*32;
     }else{
-        int uvdxy;
+        int uvdxy; /* no, it might not be used uninitialized */
         if(dxy){
             if(qpel){
                 c->qpel_put[size][dxy](c->temp, ref[0] + x + y*stride, stride); //FIXME prototype (add h)
@@ -1563,7 +1563,6 @@ static inline int check_bidir_mv(MpegEncContext * s,
     MotionEstContext * const c= &s->me;
     uint8_t * const mv_penalty= c->mv_penalty[s->f_code] + MAX_MV; // f_code of the prev frame
     int stride= c->stride;
-    int uvstride= c->uvstride;
     uint8_t *dest_y = c->scratchpad;
     uint8_t *ptr;
     int dxy;
@@ -1810,7 +1810,6 @@ static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s,
     RLTable *rl = &rl_mpeg1;
     uint8_t * const scantable= s->intra_scantable.permutated;
     const int qscale= s->qscale;
-    int v;
     OPEN_READER(re, &s->gb);
     i = -1;
 
@@ -887,7 +887,7 @@ void MPV_common_end(MpegEncContext *s)
 int MPV_encode_init(AVCodecContext *avctx)
 {
     MpegEncContext *s = avctx->priv_data;
-    int i, dummy;
+    int i;
     int chroma_h_shift, chroma_v_shift;
 
     MPV_encode_defaults(s);
@@ -37,7 +37,7 @@ typedef struct UltimotionDecodeContext {
     AVCodecContext *avctx;
     int width, height, blocks;
    AVFrame frame;
-    uint8_t *ulti_codebook;
+    const uint8_t *ulti_codebook;
 } UltimotionDecodeContext;
 
 static int ulti_decode_init(AVCodecContext *avctx)
@@ -1725,7 +1725,6 @@ static void render_slice(Vp3DecodeContext *s, int slice)
     int plane_height;
     int slice_height;
     int current_macroblock_entry = slice * s->macroblock_width * 6;
-    int *bounding_values= s->bounding_values_array+127;
     int fragment_width;
 
     if (slice >= s->macroblock_height)
@@ -45,7 +45,7 @@ static always_inline void idct(uint8_t *dst, int stride, int16_t *input, int typ
     int _Ed, _Gd, _Add, _Bdd, _Fd, _Hd;
     int t1, t2;
 
-    int i, j;
+    int i;
 
     /* Inverse DCT on the rows now */
     for (i = 0; i < 8; i++) {
@@ -644,14 +644,12 @@ static int avi_read_idx1(AVFormatContext *s, int size)
 }
 
 static int guess_ni_flag(AVFormatContext *s){
-    AVIContext *avi = s->priv_data;
     int i;
     int64_t last_start=0;
     int64_t first_end= INT64_MAX;
 
     for(i=0; i<s->nb_streams; i++){
         AVStream *st = s->streams[i];
-        AVIStream *ast = st->priv_data;
         int n= st->nb_index_entries;
 
         if(n <= 0)
@@ -297,8 +297,8 @@ static int nsv_parse_NSVf_header(AVFormatContext *s, AVFormatParameters *ap)
 
     //s->file_size = (uint32_t)get_le32(pb);
     file_size = (uint32_t)get_le32(pb);
-    PRINT(("NSV NSVf chunk_size %ld\n", size));
-    PRINT(("NSV NSVf file_size %Ld\n", file_size));
+    PRINT(("NSV NSVf chunk_size %u\n", size));
+    PRINT(("NSV NSVf file_size %u\n", file_size));
 
     nsv->duration = duration = get_le32(pb); /* in ms */
     PRINT(("NSV NSVf duration %Ld ms\n", duration));
@@ -561,7 +561,7 @@ null_chunk_retry:
     asize = get_le16(pb);
     vsize = (vsize << 4) | (auxcount >> 4);
     auxcount &= 0x0f;
-    PRINT(("NSV CHUNK %d aux, %ld bytes video, %d bytes audio\n", auxcount, vsize, asize));
+    PRINT(("NSV CHUNK %d aux, %u bytes video, %d bytes audio\n", auxcount, vsize, asize));
     /* skip aux stuff */
     for (i = 0; i < auxcount; i++) {
         auxsize = get_le16(pb);
@@ -614,7 +614,7 @@ null_chunk_retry:
             channels = get_byte(pb);
             samplerate = get_le16(pb);
             asize-=4;
-            PRINT(("NSV RAWAUDIO: bps %d, nchan %d, srate %ld\n", bps, channels, samplerate));
+            PRINT(("NSV RAWAUDIO: bps %d, nchan %d, srate %d\n", bps, channels, samplerate));
             if (fill_header) {
                 st[NSV_ST_AUDIO]->need_parsing = 0; /* we know everything */
                 if (bps != 16) {
@@ -629,7 +629,7 @@ null_chunk_retry:
                 st[NSV_ST_AUDIO]->codec->sample_rate = samplerate;
                 av_set_pts_info(st[NSV_ST_AUDIO], 64, 1,
                                 st[NSV_ST_AUDIO]->codec->sample_rate);
-                PRINT(("NSV RAWAUDIO: bps %d, nchan %d, srate %ld\n", bps, channels, samplerate));
+                PRINT(("NSV RAWAUDIO: bps %d, nchan %d, srate %d\n", bps, channels, samplerate));
             }
         }
         av_get_packet(pb, pkt, asize);
@@ -1569,7 +1569,7 @@ static void av_estimate_timings_from_pts(AVFormatContext *ic)
     AVPacket pkt1, *pkt = &pkt1;
     AVStream *st;
     int read_size, i, ret;
-    int64_t start_time, end_time, end_time1;
+    int64_t end_time;
     int64_t filesize, offset, duration;
 
     /* free previous packet */
@@ -336,7 +336,7 @@ static int yuv4_read_packet(AVFormatContext *s, AVPacket *pkt)
 {
     int i;
     char header[MAX_FRAME_HEADER+1];
-    int packet_size, ret, width, height;
+    int packet_size, width, height;
     AVStream *st = s->streams[0];
 
     for (i=0; i<MAX_FRAME_HEADER; i++) {
@@ -28,7 +28,7 @@
 double av_int2dbl(int64_t v){
     if(v+v > 0xFFELLU<<52)
         return 0.0/0.0;
-    return ldexp(((v&(1LL<<52)-1) + (1LL<<52)) * (v>>63|1), (v>>52&0x7FF)-1075);
+    return ldexp(((v&((1LL<<52)-1)) + (1LL<<52)) * (v>>63|1), (v>>52&0x7FF)-1075);
 }
 
 float av_int2flt(int32_t v){