diff --git a/ffmpeg.c b/ffmpeg.c
index 41150223a5..ff33f85d6f 100644
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -475,7 +475,7 @@ static void do_audio_out(AVFormatContext *s,
                 fprintf(stderr, "adding %d audio samples of silence\n", (int)delta);
             }
         }else if(audio_sync_method>1){
-            int comp= clip(delta, -audio_sync_method, audio_sync_method);
+            int comp= av_clip(delta, -audio_sync_method, audio_sync_method);
             assert(ost->audio_resample);
             if(verbose > 2)
                 fprintf(stderr, "compensating audio timestamp drift:%f compensation:%d in:%d\n", delta, comp, enc->sample_rate);
diff --git a/libavcodec/adpcm.c b/libavcodec/adpcm.c
index 1b01103c8d..59be8e59d6 100644
--- a/libavcodec/adpcm.c
+++ b/libavcodec/adpcm.c
@@ -209,7 +209,7 @@ static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, sho
     int nibble = FFMIN(7, abs(delta)*4/step_table[c->step_index]) + (delta<0)*8;
     c->prev_sample = c->prev_sample + ((step_table[c->step_index] * yamaha_difflookup[nibble]) / 8);
     CLAMP_TO_SHORT(c->prev_sample);
-    c->step_index = clip(c->step_index + index_table[nibble], 0, 88);
+    c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88);
     return nibble;
 }
@@ -224,7 +224,7 @@ static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, shor
     else bias=-c->idelta/2;
     nibble= (nibble + bias) / c->idelta;
-    nibble= clip(nibble, -8, 7)&0x0F;
+    nibble= av_clip(nibble, -8, 7)&0x0F;
     predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
     CLAMP_TO_SHORT(predictor);
@@ -254,7 +254,7 @@ static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c,
     c->predictor = c->predictor + ((c->step * yamaha_difflookup[nibble]) / 8);
     CLAMP_TO_SHORT(c->predictor);
     c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
-    c->step = clip(c->step, 127, 24567);
+    c->step = av_clip(c->step, 127, 24567);
     return nibble;
 }
@@ -324,8 +324,8 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
             if(version == CODEC_ID_ADPCM_MS) {
                 const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 256;
                 const int div = (sample - predictor) / step;
-                const int nmin = clip(div-range, -8, 6);
-                const int nmax = clip(div+range, -7, 7);
+                const int nmin = av_clip(div-range, -8, 6);
+                const int nmax = av_clip(div+range, -7, 7);
                 for(nidx=nmin; nidx<=nmax; nidx++) {
                     const int nibble = nidx & 0xf;
                     int dec_sample = predictor + nidx * step;
@@ -372,8 +372,8 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
 #define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
                 const int predictor = nodes[j]->sample1;\
                 const int div = (sample - predictor) * 4 / STEP_TABLE;\
-                int nmin = clip(div-range, -7, 6);\
-                int nmax = clip(div+range, -6, 7);\
+                int nmin = av_clip(div-range, -7, 6);\
+                int nmax = av_clip(div+range, -6, 7);\
                 if(nmin<=0) nmin--; /* distinguish -0 from +0 */\
                 if(nmax<0) nmax--;\
                 for(nidx=nmin; nidx<=nmax; nidx++) {\
@@ -381,9 +381,9 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
                     int dec_sample = predictor + (STEP_TABLE * yamaha_difflookup[nibble]) / 8;\
                     STORE_NODE(NAME, STEP_INDEX);\
                 }
-                LOOP_NODES(ima, step_table[step], clip(step + index_table[nibble], 0, 88));
+                LOOP_NODES(ima, step_table[step], av_clip(step + index_table[nibble], 0, 88));
             } else { //CODEC_ID_ADPCM_YAMAHA
-                LOOP_NODES(yamaha, step, clip((step * yamaha_indexscale[nibble]) >> 8, 127, 24567));
+                LOOP_NODES(yamaha, step, av_clip((step * yamaha_indexscale[nibble]) >> 8, 127, 24567));
 #undef LOOP_NODES
 #undef STORE_NODE
             }
@@ -734,7 +734,7 @@ static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned c
     c->predictor += (c->step * yamaha_difflookup[nibble]) / 8;
     CLAMP_TO_SHORT(c->predictor);
     c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
-    c->step = clip(c->step, 127, 24567);
+    c->step = av_clip(c->step, 127, 24567);
     return c->predictor;
 }
@@ -974,10 +974,10 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
         n = buf_size - 7 * avctx->channels;
         if (n < 0)
             return -1;
-        block_predictor[0] = clip(*src++, 0, 7);
+        block_predictor[0] = av_clip(*src++, 0, 7);
         block_predictor[1] = 0;
         if (st)
-            block_predictor[1] = clip(*src++, 0, 7);
+            block_predictor[1] = av_clip(*src++, 0, 7);
         c->status[0].idelta = (int16_t)((*src & 0xFF) | ((src[1] << 8) & 0xFF00));
         src+=2;
         if (st){
@@ -1299,8 +1299,8 @@
                 return -1;
             c->status[i].step_index += table[delta & (~signmask)];
-            c->status[i].step_index = clip(c->status[i].step_index, 0, 88);
-            c->status[i].predictor = clip(c->status[i].predictor, -32768, 32767);
+            c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
+            c->status[i].predictor = av_clip(c->status[i].predictor, -32768, 32767);
             *samples++ = c->status[i].predictor;
         }
diff --git a/libavcodec/cavs.c b/libavcodec/cavs.c
index ee862bbc7b..4672635d7c 100644
--- a/libavcodec/cavs.c
+++ b/libavcodec/cavs.c
@@ -128,9 +128,9 @@ static inline int get_bs(vector_t *mvP, vector_t *mvQ, int b) {
 }
 
 #define SET_PARAMS \
-    alpha = alpha_tab[clip(qp_avg + h->alpha_offset,0,63)]; \
-    beta  = beta_tab[clip(qp_avg + h->beta_offset, 0,63)]; \
-    tc    = tc_tab[clip(qp_avg + h->alpha_offset,0,63)];
+    alpha = alpha_tab[av_clip(qp_avg + h->alpha_offset,0,63)]; \
+    beta  = beta_tab[av_clip(qp_avg + h->beta_offset, 0,63)]; \
+    tc    = tc_tab[av_clip(qp_avg + h->alpha_offset,0,63)];
 
 /**
  * in-loop deblocking filter for a single macroblock
diff --git a/libavcodec/cavsdsp.c b/libavcodec/cavsdsp.c
index 220dec1b85..55ecaae0a0 100644
--- a/libavcodec/cavsdsp.c
+++ b/libavcodec/cavsdsp.c
@@ -63,16 +63,16 @@ static inline void loop_filter_l1(uint8_t *p0_p, int stride, int alpha, int beta
     int q0 = Q0;
     if(abs(p0-q0)<alpha && abs(P1-p0)<beta && abs(Q1-q0)<beta) {
-        int delta = clip(((q0-p0)*3+P1-Q1+4)>>3,-tc, tc);
-        P0 = clip_uint8(p0+delta);
-        Q0 = clip_uint8(q0-delta);
+        int delta = av_clip(((q0-p0)*3+P1-Q1+4)>>3,-tc, tc);
+        P0 = av_clip_uint8(p0+delta);
+        Q0 = av_clip_uint8(q0-delta);
         if(abs(P2-p0)<beta) {
-            delta = clip(((P0-P1)*3+P2-Q0+4)>>3, -tc, tc);
-            P1 = clip_uint8(P1+delta);
+            delta = av_clip(((P0-P1)*3+P2-Q0+4)>>3, -tc, tc);
+            P1 = av_clip_uint8(P1+delta);
         }
         if(abs(Q2-q0)<beta) {
-            delta = clip(((Q1-Q0)*3+P0-Q2+4)>>3, -tc, tc);
-            Q1 = clip_uint8(Q1-delta);
+            delta = av_clip(((Q1-Q0)*3+P0-Q2+4)>>3, -tc, tc);
+            Q1 = av_clip_uint8(Q1-delta);
         }
     }
 }
@@ -98,9 +98,9 @@ static inline void loop_filter_c2(uint8_t *p0_p,int stride,int alpha, int beta)
 
 static inline void loop_filter_c1(uint8_t *p0_p,int stride,int alpha, int beta, int tc) {
     if(abs(P0-Q0)<alpha && abs(P1-P0)<beta && abs(Q1-Q0)<beta) {
-        int delta = clip(((Q0-P0)*3+P1-Q1+4)>>3, -tc, tc);
-        P0 = clip_uint8(P0+delta);
-        Q0 = clip_uint8(Q0-delta);
+        int delta = av_clip(((Q0-P0)*3+P1-Q1+4)>>3, -tc, tc);
+        P0 = av_clip_uint8(P0+delta);
+        Q0 = av_clip_uint8(Q0-delta);
     }
 }
diff --git a/libavcodec/cook.c b/libavcodec/cook.c
index edfe513328..9a3221414a 100644
--- a/libavcodec/cook.c
+++ b/libavcodec/cook.c
@@ -998,7 +998,7 @@ mlt_compensate_output(COOKContext *q, float *decode_buffer,
      */
     for (j = 0; j < q->samples_per_channel; j++) {
         out[chan + q->nb_channels * j] =
-            clip(lrintf(q->mono_mdct_output[j]), -32768, 32767);
+            av_clip(lrintf(q->mono_mdct_output[j]), -32768, 32767);
     }
 }
diff --git a/libavcodec/dsicinav.c b/libavcodec/dsicinav.c
index dd256e5dc5..c7c3f56273 100644
--- a/libavcodec/dsicinav.c
+++ b/libavcodec/dsicinav.c
@@ -327,7 +327,7 @@ static int cinaudio_decode_frame(AVCodecContext *avctx,
     }
     while (buf_size > 0) {
         cin->delta += cinaudio_delta16_table[*src++];
-        cin->delta = clip(cin->delta, -32768, 32767);
+        cin->delta = av_clip(cin->delta, -32768, 32767);
         *samples++ = cin->delta;
         --buf_size;
     }
diff --git a/libavcodec/dsputil.c b/libavcodec/dsputil.c
index 916d8658cd..eda493d20c 100644
--- a/libavcodec/dsputil.c
+++ b/libavcodec/dsputil.c
@@ -1178,19 +1178,19 @@ void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                                     + src[index+stride+1]*  frac_x )*  frac_y
                                     + r)>>(shift*2);
                 }else{
-                    index= src_x + clip(src_y, 0, height)*stride;
+                    index= src_x + av_clip(src_y, 0, height)*stride;
                     dst[y*stride + x]= ( (  src[index         ]*(s-frac_x)
                                           + src[index       +1]*   frac_x )*s
                                         + r)>>(shift*2);
                 }
             }else{
                 if((unsigned)src_y < height){
-                    index= clip(src_x, 0, width) + src_y*stride;
+                    index= av_clip(src_x, 0, width) + src_y*stride;
                     dst[y*stride + x]= ( (  src[index         ]*(s-frac_y)
                                           + src[index+stride  ]*   frac_y )*s
                                         + r)>>(shift*2);
                 }else{
-                    index= clip(src_x, 0, width) + clip(src_y, 0, height)*stride;
+                    index= av_clip(src_x, 0, width) + av_clip(src_y, 0, height)*stride;
                     dst[y*stride + x]=    src[index         ];
                 }
             }
@@ -2434,8 +2434,8 @@ H264_MC(avg_, 16)
 #undef op2_put
 #endif
 
-#define op_scale1(x)  block[x] = clip_uint8( (block[x]*weight + offset) >> log2_denom )
-#define op_scale2(x)  dst[x] = clip_uint8( (src[x]*weights + dst[x]*weightd + offset) >> (log2_denom+1))
+#define op_scale1(x)  block[x] = av_clip_uint8( (block[x]*weight + offset) >> log2_denom )
+#define op_scale2(x)  dst[x] = av_clip_uint8( (src[x]*weights + dst[x]*weightd + offset) >> (log2_denom+1))
 #define H264_WEIGHT(W,H) \
 static void weight_h264_pixels ## W ## x ## H ## _c(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \
     int y; \
@@ -2659,7 +2659,7 @@ static void h263_v_loop_filter_c(uint8_t *src, int stride, int qscale){
 
             ad1= FFABS(d1)>>1;
 
-            d2= clip((p0-p3)/4, -ad1, ad1);
+            d2= av_clip((p0-p3)/4, -ad1, ad1);
 
             src[x-2*stride] = p0 - d2;
             src[x+  stride] = p3 + d2;
@@ -2694,7 +2694,7 @@ static void h263_h_loop_filter_c(uint8_t *src, int stride, int qscale){
 
             ad1= FFABS(d1)>>1;
 
-            d2= clip((p0-p3)/4, -ad1, ad1);
+            d2= av_clip((p0-p3)/4, -ad1, ad1);
 
             src[y*stride-2] = p0 - d2;
             src[y*stride+1] = p3 + d2;
@@ -2752,17 +2752,17 @@ static inline void h264_loop_filter_luma_c(uint8_t *pix, int xstride, int ystrid
                 int i_delta;
 
                 if( FFABS( p2 - p0 ) < beta ) {
-                    pix[-2*xstride] = p1 + clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] );
+                    pix[-2*xstride] = p1 + av_clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] );
                     tc++;
                 }
                 if( FFABS( q2 - q0 ) < beta ) {
-                    pix[   xstride] = q1 + clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] );
+                    pix[   xstride] = q1 + av_clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] );
                     tc++;
                 }
 
-                i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
-                pix[-xstride] = clip_uint8( p0 + i_delta );    /* p0' */
-                pix[0]        = clip_uint8( q0 - i_delta );    /* q0' */
+                i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
+                pix[-xstride] = av_clip_uint8( p0 + i_delta );    /* p0' */
+                pix[0]        = av_clip_uint8( q0 - i_delta );    /* q0' */
             }
             pix += ystride;
         }
@@ -2796,10 +2796,10 @@ static inline void h264_loop_filter_chroma_c(uint8_t *pix, int xstride, int ystr
                 FFABS( p1 - p0 ) < beta &&
                 FFABS( q1 - q0 ) < beta ) {
 
-                int delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
-                pix[-xstride] = clip_uint8( p0 + delta );    /* p0' */
-                pix[0]        = clip_uint8( q0 - delta );    /* q0' */
+                int delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
+                pix[-xstride] = av_clip_uint8( p0 + delta );    /* p0' */
+                pix[0]        = av_clip_uint8( q0 - delta );    /* q0' */
             }
             pix += ystride;
         }
diff --git a/libavcodec/error_resilience.c b/libavcodec/error_resilience.c
index 2e15df3d2f..175ccf73ad 100644
--- a/libavcodec/error_resilience.c
+++ b/libavcodec/error_resilience.c
@@ -612,8 +612,8 @@ void ff_er_frame_start(MpegEncContext *s){
  * error of the same type occured
  */
 void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status){
-    const int start_i= clip(startx + starty * s->mb_width    , 0, s->mb_num-1);
-    const int end_i  = clip(endx   + endy   * s->mb_width    , 0, s->mb_num);
+    const int start_i= av_clip(startx + starty * s->mb_width    , 0, s->mb_num-1);
+    const int end_i  = av_clip(endx   + endy   * s->mb_width    , 0, s->mb_num);
     const int start_xy= s->mb_index2xy[start_i];
     const int end_xy  = s->mb_index2xy[end_i];
     int mask= -1;
diff --git a/libavcodec/eval.c b/libavcodec/eval.c
index 6ec7069914..877de35529 100644
--- a/libavcodec/eval.c
+++ b/libavcodec/eval.c
@@ -155,7 +155,7 @@ static double eval_expr(Parser * p, AVEvalExpr * e) {
         case e_func2:  return e->value * e->a.func2(p->opaque, eval_expr(p, e->param[0]), eval_expr(p, e->param[1]));
         case e_squish: return 1/(1+exp(4*eval_expr(p, e->param[0])));
         case e_gauss: { double d = eval_expr(p, e->param[0]); return exp(-d*d/2)/sqrt(2*M_PI); }
-        case e_ld:     return e->value * p->var[clip(eval_expr(p, e->param[0]), 0, VARS-1)];
+        case e_ld:     return e->value * p->var[av_clip(eval_expr(p, e->param[0]), 0, VARS-1)];
         case e_while: {
             double d = NAN;
             while(eval_expr(p, e->param[0]))
@@ -177,7 +177,7 @@ static double eval_expr(Parser * p, AVEvalExpr * e) {
             case e_div: return e->value * (d / d2);
             case e_add: return e->value * (d + d2);
             case e_last:return e->value * d2;
-            case e_st : return e->value * (p->var[clip(d, 0, VARS-1)]= d2);
+            case e_st : return e->value * (p->var[av_clip(d, 0, VARS-1)]= d2);
         }
     }
 }
diff --git a/libavcodec/flacenc.c b/libavcodec/flacenc.c
index b7b7d0d8e6..9dd6c7eb87 100644
--- a/libavcodec/flacenc.c
+++ b/libavcodec/flacenc.c
@@ -244,7 +244,7 @@ static int flac_encode_init(AVCodecContext *avctx)
 
     /* set compression option overrides from AVCodecContext */
     if(avctx->use_lpc >= 0) {
-        s->options.use_lpc = clip(avctx->use_lpc, 0, 11);
+        s->options.use_lpc = av_clip(avctx->use_lpc, 0, 11);
     }
     if(s->options.use_lpc == 1)
         av_log(avctx, AV_LOG_DEBUG, " use lpc: Levinson-Durbin recursion with Welch window\n");
@@ -712,7 +712,7 @@ static void quantize_lpc_coefs(double *lpc_in, int order, int precision,
     error=0;
     for(i=0; i<order; i++) {
         error += lpc_in[i] * (1 << sh);
-        lpc_out[i] = clip(lrintf(error), -qmax, qmax);
+        lpc_out[i] = av_clip(lrintf(error), -qmax, qmax);
         error -= lpc_out[i];
     }
diff --git a/libavcodec/g726.c b/libavcodec/g726.c
--- a/libavcodec/g726.c
+++ b/libavcodec/g726.c
@@ ... @@
         for (i=0; i<6; i++)
             c->b[i] = 0;
     } else {
         /* This is a bit crazy, but it really is +255 not +256 */
-        fa1 = clip((-c->a[0]*c->pk[0]*pk0)>>5, -256, 255);
+        fa1 = av_clip((-c->a[0]*c->pk[0]*pk0)>>5, -256, 255);
         c->a[1] += 128*pk0*c->pk[1] + fa1 - (c->a[1]>>7);
-        c->a[1] = clip(c->a[1], -12288, 12288);
+        c->a[1] = av_clip(c->a[1], -12288, 12288);
         c->a[0] += 64*3*pk0*c->pk[0] - (c->a[0] >> 8);
-        c->a[0] = clip(c->a[0], -(15360 - c->a[1]), 15360 - c->a[1]);
+        c->a[0] = av_clip(c->a[0], -(15360 - c->a[1]), 15360 - c->a[1]);
 
         for (i=0; i<6; i++)
             c->b[i] += 128*dq0*sgn(-c->dq[i].sign) - (c->b[i]>>8);
@@ -248,7 +248,7 @@ static inline int16_t g726_iterate(G726Context* c, int16_t I)
         c->ap += (0x200 - c->ap) >> 4;
 
     /* Update Yu and Yl */
-    c->yu = clip(c->y + (((c->tbls->W[I] << 5) - c->y) >> 5), 544, 5120);
+    c->yu = av_clip(c->y + (((c->tbls->W[I] << 5) - c->y) >> 5), 544, 5120);
     c->yl += c->yu + ((-c->yl)>>6);
 
     /* Next iteration for Y */
@@ -264,7 +264,7 @@ static inline int16_t g726_iterate(G726Context* c, int16_t I)
         c->se += mult(i2f(c->a[i] >> 2, &f), &c->sr[i]);
     c->se >>= 1;
 
-    return clip(re_signal << 2, -0xffff, 0xffff);
+    return av_clip(re_signal << 2, -0xffff, 0xffff);
 }
 
 static int g726_reset(G726Context* c, int bit_rate)
diff --git a/libavcodec/h263.c b/libavcodec/h263.c
index 31cc583915..8ae228e4ab 100644
--- a/libavcodec/h263.c
+++ b/libavcodec/h263.c
@@ -211,7 +211,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
         for(i=0; i<2; i++){
             int div, error;
             div= (s->avctx->time_base.num*1800000LL + 500LL*s->avctx->time_base.den) / ((1000LL+i)*s->avctx->time_base.den);
-            div= clip(1, div, 127);
+            div= av_clip(1, div, 127);
             error= FFABS(s->avctx->time_base.num*1800000LL - (1000LL+i)*s->avctx->time_base.den*div);
             if(error < best_error){
                 best_error= error;
@@ -496,7 +496,7 @@ static void ff_init_qscale_tab(MpegEncContext *s){
     for(i=0; i<s->mb_num; i++){
         unsigned int lam= s->lambda_table[ s->mb_index2xy[i] ];
         int qp= (lam*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
-        qscale_table[ s->mb_index2xy[i] ]= clip(qp, s->avctx->qmin, s->avctx->qmax);
+        qscale_table[ s->mb_index2xy[i] ]= av_clip(qp, s->avctx->qmin, s->avctx->qmax);
     }
 }
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 7d6087b30f..7d5a93db4b 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -1322,13 +1322,13 @@ static inline void direct_dist_scale_factor(H264Context * const h){
     int i;
     for(i=0; i<h->ref_count[0]; i++){
         int poc0 = h->ref_list[0][i].poc;
-        int td = clip(poc1 - poc0, -128, 127);
+        int td = av_clip(poc1 - poc0, -128, 127);
         if(td == 0 /* FIXME || pic0 is a long-term ref */){
             h->dist_scale_factor[i] = 256;
         }else{
-            int tb = clip(poc - poc0, -128, 127);
+            int tb = av_clip(poc - poc0, -128, 127);
             int tx = (16384 + (FFABS(td) >> 1)) / td;
-            h->dist_scale_factor[i] = clip((tb*tx + 32) >> 6, -1024, 1023);
+            h->dist_scale_factor[i] = av_clip((tb*tx + 32) >> 6, -1024, 1023);
         }
     }
     if(FRAME_MBAFF){
@@ -1948,7 +1948,7 @@ static void chroma_dc_dct_c(DCTELEM *block){
  */
 static inline int get_chroma_qp(int chroma_qp_index_offset, int qscale){
 
-    return chroma_qp[clip(qscale + chroma_qp_index_offset, 0, 51)];
+    return chroma_qp[av_clip(qscale + chroma_qp_index_offset, 0, 51)];
 }
 
 //FIXME need to check that this doesnt overflow signed 32 bit for low qp, i am not sure, it's very close
@@ -4122,11 +4122,11 @@ static void implicit_weight_table(H264Context *h){
         int poc0 = h->ref_list[0][ref0].poc;
         for(ref1=0; ref1 < h->ref_count[1]; ref1++){
             int poc1 = h->ref_list[1][ref1].poc;
-            int td = clip(poc1 - poc0, -128, 127);
+            int td = av_clip(poc1 - poc0, -128, 127);
             if(td){
-                int tb = clip(cur_poc - poc0, -128, 127);
+                int tb = av_clip(cur_poc - poc0, -128, 127);
                 int tx = (16384 + (FFABS(td) >> 1)) / td;
-                int dist_scale_factor = clip((tb*tx + 32) >> 6, -1024, 1023) >> 2;
+                int dist_scale_factor = av_clip((tb*tx + 32) >> 6, -1024, 1023) >> 2;
                 if(dist_scale_factor < -64 || dist_scale_factor > 128)
                     h->implicit_weight[ref0][ref1] = 32;
                 else
@@ -6814,17 +6814,17 @@ static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int
                 int i_delta;
 
                 if( FFABS( p2 - p0 ) < beta ) {
-                    pix[-2] = p1 + clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) ) >> 1, -tc0, tc0 );
+                    pix[-2] = p1 + av_clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) ) >> 1, -tc0, tc0 );
                     tc++;
                 }
                 if( FFABS( q2 - q0 ) < beta ) {
-                    pix[1] = q1 + clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0, tc0 );
+                    pix[1] = q1 + av_clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0, tc0 );
                     tc++;
                 }
 
-                i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
-                pix[-1] = clip_uint8( p0 + i_delta );    /* p0' */
-                pix[0]  = clip_uint8( q0 - i_delta );    /* q0' */
+                i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
+                pix[-1] = av_clip_uint8( p0 + i_delta );    /* p0' */
+                pix[0]  = av_clip_uint8( q0 - i_delta );    /* q0' */
                 tprintf("filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1);
             }
         }else{
@@ -6902,10 +6902,10 @@ static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, in
             if( FFABS( p0 - q0 ) < alpha &&
                 FFABS( p1 - p0 ) < beta &&
                 FFABS( q1 - q0 ) < beta ) {
-                const int i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
+                const int i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
 
-                pix[-1] = clip_uint8( p0 + i_delta );    /* p0' */
-                pix[0]  = clip_uint8( q0 - i_delta );    /* q0' */
+                pix[-1] = av_clip_uint8( p0 + i_delta );    /* p0' */
+                pix[0]  = av_clip_uint8( q0 - i_delta );    /* q0' */
                 tprintf("filter_mb_mbaff_edgecv i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1);
             }
         }else{
@@ -7387,9 +7387,9 @@ static int decode_slice(H264Context *h){
             for( i= 0; i < 460; i++ ) {
                 int pre;
                 if( h->slice_type == I_TYPE )
-                    pre = clip( ((cabac_context_init_I[i][0] * s->qscale) >>4 ) + cabac_context_init_I[i][1], 1, 126 );
+                    pre = av_clip( ((cabac_context_init_I[i][0] * s->qscale) >>4 ) + cabac_context_init_I[i][1], 1, 126 );
                 else
-                    pre = clip( ((cabac_context_init_PB[h->cabac_init_idc][i][0] * s->qscale) >>4 ) + cabac_context_init_PB[h->cabac_init_idc][i][1], 1, 126 );
+                    pre = av_clip( ((cabac_context_init_PB[h->cabac_init_idc][i][0] * s->qscale) >>4 ) + cabac_context_init_PB[h->cabac_init_idc][i][1], 1, 126 );
 
                 if( pre <= 63 )
                     h->cabac_state[i] = 2 * ( 63 - pre ) + 0;
diff --git a/libavcodec/imc.c b/libavcodec/imc.c
index 7360b64096..6140130b1c 100644
--- a/libavcodec/imc.c
+++ b/libavcodec/imc.c
@@ -347,7 +347,7 @@ static int bit_allocation (IMCContext* q, int stream_format_code, int freebits,
     iacc = 0;
 
     for(j = (stream_format_code & 0x2)?4:0; j < BANDS; j++) {
-        cwlen = clip((int)((q->flcoeffs4[j] * 0.5) - summa + 0.5), 0, 6);
+        cwlen = av_clip((int)((q->flcoeffs4[j] * 0.5) - summa + 0.5), 0, 6);
 
         q->bitsBandT[j] = cwlen;
         summer += q->bandWidthT[j] * cwlen;
diff --git a/libavcodec/indeo2.c b/libavcodec/indeo2.c
index f3917ff3aa..2b129d1418 100644
--- a/libavcodec/indeo2.c
+++ b/libavcodec/indeo2.c
@@ -87,11 +87,11 @@ static int ir2_decode_plane(Ir2Context *ctx, int width, int height, uint8_t *dst
             }
         } else { /* add two deltas from table */
             t = dst[out - stride] + (table[c * 2] - 128);
-            t= clip_uint8(t);
+            t= av_clip_uint8(t);
             dst[out] = t;
             out++;
             t = dst[out - stride] + (table[(c * 2) + 1] - 128);
-            t= clip_uint8(t);
+            t= av_clip_uint8(t);
             dst[out] = t;
             out++;
         }
@@ -121,11 +121,11 @@ static int ir2_decode_plane_inter(Ir2Context *ctx, int width, int height, uint8_
             out += c * 2;
         } else { /* add two deltas from table */
             t = dst[out] + (((table[c * 2] - 128)*3) >> 2);
-            t= clip_uint8(t);
+            t= av_clip_uint8(t);
             dst[out] = t;
             out++;
             t = dst[out] + (((table[(c * 2) + 1] - 128)*3) >> 2);
-            t= clip_uint8(t);
+            t= av_clip_uint8(t);
             dst[out] = t;
             out++;
         }
diff --git a/libavcodec/jpeg_ls.c b/libavcodec/jpeg_ls.c
index 67f0d0f5ab..136e3fb809 100644
--- a/libavcodec/jpeg_ls.c
+++ b/libavcodec/jpeg_ls.c
@@ -366,10 +366,10 @@ static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s, void *
             }
 
             if(sign){
-                pred = clip(pred - state->C[context], 0, state->maxval);
+                pred = av_clip(pred - state->C[context], 0, state->maxval);
                 err = -ls_get_code_regular(&s->gb, state, context);
             } else {
-                pred = clip(pred + state->C[context], 0, state->maxval);
+                pred = av_clip(pred + state->C[context], 0, state->maxval);
                 err = ls_get_code_regular(&s->gb, state, context);
             }
@@ -381,7 +381,7 @@ static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s, void *
                 pred += state->range * state->twonear;
             else if(pred > state->maxval + state->near)
                 pred -= state->range * state->twonear;
-            pred = clip(pred, 0, state->maxval);
+            pred = av_clip(pred, 0, state->maxval);
         }
 
         pred &= state->maxval;
@@ -623,9 +623,9 @@ static inline void ls_encode_line(JLSState *state, PutBitContext *pb, void *last
                     err = -(state->near - err) / state->twonear;
 
                 if(RItype || (Rb >= Ra))
-                    Ra = clip(pred + err * state->twonear, 0, state->maxval);
+                    Ra = av_clip(pred + err * state->twonear, 0, state->maxval);
                 else
-                    Ra = clip(pred - err * state->twonear, 0, state->maxval);
+                    Ra = av_clip(pred - err * state->twonear, 0, state->maxval);
                 W(cur, x, Ra);
             }
             if(err < 0)
@@ -646,11 +646,11 @@ static inline void ls_encode_line(JLSState *state, PutBitContext *pb, void *last
             if(context < 0){
                 context = -context;
                 sign = 1;
-                pred = clip(pred - state->C[context], 0, state->maxval);
+                pred = av_clip(pred - state->C[context], 0, state->maxval);
                 err = pred - R(cur, x);
             }else{
                 sign = 0;
-                pred = clip(pred + state->C[context], 0, state->maxval);
+                pred = av_clip(pred + state->C[context], 0, state->maxval);
                 err = R(cur, x) - pred;
             }
@@ -660,9 +660,9 @@ static inline void ls_encode_line(JLSState *state, PutBitContext *pb, void *last
             else
                 err = -(state->near - err) / state->twonear;
             if(!sign)
-                Ra = clip(pred + err * state->twonear, 0, state->maxval);
+                Ra = av_clip(pred + err * state->twonear, 0, state->maxval);
             else
-                Ra = clip(pred - err * state->twonear, 0, state->maxval);
+                Ra = av_clip(pred - err * state->twonear, 0, state->maxval);
             W(cur, x, Ra);
         }
diff --git a/libavcodec/motion_est.c b/libavcodec/motion_est.c
index f9c0d0a601..2e8b1d34bc 100644
--- a/libavcodec/motion_est.c
+++ b/libavcodec/motion_est.c
@@ -1798,15 +1798,15 @@ static inline int direct_search(MpegEncContext * s, int mb_x, int mb_y)
     c->pred_x=0;
     c->pred_y=0;
 
-    P_LEFT[0] = clip(mv_table[mot_xy - 1][0], xmin<<shift, xmax<<shift);
-    P_LEFT[1] = clip(mv_table[mot_xy - 1][1], ymin<<shift, ymax<<shift);
+    P_LEFT[0] = av_clip(mv_table[mot_xy - 1][0], xmin<<shift, xmax<<shift);
+    P_LEFT[1] = av_clip(mv_table[mot_xy - 1][1], ymin<<shift, ymax<<shift);
 
     /* special case for first line */
     if (!s->first_slice_line) { //FIXME maybe allow this over thread boundary as its clipped
-        P_TOP[0]      = clip(mv_table[mot_xy - mot_stride    ][0], xmin<<shift, xmax<<shift);
-        P_TOP[1]      = clip(mv_table[mot_xy - mot_stride    ][1], ymin<<shift, ymax<<shift);
-        P_TOPRIGHT[0] = clip(mv_table[mot_xy - mot_stride + 1][0], xmin<<shift, xmax<<shift);
-        P_TOPRIGHT[1] = clip(mv_table[mot_xy - mot_stride + 1][1], ymin<<shift, ymax<<shift);
+        P_TOP[0]      = av_clip(mv_table[mot_xy - mot_stride    ][0], xmin<<shift, xmax<<shift);
+        P_TOP[1]      = av_clip(mv_table[mot_xy - mot_stride    ][1], ymin<<shift, ymax<<shift);
+        P_TOPRIGHT[0] = av_clip(mv_table[mot_xy - mot_stride + 1][0], xmin<<shift, xmax<<shift);
+        P_TOPRIGHT[1] = av_clip(mv_table[mot_xy - mot_stride + 1][1], ymin<<shift, ymax<<shift);
 
         P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]);
         P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]);
     }
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ ... @@
     s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
-    s->qscale= clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
+    s->qscale= av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
 
     s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
 }
@@ -1713,10 +1713,10 @@ void MPV_frame_end(MpegEncContext *s)
 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
     int x, y, fr, f;
 
-    sx= clip(sx, 0, w-1);
-    sy= clip(sy, 0, h-1);
-    ex= clip(ex, 0, w-1);
-    ey= clip(ey, 0, h-1);
+    sx= av_clip(sx, 0, w-1);
+    sy= av_clip(sy, 0, h-1);
+    ex= av_clip(ex, 0, w-1);
+    ey= av_clip(ey, 0, h-1);
 
     buf[sy*stride + sx]+= color;
@@ -1762,10 +1762,10 @@ static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h
 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
     int dx,dy;
 
-    sx= clip(sx, -100, w+100);
-    sy= clip(sy, -100, h+100);
-    ex= clip(ex, -100, w+100);
-    ey= clip(ey, -100, h+100);
+    sx= av_clip(sx, -100, w+100);
+    sy= av_clip(sy, -100, h+100);
+    ex= av_clip(ex, -100, w+100);
+    ey= av_clip(ey, -100, h+100);
 
     dx= ex - sx;
     dy= ey - sy;
@@ -2664,10 +2664,10 @@ static inline void gmc1_motion(MpegEncContext *s,
     src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
     motion_x<<=(3-s->sprite_warping_accuracy);
     motion_y<<=(3-s->sprite_warping_accuracy);
-    src_x = clip(src_x, -16, s->width);
+    src_x = av_clip(src_x, -16, s->width);
     if (src_x == s->width)
         motion_x =0;
-    src_y = clip(src_y, -16, s->height);
+    src_y = av_clip(src_y, -16, s->height);
     if (src_y == s->height)
         motion_y =0;
@@ -2706,10 +2706,10 @@ static inline void gmc1_motion(MpegEncContext *s,
     src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
     motion_x<<=(3-s->sprite_warping_accuracy);
     motion_y<<=(3-s->sprite_warping_accuracy);
-    src_x = clip(src_x, -8, s->width>>1);
+    src_x = av_clip(src_x, -8, s->width>>1);
     if (src_x == s->width>>1)
         motion_x =0;
-    src_y = clip(src_y, -8, s->height>>1);
+    src_y = av_clip(src_y, -8, s->height>>1);
     if (src_y == s->height>>1)
         motion_y =0;
@@ -2879,10 +2879,10 @@ static inline int hpel_motion(MpegEncContext *s,
     src_y += motion_y >> 1;
 
     /* WARNING: do no forget half pels */
-    src_x = clip(src_x, -16, width); //FIXME unneeded for emu?
+    src_x = av_clip(src_x, -16, width); //FIXME unneeded for emu?
    if (src_x == width)
         dxy &= ~1;
-    src_y = clip(src_y, -16, height);
+    src_y = av_clip(src_y, -16, height);
     if (src_y == height)
         dxy &= ~2;
     src += src_y * stride + src_x;
@@ -3358,10 +3358,10 @@ static inline void chroma_4mv_motion(MpegEncContext *s,
     src_x = s->mb_x * 8 + mx;
     src_y = s->mb_y * 8 + my;
 
-    src_x = clip(src_x, -8, s->width/2);
+    src_x = av_clip(src_x, -8, s->width/2);
     if (src_x == s->width/2)
         dxy &= ~1;
-    src_y = clip(src_y, -8, s->height/2);
+    src_y = av_clip(src_y, -8, s->height/2);
     if (src_y == s->height/2)
         dxy &= ~2;
@@ -3574,10 +3574,10 @@ static inline void MPV_motion(MpegEncContext *s,
             src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;
 
             /* WARNING: do no forget half pels */
-            src_x = clip(src_x, -16, s->width);
+            src_x = av_clip(src_x, -16, s->width);
             if (src_x == s->width)
                 dxy &= ~3;
-            src_y = clip(src_y, -16, s->height);
+            src_y = av_clip(src_y, -16, s->height);
             if (src_y == s->height)
                 dxy &= ~12;
@@ -4343,7 +4343,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x,
         s->dquant= s->qscale - last_qp;
 
         if(s->out_format==FMT_H263){
-            s->dquant= clip(s->dquant, -2, 2);
+            s->dquant= av_clip(s->dquant, -2, 2);
 
             if(s->codec_id==CODEC_ID_MPEG4){
                 if(!s->mb_intra){
@@ -5742,7 +5742,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
         for(i=1;i<64;i++){
             int j= s->dsp.idct_permutation[i];
 
-            s->intra_matrix[j] = clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
+            s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
         }
         convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16, s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
diff --git a/libavcodec/ratecontrol.c b/libavcodec/ratecontrol.c
index 9cf63c0cf6..6d9270da88 100644
--- a/libavcodec/ratecontrol.c
+++ b/libavcodec/ratecontrol.c
@@ -280,7 +280,7 @@ int ff_vbv_update(MpegEncContext *s, int frame_size){
     }
 
     left= buffer_size - rcc->buffer_index - 1;
-    rcc->buffer_index += clip(left, min_rate, max_rate);
+    rcc->buffer_index += av_clip(left, min_rate, max_rate);
 
     if(rcc->buffer_index > buffer_size){
         int stuffing= ceil((rcc->buffer_index - buffer_size)/8);
@@ -417,8 +417,8 @@ static void get_qminmax(int *qmin_ret, int *qmax_ret, MpegEncContext *s, int pic
         qmax= (int)(qmax*FFABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5);
     }
 
-    qmin= clip(qmin, 1, FF_LAMBDA_MAX);
-    qmax= clip(qmax, 1, FF_LAMBDA_MAX);
+    qmin= av_clip(qmin, 1, FF_LAMBDA_MAX);
+    qmax= av_clip(qmax, 1, FF_LAMBDA_MAX);
 
     if(qmax<qmin){
...
@@ ... @@
     for(i=0; i<rcc->num_entries; i++){
 /*        av_log(s->avctx, AV_LOG_DEBUG, "[lavc rc] entry[%d].new_qscale = %.3f qp = %.3f\n", i, rcc->entry[i].new_qscale, rcc->entry[i].new_qscale / FF_QP2LAMBDA); */
-        qscale_sum += clip(rcc->entry[i].new_qscale / FF_QP2LAMBDA, s->avctx->qmin, s->avctx->qmax);
+        qscale_sum += av_clip(rcc->entry[i].new_qscale / FF_QP2LAMBDA, s->avctx->qmin, s->avctx->qmax);
     }
     assert(toobig <= 40);
     av_log(s->avctx, AV_LOG_DEBUG,
diff --git a/libavcodec/resample2.c b/libavcodec/resample2.c
index 3ae0ba855c..9cc2edbe8f 100644
--- a/libavcodec/resample2.c
+++ b/libavcodec/resample2.c
@@ -121,7 +121,7 @@ void av_build_filter(FELEM *filter, double factor, int tap_count, int phase_coun
     /* normalize so that an uniform color remains the same */
     for(i=0;i<tap_count;i++){
...
diff --git a/libavcodec/snow.c b/libavcodec/snow.c
--- a/libavcodec/snow.c
+++ b/libavcodec/snow.c
@@ ... @@
     const int w= b->width;
     int y;
-    const int qlog= clip(s->qlog + b->qlog, 0, QROOT*16);
+    const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
     int qmul= qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
     int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
     int new_index = 0;
@@ -2898,7 +2898,7 @@ static int get_dc(SnowContext *s, int mb_x, int mb_y, int plane_index){
     }
     *b= backup;
 
-    return clip(((ab<<LOG2_OBMC_MAX) + aa/2)/aa, 0, 255);
+    return av_clip(((ab<<LOG2_OBMC_MAX) + aa/2)/aa, 0, 255);
 }
@@ ... @@
     const int level= b->level;
     const int w= b->width;
     const int h= b->height;
-    const int qlog= clip(s->qlog + b->qlog, 0, QROOT*16);
+    const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
     const int qmul= qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
     int x,y, thres1, thres2;
 // START_TIMER
@@ -3466,7 +3466,7 @@ static void quantize(SnowContext *s, SubBand *b, DWTELEM *src, int stride, int b
 static void dequantize_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, DWTELEM *src, int stride, int start_y, int end_y){
     const int w= b->width;
-    const int qlog= clip(s->qlog + b->qlog, 0, QROOT*16);
+    const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
     const int qmul= qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
     const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
     int x,y;
@@ -3494,7 +3494,7 @@ static void dequantize_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand
 static void dequantize(SnowContext *s, SubBand *b, DWTELEM *src, int stride){
     const int w= b->width;
     const int h= b->height;
-    const int qlog= clip(s->qlog + b->qlog, 0, QROOT*16);
+    const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
     const int qmul= qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
     const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
     int x,y;
@@ -3869,7 +3869,7 @@ static int ratecontrol_1pass(SnowContext *s, AVFrame *pict)
             const int w= b->width;
             const int h= b->height;
             const int stride= b->stride;
-            const int qlog= clip(2*QROOT + b->qlog, 0, QROOT*16);
+            const int qlog= av_clip(2*QROOT + b->qlog, 0, QROOT*16);
             const int qmul= qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
             const int qdiv= (1<<16)/qmul;
             int x, y;
diff --git a/libavcodec/svq1.c b/libavcodec/svq1.c
index 4a6c5f55c9..55595b7baa 100644
--- a/libavcodec/svq1.c
+++ b/libavcodec/svq1.c
@@ -1004,8 +1004,8 @@ static int encode_block(SVQ1Context *s, uint8_t *src, uint8_t *ref, uint8_t *dec
         diff= block_sum[stage] - sum;
         mean= (diff + (size>>1)) >> (level+3);
         assert(mean >-300 && mean<300);
-        if(intra) mean= clip(mean, 0, 255);
-        else      mean= clip(mean, -256, 255);
+        if(intra) mean= av_clip(mean, 0, 255);
+        else      mean= av_clip(mean, -256, 255);
         score= sqr - ((diff*(int64_t)diff)>>(level+3)); //FIXME 64bit slooow
         if(score < best_vector_score){
             best_vector_score= score;
diff --git a/libavcodec/svq3.c b/libavcodec/svq3.c
index e02981e615..db601010bf 100644
--- a/libavcodec/svq3.c
+++ b/libavcodec/svq3.c
@@ -285,8 +285,8 @@ static inline void svq3_mc_dir_part (MpegEncContext *s,
       emu = 1;
     }
 
-    mx = clip (mx, -16, (s->h_edge_pos - width  + 15));
-    my = clip (my, -16, (s->v_edge_pos - height + 15));
+    mx = av_clip (mx, -16, (s->h_edge_pos - width  + 15));
+    my = av_clip (my, -16, (s->v_edge_pos - height + 15));
   }
 
   /* form component predictions */
@@ -361,8 +361,8 @@ static inline int svq3_mc_dir (H264Context *h, int size, int mode, int dir, int
   }
 
   /* clip motion vector prediction to frame border */
-  mx = clip (mx, extra_width - 6*x, h_edge_pos - 6*x);
-  my = clip (my, extra_width - 6*y, v_edge_pos - 6*y);
+  mx = av_clip (mx, extra_width - 6*x, h_edge_pos - 6*x);
+  my = av_clip (my, extra_width - 6*y, v_edge_pos - 6*y);
 
   /* get (optional) motion vector differential */
   if (mode == PREDICT_MODE) {
diff --git a/libavcodec/truemotion2.c b/libavcodec/truemotion2.c
index e9bc6b7cbe..1596752a90 100644
--- a/libavcodec/truemotion2.c
+++ b/libavcodec/truemotion2.c
@@ -384,7 +384,7 @@ static inline void tm2_apply_deltas(TM2Context *ctx, int* Y, int stride, int *de
             d = deltas[i + j * 4];
             ct += d;
             last[i] += ct;
-            Y[i] = clip_uint8(last[i]);
+            Y[i] = av_clip_uint8(last[i]);
         }
         Y += stride;
         ctx->D[j] = ct;
@@ -735,7 +735,7 @@ static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
     src = (ctx->cur?ctx->Y2:ctx->Y1);
     for(j = 0; j < ctx->avctx->height; j++){
         for(i = 0; i < ctx->avctx->width; i++){
-            Y[i] = clip_uint8(*src++);
+            Y[i] = av_clip_uint8(*src++);
         }
         Y += p->linesize[0];
     }
@@ -743,7 +743,7 @@ static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
     src = (ctx->cur?ctx->U2:ctx->U1);
     for(j = 0; j < (ctx->avctx->height + 1) >> 1; j++){
         for(i = 0; i < (ctx->avctx->width + 1) >> 1; i++){
-            U[i] = clip_uint8(*src++);
+            U[i] = av_clip_uint8(*src++);
         }
         U += p->linesize[2];
     }
@@ -751,7 +751,7 @@ static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
     src = (ctx->cur?ctx->V2:ctx->V1);
     for(j = 0; j < (ctx->avctx->height + 1) >> 1; j++){
         for(i = 0; i < (ctx->avctx->width + 1) >> 1; i++){
-            V[i] = clip_uint8(*src++);
+            V[i] = av_clip_uint8(*src++);
         }
         V += p->linesize[1];
     }
diff --git a/libavcodec/truespeech.c b/libavcodec/truespeech.c
index d37aa9454a..a03f2a0ced 100644
--- a/libavcodec/truespeech.c
+++ b/libavcodec/truespeech.c
@@ -281,7 +281,7 @@ static void truespeech_synth(TSContext *dec, int16_t *out, int quart)
         for(k = 0; k < 8; k++)
             sum += ptr0[k] * ptr1[k];
         sum = (sum + (out[i] << 12) + 0x800) >> 12;
-        out[i] = clip(sum, -0x7FFE, 0x7FFE);
+        out[i] = av_clip(sum, -0x7FFE, 0x7FFE);
         for(k = 7; k > 0; k--)
             ptr0[k] = ptr0[k - 1];
         ptr0[0] = out[i];
@@ -311,11 +311,11 @@ static void truespeech_synth(TSContext *dec, int16_t *out, int quart)
             sum += ptr0[k] * t[k];
         for(k = 7; k > 0; k--)
             ptr0[k] = ptr0[k - 1];
-        ptr0[0] = clip((sum + 0x800) >> 12, -0x7FFE, 0x7FFE);
+        ptr0[0] = av_clip((sum + 0x800) >> 12, -0x7FFE, 0x7FFE);
 
         sum = ((ptr0[1] * (dec->filtval - (dec->filtval >> 2))) >> 4) + sum;
         sum = sum - (sum >> 3);
-        out[i] = clip((sum + 0x800) >> 12, -0x7FFE, 0x7FFE);
+        out[i] = av_clip((sum + 0x800) >> 12, -0x7FFE, 0x7FFE);
     }
 }
diff --git a/libavcodec/vc1.c b/libavcodec/vc1.c
index b2fc5ffcbc..c1a8b4c4d1 100644
--- a/libavcodec/vc1.c
+++ b/libavcodec/vc1.c
@@ -821,10 +821,10 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
     uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
     uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
 
-    src_x   = clip(  src_x, -16, s->mb_width  * 16);
-    src_y   = clip(  src_y, -16, s->mb_height * 16);
-    uvsrc_x = clip(uvsrc_x,  -8, s->mb_width  *  8);
-    uvsrc_y = clip(uvsrc_y,  -8, s->mb_height *  8);
+    src_x   = av_clip(  src_x, -16, s->mb_width  * 16);
+    src_y   = av_clip(  src_y, -16, s->mb_height * 16);
+    uvsrc_x = av_clip(uvsrc_x,  -8, s->mb_width  *  8);
+    uvsrc_y = av_clip(uvsrc_y,  -8, s->mb_height *  8);
 
     srcY += src_y * s->linesize + src_x;
     srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
@@ -944,8 +944,8 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n)
     src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
     src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);
 
-    src_x   = clip(  src_x, -16, s->mb_width  * 16);
-    src_y   = clip(  src_y, -16, s->mb_height * 16);
+    src_x   = av_clip(  src_x, -16, s->mb_width  * 16);
+    src_y   = av_clip(  src_y, -16, s->mb_height * 16);
 
     srcY += src_y * s->linesize + src_x;
@@ -1071,8 +1071,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v)
     uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
     uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
 
-    uvsrc_x = clip(uvsrc_x,  -8, s->mb_width  *  8);
-    uvsrc_y = clip(uvsrc_y,  -8, s->mb_height *  8);
+    uvsrc_x = av_clip(uvsrc_x,  -8, s->mb_width  *  8);
+    uvsrc_y = av_clip(uvsrc_y,  -8, s->mb_height *  8);
 
     srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
     srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
     if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
@@ -1499,8 +1499,8 @@ static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
             shift = v->lumshift << 6;
         }
         for(i = 0; i < 256; i++) {
-            v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
-            v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
+            v->luty[i] = av_clip_uint8((scale * i + shift + 32) >> 6);
+            v->lutuv[i] = av_clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
         }
     }
     if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
@@ -1740,8 +1740,8 @@ static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
             shift = v->lumshift << 6;
         }
         for(i = 0; i < 256; i++) {
-            v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
-            v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
+            v->luty[i] = av_clip_uint8((scale * i + shift + 32) >> 6);
+            v->lutuv[i] = av_clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
         }
         v->use_ic = 1;
     }
@@ -2116,10 +2116,10 @@ static void vc1_interp_mc(VC1Context *v)
     uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
     uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
 
-    src_x   = clip(  src_x, -16, s->mb_width  * 16);
-    src_y   = clip(  src_y, -16, s->mb_height * 16);
-    uvsrc_x = clip(uvsrc_x,  -8, s->mb_width  *  8);
-    uvsrc_y = clip(uvsrc_y,  -8, s->mb_height *  8);
+    src_x   = av_clip(  src_x, -16, s->mb_width  * 16);
+    src_y   = av_clip(  src_y, -16, s->mb_height * 16);
+    uvsrc_x = av_clip(uvsrc_x,  -8, s->mb_width  *  8);
+    uvsrc_y = av_clip(uvsrc_y,  -8, s->mb_height *  8);
 
     srcY += src_y * s->linesize + src_x;
     srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
diff --git a/libavcodec/vc1dsp.c b/libavcodec/vc1dsp.c
index f19f266d1a..6102c09601 100644
--- a/libavcodec/vc1dsp.c
+++ b/libavcodec/vc1dsp.c
@@ -355,7 +355,7 @@ static void vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride, int mode,
         tptr = tmp;
         for(j = 0; j < 11; j++) {
             for(i = 0; i < 8; i++)
-                tptr[i] = clip_uint8(vc1_mspel_filter(src + i, 1, m, r));
+                tptr[i] = av_clip_uint8(vc1_mspel_filter(src + i, 1, m, r));
             src += stride;
             tptr += 8;
         }
@@ -365,7 +365,7 @@ static void vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride, int mode,
     tptr = tmp + 8;
     for(j = 0; j < 8; j++) {
         for(i = 0; i < 8; i++)
-            dst[i] = clip_uint8(vc1_mspel_filter(tptr + i, 8, m, r));
+            dst[i] = av_clip_uint8(vc1_mspel_filter(tptr + i, 8, m, r));
         dst += stride;
         tptr += 8;
     }
diff --git a/libavcodec/vmdav.c b/libavcodec/vmdav.c
index e0f958cbeb..69e8a44d3c 100644
--- a/libavcodec/vmdav.c
+++ b/libavcodec/vmdav.c
@@ -462,7 +462,7 @@ static void vmdaudio_decode_audio(VmdAudioContext *s, unsigned char *data,
                 s->predictors[chan] -= vmdaudio_table[buf[i] & 0x7F];
             else
                 s->predictors[chan] += vmdaudio_table[buf[i]];
-            s->predictors[chan] = clip(s->predictors[chan], -32768, 32767);
+            s->predictors[chan] = av_clip(s->predictors[chan], -32768, 32767);
             out[i] = s->predictors[chan];
             chan ^= stereo;
         }
diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c
index 94d5cd62ff..23d97bf9cc 100644
--- a/libavcodec/vp3.c
+++ b/libavcodec/vp3.c
@@ -633,7 +633,7 @@ static void init_dequantizer(Vp3DecodeContext *s)
             int qmin= 8<<(inter + !i);
             int qscale= i ? ac_scale_factor : dc_scale_factor;
 
-            s->qmat[inter][plane][i]= clip((qscale * coeff)/100 * 4, qmin, 4096);
+            s->qmat[inter][plane][i]= av_clip((qscale * coeff)/100 * 4, qmin, 4096);
         }
     }
 }
@@ -1729,8 +1729,8 @@ static void horizontal_filter(unsigned char *first_pixel, int stride,
                (first_pixel[-2] - first_pixel[ 1])
             +3*(first_pixel[ 0] - first_pixel[-1]);
         filter_value = bounding_values[(filter_value + 4) >> 3];
-        first_pixel[-1] = clip_uint8(first_pixel[-1] + filter_value);
-        first_pixel[ 0] = clip_uint8(first_pixel[ 0] - filter_value);
+        first_pixel[-1] = av_clip_uint8(first_pixel[-1] + filter_value);
+        first_pixel[ 0] = av_clip_uint8(first_pixel[ 0] - filter_value);
     }
 }
@@ -1746,8 +1746,8 @@ static void vertical_filter(unsigned char *first_pixel, int stride,
                (first_pixel[2 * nstride] - first_pixel[ stride])
             +3*(first_pixel[0          ] - first_pixel[nstride]);
         filter_value = bounding_values[(filter_value + 4) >> 3];
-        first_pixel[nstride] = clip_uint8(first_pixel[nstride] + filter_value);
-        first_pixel[0] = clip_uint8(first_pixel[0] - filter_value);
+        first_pixel[nstride] = av_clip_uint8(first_pixel[nstride] + filter_value);
+        first_pixel[0] = av_clip_uint8(first_pixel[0] - filter_value);
     }
 }
diff --git a/libavcodec/vp5.c b/libavcodec/vp5.c
index ac953c7aa3..2edacc4cac 100644
--- a/libavcodec/vp5.c
+++ b/libavcodec/vp5.c
@@ -164,7 +164,7 @@ static void vp5_parse_coeff_models(vp56_context_t *s)
     for (pt=0; pt<2; pt++)
         for (ctx=0; ctx<36; ctx++)
             for (node=0; node<5; node++)
-                s->coeff_model_dcct[pt][ctx][node] = clip(((s->coeff_model_dccv[pt][node] * vp5_dccv_lc[node][ctx][0] + 128) >> 8) + vp5_dccv_lc[node][ctx][1], 1, 254);
+                s->coeff_model_dcct[pt][ctx][node] = av_clip(((s->coeff_model_dccv[pt][node] * vp5_dccv_lc[node][ctx][0] + 128) >> 8) + vp5_dccv_lc[node][ctx][1], 1, 254);
 
     /* coeff_model_acct is a linear combination of coeff_model_ract */
     for (ct=0; ct<3; ct++)
@@ -172,7 +172,7 @@ static void vp5_parse_coeff_models(vp56_context_t *s)
         for (cg=0; cg<3; cg++)
             for (ctx=0; ctx<6; ctx++)
                 for (node=0; node<5; node++)
-                    s->coeff_model_acct[pt][ct][cg][ctx][node] = clip(((s->coeff_model_ract[pt][ct][cg][node] * vp5_ract_lc[ct][cg][node][ctx][0] + 128) >> 8) + vp5_ract_lc[ct][cg][node][ctx][1], 1, 254);
+                    s->coeff_model_acct[pt][ct][cg][ctx][node] = av_clip(((s->coeff_model_ract[pt][ct][cg][node] * vp5_ract_lc[ct][cg][node][ctx][0] + 128) >> 8) + vp5_ract_lc[ct][cg][node][ctx][1], 1, 254);
 }
 
 static void vp5_parse_coeff(vp56_context_t *s)
diff --git a/libavcodec/vp56.c b/libavcodec/vp56.c
index 62683180db..d0165b67fe 100644
--- a/libavcodec/vp56.c
+++ b/libavcodec/vp56.c
@@ -308,8 +308,8 @@ static void vp56_edge_filter(vp56_context_t *s, uint8_t *yuv,
     for (i=0; i<12; i++) {
         v = (yuv[-pix2_inc] + 3*(yuv[0]-yuv[-pix_inc]) - yuv[pix_inc] + 4) >>3;
         v = s->adjust(v, t);
-        yuv[-pix_inc] = clip_uint8(yuv[-pix_inc] + v);
-        yuv[0] = clip_uint8(yuv[0] - v);
+        yuv[-pix_inc] = av_clip_uint8(yuv[-pix_inc] + v);
+        yuv[0] = av_clip_uint8(yuv[0] - v);
         yuv += line_inc;
     }
 }
diff --git a/libavcodec/vp6.c b/libavcodec/vp6.c
index df4ebf87dd..9a2afd8fa0 100644
--- a/libavcodec/vp6.c
+++ b/libavcodec/vp6.c
@@ -236,7 +236,7 @@ static void vp6_parse_coeff_models(vp56_context_t *s)
     for (pt=0; pt<2; pt++)
         for (ctx=0; ctx<3; ctx++)
             for (node=0; node<5; node++)
-                s->coeff_model_dcct[pt][ctx][node] = clip(((s->coeff_model_dccv[pt][node] * vp6_dccv_lc[ctx][node][0] + 128) >> 8) + vp6_dccv_lc[ctx][node][1], 1, 255);
+                s->coeff_model_dcct[pt][ctx][node] = av_clip(((s->coeff_model_dccv[pt][node] * vp6_dccv_lc[ctx][node][0] + 128) >> 8) + vp6_dccv_lc[ctx][node][1], 1, 255);
 }
 
 static void vp6_parse_vector_adjustment(vp56_context_t *s, vp56_mv_t *vect)
@@ -395,7 +395,7 @@ static void vp6_filter_hv4(uint8_t *dst, uint8_t *src, int stride,
 
     for (y=0; y<8; y++) {
         for (x=0; x<8; x++) {
-            dst[x] = clip_uint8((  src[x-delta  ] * weights[0]
+            dst[x] = av_clip_uint8((  src[x-delta  ] * weights[0]
                                  + src[x        ] * weights[1]
                                  + src[x+delta  ] * weights[2]
                                  + src[x+2*delta] * weights[3] + 64) >> 7);
@@ -434,7 +434,7 @@ static void vp6_filter_diag4(uint8_t *dst, uint8_t *src, int stride,
 
     for (y=0; y<11; y++) {
         for (x=0; x<8; x++) {
-            t[x] = clip_uint8((  src[x-1] * h_weights[0]
+            t[x] = av_clip_uint8((  src[x-1] * h_weights[0]
                                + src[x  ] * h_weights[1]
                                + src[x+1] * h_weights[2]
                                + src[x+2] * h_weights[3] + 64) >> 7);
@@ -446,7 +446,7 @@ static void vp6_filter_diag4(uint8_t *dst, uint8_t *src, int stride,
     t = tmp + 8;
     for (y=0; y<8; y++) {
         for (x=0; x<8; x++) {
-            dst[x] = clip_uint8((  t[x-8 ] * v_weights[0]
+            dst[x] = av_clip_uint8((  t[x-8 ] * v_weights[0]
                                  + t[x   ] * v_weights[1]
                                  + t[x+8 ] * v_weights[2]
                                  + t[x+16] * v_weights[3] + 64) >> 7);
diff --git a/libavcodec/wmv2.c b/libavcodec/wmv2.c
index f3d4f0f23d..d57eaa5e4a 100644
--- a/libavcodec/wmv2.c
+++ b/libavcodec/wmv2.c
@@ -641,8 +641,8 @@ void ff_mspel_motion(MpegEncContext *s,
     /* WARNING: do no forget half pels */
     v_edge_pos = s->v_edge_pos;
-    src_x = clip(src_x, -16, s->width);
-    src_y = clip(src_y, -16, s->height);
+    src_x = av_clip(src_x, -16, s->width);
+    src_y = av_clip(src_y, -16, s->height);
 
     if(src_x<=-16 || src_x >= s->width)
         dxy &= ~3;
@@ -688,10 +688,10 @@ void ff_mspel_motion(MpegEncContext *s,
         src_x = s->mb_x * 8 + mx;
         src_y = s->mb_y * 8 + my;
 
-        src_x = clip(src_x, -8, s->width >> 1);
+        src_x = av_clip(src_x, -8, s->width >> 1);
         if (src_x == (s->width >> 1))
             dxy &= ~1;
 
-        src_y = clip(src_y, -8, s->height >> 1);
+        src_y = av_clip(src_y, -8, s->height >> 1);
         if (src_y == (s->height >> 1))
             dxy &= ~2;
         offset = (src_y * uvlinesize) + src_x;
diff --git a/libavutil/common.h b/libavutil/common.h
index 057dce46a6..eb531d3d73 100644
--- a/libavutil/common.h
+++ b/libavutil/common.h
@@ -163,7 +163,7 @@ static inline int mid_pred(int a, int b, int c)
  * @param amax maximum value of the clip range
  * @return clipped value
  */
-static inline int clip(int a, int amin, int amax)
+static inline int av_clip(int a, int amin, int amax)
 {
     if      (a < amin) return amin;
     else if (a > amax) return amax;
@@ -175,7 +175,7 @@ static inline int clip(int a, int amin, int amax)
  * @param a value to clip
  * @return clipped value
  */
-static inline uint8_t clip_uint8(int a)
+static inline uint8_t av_clip_uint8(int a)
 {
     if (a&(~255)) return (-a)>>31;
     else          return a;
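
For reference, a minimal standalone sketch of what the renamed helpers compute. The two function bodies mirror the libavutil/common.h definitions in the hunks above; the main() harness is purely illustrative and is not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Same semantics as the av_clip()/av_clip_uint8() definitions above. */
static inline int av_clip(int a, int amin, int amax)
{
    if      (a < amin) return amin;
    else if (a > amax) return amax;
    else               return a;
}

static inline uint8_t av_clip_uint8(int a)
{
    /* a&(~255) is nonzero iff a lies outside 0..255. In that case
     * (-a)>>31 is 0 for negative a and -1 (255 once truncated to
     * uint8_t) for a > 255, assuming an arithmetic right shift. */
    if (a&(~255)) return (-a)>>31;
    else          return a;
}

int main(void)
{
    printf("%d %d %d\n", av_clip(-5, 0, 88), av_clip(42, 0, 88), av_clip(300, 0, 88));
    /* prints: 0 42 88 */
    printf("%d %d %d\n", av_clip_uint8(-1), av_clip_uint8(128), av_clip_uint8(999));
    /* prints: 0 128 255 */
    return 0;
}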