diff --git a/libavcodec/aaccoder.c b/libavcodec/aaccoder.c
index 35b98a9ae2..ee89148ef4 100644
--- a/libavcodec/aaccoder.c
+++ b/libavcodec/aaccoder.c
@@ -161,7 +161,7 @@ static av_always_inline float quantize_and_encode_band_cost_template(
                     di = t - CLIPPED_ESCAPE;
                     curbits += 21;
                 } else {
-                    int c = av_clip(quant(t, Q), 0, 8191);
+                    int c = av_clip_uintp2(quant(t, Q), 13);
                     di = t - c*cbrtf(c)*IQ;
                     curbits += av_log2(c)*2 - 4 + 1;
                 }
@@ -191,7 +191,7 @@ static av_always_inline float quantize_and_encode_band_cost_template(
         if (BT_ESC) {
             for (j = 0; j < 2; j++) {
                 if (ff_aac_codebook_vectors[cb-1][curidx*2+j] == 64.0f) {
-                    int coef = av_clip(quant(fabsf(in[i+j]), Q), 0, 8191);
+                    int coef = av_clip_uintp2(quant(fabsf(in[i+j]), Q), 13);
                     int len = av_log2(coef);
 
                     put_bits(pb, len - 4 + 1, (1 << (len - 4 + 1)) - 2);
diff --git a/libavcodec/ac3dsp.c b/libavcodec/ac3dsp.c
index 25bd6e3577..933550bfdc 100644
--- a/libavcodec/ac3dsp.c
+++ b/libavcodec/ac3dsp.c
@@ -125,7 +125,7 @@ static void ac3_bit_alloc_calc_bap_c(int16_t *mask, int16_t *psd,
             band_end = FFMIN(band_end, end);
 
             for (; bin < band_end; bin++) {
-                int address = av_clip((psd[bin] - m) >> 5, 0, 63);
+                int address = av_clip_uintp2((psd[bin] - m) >> 5, 6);
                 bap[bin] = bap_tab[address];
             }
         } while (end > band_end);
diff --git a/libavcodec/cavs.c b/libavcodec/cavs.c
index 788fcada21..fac695843c 100644
--- a/libavcodec/cavs.c
+++ b/libavcodec/cavs.c
@@ -90,9 +90,9 @@ static inline int get_bs(cavs_vector *mvP, cavs_vector *mvQ, int b)
 }
 
 #define SET_PARAMS                                                     \
-    alpha = alpha_tab[av_clip(qp_avg + h->alpha_offset, 0, 63)];       \
-    beta  = beta_tab[av_clip(qp_avg + h->beta_offset, 0, 63)];         \
-    tc    = tc_tab[av_clip(qp_avg + h->alpha_offset, 0, 63)];
+    alpha = alpha_tab[av_clip_uintp2(qp_avg + h->alpha_offset, 6)];    \
+    beta  = beta_tab[av_clip_uintp2(qp_avg + h->beta_offset, 6)];      \
+    tc    = tc_tab[av_clip_uintp2(qp_avg + h->alpha_offset, 6)];
 
 /**
  * in-loop deblocking filter for a single macroblock
diff --git a/libavcodec/g722dec.c b/libavcodec/g722dec.c
index e7058b7ee1..a1b3aa78c9 100644
--- a/libavcodec/g722dec.c
+++ b/libavcodec/g722dec.c
@@ -112,13 +112,13 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
         ilow = get_bits(&gb, 6 - skip);
         skip_bits(&gb, skip);
 
-        rlow = av_clip((c->band[0].scale_factor * quantizer_table[ilow] >> 10)
-                     + c->band[0].s_predictor, -16384, 16383);
+        rlow = av_clip_intp2((c->band[0].scale_factor * quantizer_table[ilow] >> 10)
+                     + c->band[0].s_predictor, 14);
 
         ff_g722_update_low_predictor(&c->band[0], ilow >> (2 - skip));
 
         dhigh = c->band[1].scale_factor * ff_g722_high_inv_quant[ihigh] >> 10;
-        rhigh = av_clip(dhigh + c->band[1].s_predictor, -16384, 16383);
+        rhigh = av_clip_intp2(dhigh + c->band[1].s_predictor, 14);
 
         ff_g722_update_high_predictor(&c->band[1], dhigh, ihigh);
diff --git a/libavcodec/g722enc.c b/libavcodec/g722enc.c
index e67ccf5324..3943622137 100644
--- a/libavcodec/g722enc.c
+++ b/libavcodec/g722enc.c
@@ -225,9 +225,9 @@ static void g722_encode_trellis(G722Context *c, int trellis,
             if (k < 0)
                 continue;
 
-            decoded = av_clip((cur_node->state.scale_factor *
-                              ff_g722_low_inv_quant6[k] >> 10)
-                            + cur_node->state.s_predictor, -16384, 16383);
+            decoded = av_clip_intp2((cur_node->state.scale_factor *
+                              ff_g722_low_inv_quant6[k] >> 10)
+                            + cur_node->state.s_predictor, 14);
             dec_diff = xlow - decoded;
 
 #define STORE_NODE(index, UPDATE, VALUE)\
@@ -284,8 +284,7 @@ static void g722_encode_trellis(G722Context *c, int trellis,
             dhigh = cur_node->state.scale_factor *
                     ff_g722_high_inv_quant[ihigh] >> 10;
-            decoded = av_clip(dhigh + cur_node->state.s_predictor,
-                              -16384, 16383);
+            decoded = av_clip_intp2(dhigh + cur_node->state.s_predictor, 14);
             dec_diff = xhigh - decoded;
 
             STORE_NODE(1, ff_g722_update_high_predictor(&node->state, dhigh, ihigh), ihigh);
diff --git a/libavcodec/g726.c b/libavcodec/g726.c
index 62aeb797e8..9ad91f238e 100644
--- a/libavcodec/g726.c
+++ b/libavcodec/g726.c
@@ -218,7 +218,7 @@ static int16_t g726_decode(G726Context* c, int I)
             c->b[i] = 0;
     } else {
         /* This is a bit crazy, but it really is +255 not +256 */
-        fa1 = av_clip((-c->a[0]*c->pk[0]*pk0)>>5, -256, 255);
+        fa1 = av_clip_intp2((-c->a[0]*c->pk[0]*pk0)>>5, 8);
 
         c->a[1] += 128*pk0*c->pk[1] + fa1 - (c->a[1]>>7);
         c->a[1] = av_clip(c->a[1], -12288, 12288);
diff --git a/libavcodec/h264_direct.c b/libavcodec/h264_direct.c
index f98389868d..855526ed03 100644
--- a/libavcodec/h264_direct.c
+++ b/libavcodec/h264_direct.c
@@ -37,13 +37,13 @@ static int get_scale_factor(H264Context *const h, int poc, int poc1, int i)
 {
     int poc0 = h->ref_list[0][i].poc;
-    int td = av_clip(poc1 - poc0, -128, 127);
+    int td = av_clip_int8(poc1 - poc0);
 
     if (td == 0 || h->ref_list[0][i].long_ref) {
         return 256;
     } else {
-        int tb = av_clip(poc - poc0, -128, 127);
+        int tb = av_clip_int8(poc - poc0);
         int tx = (16384 + (FFABS(td) >> 1)) / td;
-        return av_clip((tb * tx + 32) >> 6, -1024, 1023);
+        return av_clip_intp2((tb * tx + 32) >> 6, 10);
     }
 }
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
index 4eb2767a5c..e1e87ebe68 100644
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -853,9 +853,9 @@ static void implicit_weight_table(H264Context *h, int field)
             int w = 32;
             if (!h->ref_list[0][ref0].long_ref && !h->ref_list[1][ref1].long_ref) {
                 int poc1 = h->ref_list[1][ref1].poc;
-                int td = av_clip(poc1 - poc0, -128, 127);
+                int td = av_clip_int8(poc1 - poc0);
                 if (td) {
-                    int tb = av_clip(cur_poc - poc0, -128, 127);
+                    int tb = av_clip_int8(cur_poc - poc0);
                     int tx = (16384 + (FFABS(td) >> 1)) / td;
                     int dist_scale_factor = (tb * tx + 32) >> 8;
                     if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
diff --git a/libavcodec/motionpixels.c b/libavcodec/motionpixels.c
index da2727fdf5..c8cf00b69c 100644
--- a/libavcodec/motionpixels.c
+++ b/libavcodec/motionpixels.c
@@ -212,13 +212,13 @@ static void mp_decode_line(MotionPixelsContext *mp, GetBitContext *gb, int y)
             p = mp_get_yuv_from_rgb(mp, x - 1, y);
         } else {
             p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
-            p.y = av_clip(p.y, 0, 31);
+            p.y = av_clip_uintp2(p.y, 5);
             if ((x & 3) == 0) {
                 if ((y & 3) == 0) {
                     p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
-                    p.v = av_clip(p.v, -32, 31);
+                    p.v = av_clip_intp2(p.v, 5);
                     p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
-                    p.u = av_clip(p.u, -32, 31);
+                    p.u = av_clip_intp2(p.u, 5);
                     mp->hpt[((y / 4) * mp->avctx->width + x) / 4] = p;
                 } else {
                     p.v = mp->hpt[((y / 4) * mp->avctx->width + x) / 4].v;
@@ -242,12 +242,12 @@ static void mp_decode_frame_helper(MotionPixelsContext *mp, GetBitContext *gb)
             p = mp_get_yuv_from_rgb(mp, 0, y);
         } else {
             p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
-            p.y = av_clip(p.y, 0, 31);
+            p.y = av_clip_uintp2(p.y, 5);
             if ((y & 3) == 0) {
                 p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
-                p.v = av_clip(p.v, -32, 31);
+                p.v = av_clip_intp2(p.v, 5);
                 p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
-                p.u = av_clip(p.u, -32, 31);
+                p.u = av_clip_intp2(p.u, 5);
             }
             mp->vpt[y] = p;
             mp_set_rgb_from_yuv(mp, 0, y, &p);
diff --git a/libavcodec/opus_celt.c b/libavcodec/opus_celt.c
index a6220e556c..d453e2d2e9 100644
--- a/libavcodec/opus_celt.c
+++ b/libavcodec/opus_celt.c
@@ -1909,7 +1909,7 @@ static void celt_decode_bands(CeltContext *s, OpusRangeCoder *rc)
         s->remaining2 = totalbits - consumed - 1;
         if (i <= s->codedbands - 1) {
             int curr_balance = s->remaining / FFMIN(3, s->codedbands-i);
-            b = av_clip(FFMIN(s->remaining2 + 1, s->pulses[i] + curr_balance), 0, 16383);
+            b = av_clip_uintp2(FFMIN(s->remaining2 + 1, s->pulses[i] + curr_balance), 14);
         } else
             b = 0;
diff --git a/libavcodec/opus_silk.c b/libavcodec/opus_silk.c
index 3552484542..f881325d7b 100644
--- a/libavcodec/opus_silk.c
+++ b/libavcodec/opus_silk.c
@@ -1077,7 +1077,7 @@ static inline void silk_decode_lpc(SilkContext *s, SilkFrame *frame,
         weight = y + ((213 * fpart * y) >> 16);
 
         value = cur * 128 + (lsf_res[i] * 16384) / weight;
-        nlsf[i] = av_clip(value, 0, 32767);
+        nlsf[i] = av_clip_uintp2(value, 15);
     }
 
     /* stabilize the NLSF coefficients */
@@ -1288,8 +1288,8 @@ static void silk_decode_frame(SilkContext *s, OpusRangeCoder *rc,
         } else {
            /* gain is coded relative */
            int delta_gain = opus_rc_getsymbol(rc, silk_model_gain_delta);
-            log_gain = av_clip(FFMAX((delta_gain<<1) - 16,
-                                     frame->log_gain + delta_gain - 4), 0, 63);
+            log_gain = av_clip_uintp2(FFMAX((delta_gain<<1) - 16,
+                                     frame->log_gain + delta_gain - 4), 6);
         }
 
         frame->log_gain = log_gain;
diff --git a/libavcodec/takdec.c b/libavcodec/takdec.c
index 16ccdba9b1..60e80d590a 100644
--- a/libavcodec/takdec.c
+++ b/libavcodec/takdec.c
@@ -487,7 +487,7 @@ static int decode_subframe(TAKDecContext *s, int32_t *decoded,
             v += s->adsp.scalarproduct_int16(&s->residues[i], filter,
                                              FFALIGN(filter_order, 16));
 
-            v = (av_clip(v >> filter_quant, -8192, 8191) << dshift) - *decoded;
+            v = (av_clip_intp2(v >> filter_quant, 13) << dshift) - *decoded;
             *decoded++ = v;
             s->residues[filter_order + i] = v >> dshift;
         }
@@ -657,7 +657,7 @@ static int decorrelate(TAKDecContext *s, int c1, int c2, int length)
             v += s->adsp.scalarproduct_int16(&s->residues[i], filter,
                                              FFALIGN(filter_order, 16));
 
-            p1[i] = (av_clip(v >> 10, -8192, 8191) << dshift) - p1[i];
+            p1[i] = (av_clip_intp2(v >> 10, 13) << dshift) - p1[i];
         }
 
         emms_c();
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index 22cf1e502d..1cfe56ec8c 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -181,7 +181,7 @@ static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
         yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
         yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
     }
-    alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
+    alpha = av_clip_uint16(sd->coefs[1][6]);
 
     for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
         int width = v->output_width>>!!plane;
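---

Editor's note, not part of the patch: every hunk above replaces an av_clip() whose limits correspond to a fixed bit width with the matching libavutil helper, e.g. 0..8191 becomes av_clip_uintp2(x, 13), -16384..16383 becomes av_clip_intp2(x, 14), -128..127 becomes av_clip_int8(x), and 0..65535 becomes av_clip_uint16(x), so the clipped results should be unchanged. The sketch below spells out those equivalences over a sample value range; it is illustrative only, assumes it is built inside an FFmpeg source tree so that libavutil/common.h is on the include path, and the helper name clip_equivalence_check is invented for this note.

/* Illustrative sketch, not part of the patch: checks the clip equivalences
 * relied on by the conversions above over a sample range of inputs.
 * Assumes an FFmpeg build tree; clip_equivalence_check() is hypothetical. */
#include <assert.h>
#include "libavutil/common.h"

static void clip_equivalence_check(void)
{
    int x;
    for (x = -70000; x <= 70000; x++) {
        /* av_clip_uintp2(x, p) == av_clip(x, 0, (1 << p) - 1) */
        assert(av_clip_uintp2(x, 13) == (unsigned)av_clip(x, 0, 8191));
        /* av_clip_intp2(x, p) == av_clip(x, -(1 << p), (1 << p) - 1) */
        assert(av_clip_intp2(x, 14) == av_clip(x, -16384, 16383));
        /* fixed-width shorthands used by the h264 and vc1 hunks */
        assert(av_clip_int8(x) == av_clip(x, -128, 127));
        assert(av_clip_uint16(x) == (unsigned)av_clip(x, 0, 65535));
    }
}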