author | Michael Niedermayer <michaelni@gmx.at> | 2015-02-21 11:24:11 +0100
---|---|---
committer | Michael Niedermayer <michaelni@gmx.at> | 2015-02-21 11:27:24 +0100
commit | 66d79aa2e1351ac38f3469bba6a8b1fcaefa0c20 |
tree | c078cd42cba697c15473aeaae0a127bd0e64a74a |
parent | 1253091d6f7b2910f3a53ab648fb3d0cb1a4b4ee |
parent | 9abc80f1ed673141326341e26a05c3e1f78576d0 |
Merge commit '9abc80f1ed673141326341e26a05c3e1f78576d0'
* commit '9abc80f1ed673141326341e26a05c3e1f78576d0':
libavcodec: Make use of av_clip functions
Conflicts:
libavcodec/takdec.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
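For context: the helpers this merge switches to are the power-of-two clip functions from libavutil/common.h. av_clip_uintp2(a, p) clips to [0, 2^p - 1], av_clip_intp2(a, p) clips to [-2^p, 2^p - 1], and av_clip_int8() / av_clip_uint16() clip to the usual 8- and 16-bit ranges, so each replacement below keeps the original bounds. As a minimal sketch (not part of the commit, assuming an installed libavutil so the header resolves), the substitutions can be spot-checked like this:

```c
/* Not part of the commit: a brute-force spot check of the clip substitutions
 * used in this merge, assuming the av_clip family from libavutil/common.h. */
#include <assert.h>
#include <stdio.h>
#include <libavutil/common.h>

int main(void)
{
    int x;
    for (x = -200000; x <= 200000; x++) {
        /* av_clip(x, 0, 8191)        -> av_clip_uintp2(x, 13)   (aaccoder.c)    */
        assert(av_clip_uintp2(x, 13) == (unsigned)av_clip(x, 0, 8191));
        /* av_clip(x, -16384, 16383)  -> av_clip_intp2(x, 14)    (g722dec.c)     */
        assert(av_clip_intp2(x, 14) == av_clip(x, -16384, 16383));
        /* av_clip(x, -128, 127)      -> av_clip_int8(x)         (h264_direct.c) */
        assert(av_clip_int8(x) == av_clip(x, -128, 127));
        /* av_clip(x, 0, (1<<16) - 1) -> av_clip_uint16(x)       (vc1dec.c)      */
        assert(av_clip_uint16(x) == av_clip(x, 0, (1 << 16) - 1));
    }
    puts("all clip substitutions match");
    return 0;
}
```

Since the helpers are static inline, building needs only the headers, e.g. something like `cc check_clips.c $(pkg-config --cflags libavutil)`.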
mode | file | lines changed
---|---|---
-rw-r--r-- | libavcodec/aaccoder.c | 4
-rw-r--r-- | libavcodec/ac3dsp.c | 2
-rw-r--r-- | libavcodec/cavs.c | 6
-rw-r--r-- | libavcodec/g722dec.c | 6
-rw-r--r-- | libavcodec/g722enc.c | 7
-rw-r--r-- | libavcodec/g726.c | 2
-rw-r--r-- | libavcodec/h264_direct.c | 6
-rw-r--r-- | libavcodec/h264_slice.c | 4
-rw-r--r-- | libavcodec/motionpixels.c | 12
-rw-r--r-- | libavcodec/opus_celt.c | 2
-rw-r--r-- | libavcodec/opus_silk.c | 6
-rw-r--r-- | libavcodec/takdec.c | 4
-rw-r--r-- | libavcodec/vc1dec.c | 2
13 files changed, 31 insertions, 32 deletions
```diff
diff --git a/libavcodec/aaccoder.c b/libavcodec/aaccoder.c
index 5bf6a9c155..b4d2009838 100644
--- a/libavcodec/aaccoder.c
+++ b/libavcodec/aaccoder.c
@@ -161,7 +161,7 @@ static av_always_inline float quantize_and_encode_band_cost_template(
                     di = t - CLIPPED_ESCAPE;
                     curbits += 21;
                 } else {
-                    int c = av_clip(quant(t, Q), 0, 8191);
+                    int c = av_clip_uintp2(quant(t, Q), 13);
                     di = t - c*cbrtf(c)*IQ;
                     curbits += av_log2(c)*2 - 4 + 1;
                 }
@@ -191,7 +191,7 @@ static av_always_inline float quantize_and_encode_band_cost_template(
             if (BT_ESC) {
                 for (j = 0; j < 2; j++) {
                     if (ff_aac_codebook_vectors[cb-1][curidx*2+j] == 64.0f) {
-                        int coef = av_clip(quant(fabsf(in[i+j]), Q), 0, 8191);
+                        int coef = av_clip_uintp2(quant(fabsf(in[i+j]), Q), 13);
                         int len = av_log2(coef);

                         put_bits(pb, len - 4 + 1, (1 << (len - 4 + 1)) - 2);
diff --git a/libavcodec/ac3dsp.c b/libavcodec/ac3dsp.c
index b746817c9d..fe87b5bde5 100644
--- a/libavcodec/ac3dsp.c
+++ b/libavcodec/ac3dsp.c
@@ -125,7 +125,7 @@ static void ac3_bit_alloc_calc_bap_c(int16_t *mask, int16_t *psd,
         band_end = FFMIN(band_end, end);

         for (; bin < band_end; bin++) {
-            int address = av_clip((psd[bin] - m) >> 5, 0, 63);
+            int address = av_clip_uintp2((psd[bin] - m) >> 5, 6);
             bap[bin] = bap_tab[address];
         }
     } while (end > band_end);
diff --git a/libavcodec/cavs.c b/libavcodec/cavs.c
index 83073901bc..10a25d8749 100644
--- a/libavcodec/cavs.c
+++ b/libavcodec/cavs.c
@@ -91,9 +91,9 @@ static inline int get_bs(cavs_vector *mvP, cavs_vector *mvQ, int b)
 }

 #define SET_PARAMS \
-    alpha = alpha_tab[av_clip(qp_avg + h->alpha_offset, 0, 63)]; \
-    beta  = beta_tab[av_clip(qp_avg + h->beta_offset, 0, 63)]; \
-    tc    = tc_tab[av_clip(qp_avg + h->alpha_offset, 0, 63)];
+    alpha = alpha_tab[av_clip_uintp2(qp_avg + h->alpha_offset, 6)]; \
+    beta  = beta_tab[av_clip_uintp2(qp_avg + h->beta_offset, 6)]; \
+    tc    = tc_tab[av_clip_uintp2(qp_avg + h->alpha_offset, 6)];

 /**
  * in-loop deblocking filter for a single macroblock
diff --git a/libavcodec/g722dec.c b/libavcodec/g722dec.c
index 0b207ffbca..22e90a3079 100644
--- a/libavcodec/g722dec.c
+++ b/libavcodec/g722dec.c
@@ -110,13 +110,13 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
             ilow = get_bits(&gb, 6 - skip);
             skip_bits(&gb, skip);

-            rlow = av_clip((c->band[0].scale_factor * quantizer_table[ilow] >> 10)
-                           + c->band[0].s_predictor, -16384, 16383);
+            rlow = av_clip_intp2((c->band[0].scale_factor * quantizer_table[ilow] >> 10)
+                                 + c->band[0].s_predictor, 14);

             ff_g722_update_low_predictor(&c->band[0], ilow >> (2 - skip));

             dhigh = c->band[1].scale_factor * ff_g722_high_inv_quant[ihigh] >> 10;
-            rhigh = av_clip(dhigh + c->band[1].s_predictor, -16384, 16383);
+            rhigh = av_clip_intp2(dhigh + c->band[1].s_predictor, 14);

             ff_g722_update_high_predictor(&c->band[1], dhigh, ihigh);

diff --git a/libavcodec/g722enc.c b/libavcodec/g722enc.c
index 5a7bfb25fe..38432f5006 100644
--- a/libavcodec/g722enc.c
+++ b/libavcodec/g722enc.c
@@ -226,9 +226,9 @@ static void g722_encode_trellis(G722Context *c, int trellis,
             if (k < 0)
                 continue;

-            decoded = av_clip((cur_node->state.scale_factor *
+            decoded = av_clip_intp2((cur_node->state.scale_factor *
                               ff_g722_low_inv_quant6[k] >> 10)
-                            + cur_node->state.s_predictor, -16384, 16383);
+                            + cur_node->state.s_predictor, 14);
             dec_diff = xlow - decoded;

 #define STORE_NODE(index, UPDATE, VALUE)\
@@ -285,8 +285,7 @@ static void g722_encode_trellis(G722Context *c, int trellis,

             dhigh = cur_node->state.scale_factor *
                     ff_g722_high_inv_quant[ihigh] >> 10;
-            decoded = av_clip(dhigh + cur_node->state.s_predictor,
-                              -16384, 16383);
+            decoded = av_clip_intp2(dhigh + cur_node->state.s_predictor, 14);
             dec_diff = xhigh - decoded;

             STORE_NODE(1, ff_g722_update_high_predictor(&node->state, dhigh, ihigh), ihigh);
diff --git a/libavcodec/g726.c b/libavcodec/g726.c
index b0331d8643..5b9986a212 100644
--- a/libavcodec/g726.c
+++ b/libavcodec/g726.c
@@ -219,7 +219,7 @@ static int16_t g726_decode(G726Context* c, int I)
             c->b[i] = 0;
     } else {
         /* This is a bit crazy, but it really is +255 not +256 */
-        fa1 = av_clip((-c->a[0]*c->pk[0]*pk0)>>5, -256, 255);
+        fa1 = av_clip_intp2((-c->a[0]*c->pk[0]*pk0)>>5, 8);

         c->a[1] += 128*pk0*c->pk[1] + fa1 - (c->a[1]>>7);
         c->a[1] = av_clip(c->a[1], -12288, 12288);
diff --git a/libavcodec/h264_direct.c b/libavcodec/h264_direct.c
index 3289fe4700..2e4c2b0b73 100644
--- a/libavcodec/h264_direct.c
+++ b/libavcodec/h264_direct.c
@@ -37,13 +37,13 @@ static int get_scale_factor(H264Context *const h, int poc, int poc1, int i)
 {
     int poc0 = h->ref_list[0][i].poc;
-    int td = av_clip(poc1 - poc0, -128, 127);
+    int td = av_clip_int8(poc1 - poc0);

     if (td == 0 || h->ref_list[0][i].long_ref) {
         return 256;
     } else {
-        int tb = av_clip(poc - poc0, -128, 127);
+        int tb = av_clip_int8(poc - poc0);
         int tx = (16384 + (FFABS(td) >> 1)) / td;

-        return av_clip((tb * tx + 32) >> 6, -1024, 1023);
+        return av_clip_intp2((tb * tx + 32) >> 6, 10);
     }
 }
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
index 316a9ef054..70224a5b0b 100644
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -916,9 +916,9 @@ static void implicit_weight_table(H264Context *h, int field)
             int w = 32;
             if (!h->ref_list[0][ref0].long_ref && !h->ref_list[1][ref1].long_ref) {
                 int poc1 = h->ref_list[1][ref1].poc;
-                int td = av_clip(poc1 - poc0, -128, 127);
+                int td = av_clip_int8(poc1 - poc0);
                 if (td) {
-                    int tb = av_clip(cur_poc - poc0, -128, 127);
+                    int tb = av_clip_int8(cur_poc - poc0);
                     int tx = (16384 + (FFABS(td) >> 1)) / td;
                     int dist_scale_factor = (tb * tx + 32) >> 8;
                     if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
diff --git a/libavcodec/motionpixels.c b/libavcodec/motionpixels.c
index 19da10a8ff..84517f990a 100644
--- a/libavcodec/motionpixels.c
+++ b/libavcodec/motionpixels.c
@@ -232,13 +232,13 @@ static void mp_decode_line(MotionPixelsContext *mp, GetBitContext *gb, int y)
             p = mp_get_yuv_from_rgb(mp, x - 1, y);
         } else {
             p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
-            p.y = av_clip(p.y, 0, 31);
+            p.y = av_clip_uintp2(p.y, 5);
             if ((x & 3) == 0) {
                 if ((y & 3) == 0) {
                     p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
-                    p.v = av_clip(p.v, -32, 31);
+                    p.v = av_clip_intp2(p.v, 5);
                     p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
-                    p.u = av_clip(p.u, -32, 31);
+                    p.u = av_clip_intp2(p.u, 5);
                     mp->hpt[((y / 4) * mp->avctx->width + x) / 4] = p;
                 } else {
                     p.v = mp->hpt[((y / 4) * mp->avctx->width + x) / 4].v;
@@ -264,12 +264,12 @@ static void mp_decode_frame_helper(MotionPixelsContext *mp, GetBitContext *gb)
             p = mp_get_yuv_from_rgb(mp, 0, y);
         } else {
             p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
-            p.y = av_clip(p.y, 0, 31);
+            p.y = av_clip_uintp2(p.y, 5);
             if ((y & 3) == 0) {
                 p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
-                p.v = av_clip(p.v, -32, 31);
+                p.v = av_clip_intp2(p.v, 5);
                 p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
-                p.u = av_clip(p.u, -32, 31);
+                p.u = av_clip_intp2(p.u, 5);
             }
             mp->vpt[y] = p;
             mp_set_rgb_from_yuv(mp, 0, y, &p);
diff --git a/libavcodec/opus_celt.c b/libavcodec/opus_celt.c
index aa11bde85c..e5d615efbd 100644
--- a/libavcodec/opus_celt.c
+++ b/libavcodec/opus_celt.c
@@ -1909,7 +1909,7 @@ static void celt_decode_bands(CeltContext *s, OpusRangeCoder *rc)
         s->remaining2 = totalbits - consumed - 1;
         if (i <= s->codedbands - 1) {
             int curr_balance = s->remaining / FFMIN(3, s->codedbands-i);
-            b = av_clip(FFMIN(s->remaining2 + 1, s->pulses[i] + curr_balance), 0, 16383);
+            b = av_clip_uintp2(FFMIN(s->remaining2 + 1, s->pulses[i] + curr_balance), 14);
         } else
             b = 0;

diff --git a/libavcodec/opus_silk.c b/libavcodec/opus_silk.c
index 7a89479fb9..841d1ed25c 100644
--- a/libavcodec/opus_silk.c
+++ b/libavcodec/opus_silk.c
@@ -1077,7 +1077,7 @@ static inline void silk_decode_lpc(SilkContext *s, SilkFrame *frame,
         weight = y + ((213 * fpart * y) >> 16);

         value = cur * 128 + (lsf_res[i] * 16384) / weight;
-        nlsf[i] = av_clip(value, 0, 32767);
+        nlsf[i] = av_clip_uintp2(value, 15);
     }

     /* stabilize the NLSF coefficients */
@@ -1288,8 +1288,8 @@ static void silk_decode_frame(SilkContext *s, OpusRangeCoder *rc,
         } else {
             /* gain is coded relative */
             int delta_gain = opus_rc_getsymbol(rc, silk_model_gain_delta);
-            log_gain = av_clip(FFMAX((delta_gain<<1) - 16,
-                                     frame->log_gain + delta_gain - 4), 0, 63);
+            log_gain = av_clip_uintp2(FFMAX((delta_gain<<1) - 16,
+                                            frame->log_gain + delta_gain - 4), 6);
         }

         frame->log_gain = log_gain;
diff --git a/libavcodec/takdec.c b/libavcodec/takdec.c
index 2f0155d557..a453da81ec 100644
--- a/libavcodec/takdec.c
+++ b/libavcodec/takdec.c
@@ -476,7 +476,7 @@ static int decode_subframe(TAKDecContext *s, int32_t *decoded,
                      s->residues[i + j + 1] * s->filter[j + 1] +
                      s->residues[i + j    ] * s->filter[j    ];
             }
-            v = (av_clip(v >> filter_quant, -8192, 8191) << dshift) - *decoded;
+            v = (av_clip_intp2(v >> filter_quant, 13) << dshift) - *decoded;
             *decoded++ = v;
             s->residues[filter_order + i] = v >> dshift;
         }
@@ -652,7 +652,7 @@ static int decorrelate(TAKDecContext *s, int c1, int c2, int length)
                  s->residues[i    ] * s->filter[0];
             }

-            v = (av_clip(v >> 10, -8192, 8191) << dshift) - *p1;
+            v = (av_clip_intp2(v >> 10, 13) << dshift) - *p1;
             *p1++ = v;
         }

diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index 9668d62247..70694471ce 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -188,7 +188,7 @@ static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
         yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
         yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
     }
-    alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
+    alpha = av_clip_uint16(sd->coefs[1][6]);

     for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
         int width = v->output_width>>!!plane;
```
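For reference, clipping to a power-of-two range needs only one range test plus a little sign arithmetic, and it maps naturally onto saturating instructions on some CPUs, which is part of what the dedicated helpers make possible over a generic av_clip() with hand-written bounds. A rough sketch of such a formulation (illustration only, not the authoritative libavutil code, which lives in libavutil/common.h and may be replaced by per-architecture versions):

```c
/* Illustration only: one way to clip to [0, 2^p - 1] and [-2^p, 2^p - 1].
 * The real helpers are av_clip_uintp2()/av_clip_intp2() in libavutil/common.h;
 * this sketch just shows why expressing the bounds as a bit width is handy. */
static inline unsigned clip_uintp2_sketch(int a, int p)
{
    if (a & ~((1 << p) - 1))                 /* negative, or any bit above p-1 set */
        return (~a) >> 31 & ((1 << p) - 1);  /* 0 when a < 0, 2^p - 1 otherwise    */
    return a;
}

static inline int clip_intp2_sketch(int a, int p)
{
    if (((unsigned)a + (1U << p)) & ~((2U << p) - 1))  /* outside [-2^p, 2^p)? */
        return (a >> 31) ^ ((1 << p) - 1);             /* -2^p or 2^p - 1      */
    return a;
}
```

With the bit width known at the call site, e.g. av_clip_intp2(v >> filter_quant, 13) in takdec.c above, the masks fold into compile-time constants.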