diff options
author | vlefevre <vlefevre@280ebfd0-de03-0410-8827-d642c229c3f4> | 2008-11-22 15:49:07 +0000 |
---|---|---|
committer | vlefevre <vlefevre@280ebfd0-de03-0410-8827-d642c229c3f4> | 2008-11-22 15:49:07 +0000 |
commit | 71f5e9220ed8e375d4f66b479772fddccd6956b6 (patch) | |
tree | aabeceaa34acbaac67dbe1bda9a5aeca15d7894f | |
parent | 44e889c6280d7080baecfb55d39224e98552e9f5 (diff) | |
download | mpfr-71f5e9220ed8e375d4f66b479772fddccd6956b6.tar.gz |
r5689 undone: some casts were incorrect (mp_exp_t may be greater than
mp_prec_t, so that casting an mp_exp_t into an mp_prec_t can introduce a
bug). There may be bugs in some cases, but the casts fix the symptom,
not the bug (unless one casts the unsigned type to a signed type that
is *strictly* larger, which is not possible here).
git-svn-id: svn://scm.gforge.inria.fr/svn/mpfr/trunk@5690 280ebfd0-de03-0410-8827-d642c229c3f4
-rw-r--r-- | acosh.c | 2 | ||||
-rw-r--r-- | agm.c | 2 | ||||
-rw-r--r-- | atan.c | 4 | ||||
-rw-r--r-- | cos.c | 4 | ||||
-rw-r--r-- | fma.c | 5 | ||||
-rw-r--r-- | fms.c | 5 | ||||
-rw-r--r-- | gamma.c | 4 | ||||
-rw-r--r-- | get_str.c | 4 | ||||
-rw-r--r-- | li2.c | 15 | ||||
-rw-r--r-- | lngamma.c | 8 | ||||
-rw-r--r-- | modf.c | 4 | ||||
-rw-r--r-- | mpfr-impl.h | 2 | ||||
-rw-r--r-- | mul.c | 4 | ||||
-rw-r--r-- | mulders.c | 3 | ||||
-rw-r--r-- | rec_sqrt.c | 2 | ||||
-rw-r--r-- | root.c | 2 | ||||
-rw-r--r-- | tanh.c | 3 | ||||
-rw-r--r-- | vasprintf.c | 2 | ||||
-rw-r--r-- | yn.c | 2 |
19 files changed, 36 insertions, 41 deletions
@@ -101,7 +101,7 @@ mpfr_acosh (mpfr_ptr y, mpfr_srcptr x , mp_rnd_t rnd_mode) We need to compute ln(x) + ln(2) as 2x can overflow. TODO: write a proof and add an MPFR_ASSERTN. */ mpfr_log (t, x, GMP_RNDN); /* err(log) < 1/2 ulp(t) */ - pln2 = Nt - MPFR_PREC_MIN < (mp_prec_t) MPFR_GET_EXP (t) ? + pln2 = Nt - MPFR_PREC_MIN < MPFR_GET_EXP (t) ? MPFR_PREC_MIN : Nt - MPFR_GET_EXP (t); mpfr_init2 (ln2, pln2); mpfr_const_log2 (ln2, GMP_RNDN); /* err(ln2) < 1/2 ulp(t) */ @@ -152,7 +152,7 @@ mpfr_agm (mpfr_ptr r, mpfr_srcptr op2, mpfr_srcptr op1, mp_rnd_t rnd_mode) err += MPFR_INT_CEIL_LOG2(18 * n + 51); /* 18n+51 should not overflow since n is about log(p) */ /* we should have n+2 <= 2^(p/4) [see algorithms.tex] */ - if (MPFR_LIKELY ((mp_prec_t) MPFR_INT_CEIL_LOG2(n + 2) <= p / 4 && + if (MPFR_LIKELY (MPFR_INT_CEIL_LOG2(n + 2) <= p / 4 && MPFR_CAN_ROUND (v, p - err, q, rnd_mode))) break; /* Stop the loop */ @@ -61,7 +61,7 @@ mpfr_atan_aux (mpfr_ptr y, mpz_ptr p, long r, int m, mpz_t *tab) /* Normalize p */ n = mpz_scan1 (p, 0); mpz_tdiv_q_2exp (p, p, n); /* exact */ - MPFR_ASSERTD ((unsigned long) r > n); + MPFR_ASSERTD (r > n); r -= n; /* since |p/2^r| < 1, and p is a non-zero integer, necessarily r > 0 */ @@ -289,7 +289,7 @@ mpfr_atan (mpfr_ptr atan, mpfr_srcptr x, mp_rnd_t rnd_mode) #endif n0 = MPFR_INT_CEIL_LOG2 ((realprec + sup) + 3); MPFR_ASSERTD (3*n0 > 2); - prec = (realprec + sup) + 1 + MPFR_INT_CEIL_LOG2 ((mp_prec_t) (3*n0-2)); + prec = (realprec + sup) + 1 + MPFR_INT_CEIL_LOG2 (3*n0-2); /* Initialisation */ MPFR_GROUP_REPREC_4 (group, prec, sk, tmp, tmp2, arctgt); @@ -70,7 +70,7 @@ mpfr_cos2_aux (mpfr_ptr f, mpfr_srcptr r) mpz_set_ui (s, 1); /* initialize sum with 1 */ mpz_mul_2exp (s, s, p + q); /* scale all values by 2^(p+q) */ mpz_set (t, s); /* invariant: t is previous term */ - for (i = 1; (mp_prec_t) (m = mpz_sizeinbase (t, 2)) >= q; i += 2) + for (i = 1; (m = mpz_sizeinbase (t, 2)) >= q; i += 2) { /* adjust precision of x to that of t */ l = 
mpz_sizeinbase (x, 2); @@ -114,7 +114,7 @@ mpfr_cos2_aux (mpfr_ptr f, mpfr_srcptr r) mpz_clear (t); l = (i - 1) / 2; /* number of iterations */ - return 2 * MPFR_INT_CEIL_LOG2 ((mp_prec_t) l + 1) + 1; /* bound is 2l(l+1) */ + return 2 * MPFR_INT_CEIL_LOG2 (l + 1) + 1; /* bound is 2l(l+1) */ } int @@ -148,8 +148,7 @@ mpfr_fma (mpfr_ptr s, mpfr_srcptr x, mpfr_srcptr y, mpfr_srcptr z, MPFR_BLOCK_DECL (flags); if (MPFR_GET_EXP (u) > MPFR_GET_EXP (z) && - (mp_prec_t) (MPFR_GET_EXP (u) - MPFR_GET_EXP (z)) - > MPFR_PREC (u)) + MPFR_GET_EXP (u) - MPFR_GET_EXP (z) > MPFR_PREC (u)) { /* |z| < ulp(u)/2, therefore one can use z instead of z/4. */ zz = z; @@ -209,7 +208,7 @@ mpfr_fma (mpfr_ptr s, mpfr_srcptr x, mpfr_srcptr y, mpfr_srcptr z, of the result can be EXP(z) - 1). */ diffexp = MPFR_GET_EXP (z) - __gmpfr_emin; pzs = MAX (MPFR_PREC (z), MPFR_PREC (s) + 1); - if ((mp_prec_t) diffexp <= pzs) + if (diffexp <= pzs) { mp_exp_unsigned_t uscale; mpfr_t scaled_v; @@ -150,8 +150,7 @@ mpfr_fms (mpfr_ptr s, mpfr_srcptr x, mpfr_srcptr y, mpfr_srcptr z, MPFR_BLOCK_DECL (flags); if (MPFR_GET_EXP (u) > MPFR_GET_EXP (z) && - (mp_prec_t) (MPFR_GET_EXP (u) - MPFR_GET_EXP (z)) - > MPFR_PREC (u)) + MPFR_GET_EXP (u) - MPFR_GET_EXP (z) > MPFR_PREC (u)) { /* |z| < ulp(u)/2, therefore one can use z instead of z/4. */ zz = z; @@ -211,7 +210,7 @@ mpfr_fms (mpfr_ptr s, mpfr_srcptr x, mpfr_srcptr y, mpfr_srcptr z, of the result can be EXP(z) - 1). */ diffexp = MPFR_GET_EXP (z) - __gmpfr_emin; pzs = MAX (MPFR_PREC (z), MPFR_PREC (s) + 1); - if ((mp_prec_t) diffexp <= pzs) + if (diffexp <= pzs) { mp_exp_unsigned_t uscale; mpfr_t scaled_v; @@ -40,7 +40,7 @@ mpfr_gamma_2_minus_x_exact (mpfr_srcptr x) (b) if EXP(y) > 1 and EXP(y)-PREC(y) <= 1, w = PREC(y) + 1 (c) if EXP(y) > 1 and EXP(y)-PREC(y) > 1, w = EXP(y) - 1 */ return (MPFR_GET_EXP(x) <= 1) ? MPFR_PREC(x) + 2 - MPFR_GET_EXP(x) - : (((mp_prec_t) MPFR_GET_EXP(x) <= MPFR_PREC(x) + 1) ? MPFR_PREC(x) + 1 + : ((MPFR_GET_EXP(x) <= MPFR_PREC(x) + 1) ? 
MPFR_PREC(x) + 1 : MPFR_GET_EXP(x) - 1); } @@ -52,7 +52,7 @@ mpfr_gamma_1_minus_x_exact (mpfr_srcptr x) return MPFR_PREC(x) - MPFR_GET_EXP(x); else if (MPFR_GET_EXP(x) <= 0) return MPFR_PREC(x) + 1 - MPFR_GET_EXP(x); - else if (MPFR_PREC(x) >= (mp_prec_t) MPFR_GET_EXP(x)) + else if (MPFR_PREC(x) >= MPFR_GET_EXP(x)) return MPFR_PREC(x) + 1; else return MPFR_GET_EXP(x); @@ -1400,7 +1400,7 @@ mpfr_get_str (char *s, mp_exp_t *e, int b, size_t m, mpfr_srcptr x, mp_rnd_t rnd { int exact; /* exact result */ mp_exp_t exp, g; - mp_prec_t prec; /* precision of the computation */ + mp_exp_t prec; /* precision of the computation */ long err; mp_limb_t *a; mp_exp_t exp_a; @@ -1564,7 +1564,7 @@ mpfr_get_str (char *s, mp_exp_t *e, int b, size_t m, mpfr_srcptr x, mp_rnd_t rnd exp = ((mp_exp_t) m < g) ? g - (mp_exp_t) m : (mp_exp_t) m - g; prec += MPFR_INT_CEIL_LOG2 (prec); /* number of guard bits */ if (exp != 0) /* add maximal exponentiation error */ - prec += 3 * (mp_exp_t) MPFR_INT_CEIL_LOG2 ((mp_prec_t) exp); + prec += 3 * (mp_exp_t) MPFR_INT_CEIL_LOG2 (exp); MPFR_ZIV_INIT (loop, prec); for (;;) @@ -336,7 +336,7 @@ mpfr_li2 (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode) mpfr_neg (u, u, GMP_RNDN); /* u = -log(1-x) */ expo_l = MPFR_GET_EXP (u); k = li2_series (s, u, GMP_RNDU); - err = 1 + MPFR_INT_CEIL_LOG2 ((mp_prec_t) k + 1); + err = 1 + MPFR_INT_CEIL_LOG2 (k + 1); mpfr_sqr (u, u, GMP_RNDU); mpfr_div_2ui (u, u, 2, GMP_RNDU); /* u = log^2(1-x) / 4 */ @@ -416,8 +416,7 @@ mpfr_li2 (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode) expo_l = MPFR_GET_EXP (u); k = li2_series (s, u, GMP_RNDN); mpfr_neg (s, s, GMP_RNDN); - err = MPFR_INT_CEIL_LOG2 ((mp_prec_t) k + 1) + 1; - /* error(s) <= 2^err ulp(s) */ + err = MPFR_INT_CEIL_LOG2 (k + 1) + 1; /* error(s) <= 2^err ulp(s) */ mpfr_sqr (u, u, GMP_RNDN); mpfr_div_2ui (u, u, 2, GMP_RNDN); /* u= log^2(1-1/x)/4 */ @@ -492,7 +491,7 @@ mpfr_li2 (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode) mpfr_add (s, s, u, GMP_RNDN); /* error(s) <= (31 + 
(k+1) * 2^(1-e1) + 2^(1-e2)) ulp(s) see algorithms.tex */ - err = MAX (MPFR_INT_CEIL_LOG2 ((mp_prec_t) k + 1) + 1 - e1, 1 - e2); + err = MAX (MPFR_INT_CEIL_LOG2 (k + 1) + 1 - e1, 1 - e2); err = 2 + MAX (5, err); if (MPFR_CAN_ROUND (s, (mp_exp_t) m - err, yp, rnd_mode)) break; @@ -528,7 +527,7 @@ mpfr_li2 (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode) mpfr_neg (u, u, GMP_RNDN); k = li2_series (s, u, GMP_RNDN); mpfr_neg (s, s, GMP_RNDN); - err = 1 + MPFR_INT_CEIL_LOG2 ((mp_prec_t) k + 1) - MPFR_GET_EXP (s); + err = 1 + MPFR_INT_CEIL_LOG2 (k + 1) - MPFR_GET_EXP (s); mpfr_ui_sub (xx, 1, x, GMP_RNDN); mpfr_log (v, xx, GMP_RNDU); @@ -584,7 +583,7 @@ mpfr_li2 (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode) k = li2_series (s, u, GMP_RNDN); mpfr_neg (s, s, GMP_RNDN); expo_l = MPFR_GET_EXP (u); - err = 1 + MPFR_INT_CEIL_LOG2 ((mp_prec_t) k + 1) - MPFR_GET_EXP (s); + err = 1 + MPFR_INT_CEIL_LOG2 (k + 1) - MPFR_GET_EXP (s); mpfr_sqr (u, u, GMP_RNDN); mpfr_div_2ui (u, u, 2, GMP_RNDN); /* u = log^2(1-x)/4 */ @@ -641,8 +640,8 @@ mpfr_li2 (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode) mpfr_mul (w, v, u, GMP_RNDN); mpfr_div_2ui (w, w, 1, GMP_RNDN); /* w = log(-x) * log(1-x) / 2 */ mpfr_sub (s, s, w, GMP_RNDN); - err = 1 + MAX (3, MPFR_INT_CEIL_LOG2 ((mp_prec_t) k + 1) + 1 - - MPFR_GET_EXP (s)) + MPFR_GET_EXP (s); + err = 1 + MAX (3, MPFR_INT_CEIL_LOG2 (k+1) + 1 - MPFR_GET_EXP (s)) + + MPFR_GET_EXP (s); mpfr_sqr (w, v, GMP_RNDN); mpfr_div_2ui (w, w, 2, GMP_RNDN); /* w = log^2(-x) / 4 */ @@ -121,7 +121,7 @@ unit_bit (mpfr_srcptr (x)) return 0; /* |x| < 1 */ prec = MPFR_PREC (x); - if ((mp_prec_t) expo > prec) + if (expo > prec) return 0; /* y is a multiple of 2^(expo-prec), thus an even integer */ /* Now, the unit bit is represented. */ @@ -242,7 +242,7 @@ GAMMA_FUNC (mpfr_ptr y, mpfr_srcptr z0, mp_rnd_t rnd) which would need precision n. 
*/ MPFR_ZIV_NEXT (loop, prec); } - while (prec <= (mp_prec_t) -MPFR_EXP(z0)); + while (prec <= -MPFR_EXP(z0)); MPFR_ZIV_FREE (loop); } #endif @@ -326,7 +326,7 @@ GAMMA_FUNC (mpfr_ptr y, mpfr_srcptr z0, mp_rnd_t rnd) /* log(v*(1+e)) = log(v)+log(1+e) where |e| <= 2^(err_s-w). Since |log(1+e)| <= 2*e for |e| <= 1/4, the error on v is bounded by ulp(v)/2 + 2^(err_s+1-w). */ - if ((mp_prec_t) (err_s + 2) > w) + if (err_s + 2 > w) { w += err_s + 2; } @@ -452,7 +452,7 @@ GAMMA_FUNC (mpfr_ptr y, mpfr_srcptr z0, mp_rnd_t rnd) Bm ++; } mpfr_mul_z (v, t, B[m], GMP_RNDN); /* (1+u)^(10m-7) */ - MPFR_ASSERTD(MPFR_GET_EXP(v) <= (mp_exp_t) - (2 * m + 3)); + MPFR_ASSERTD(MPFR_GET_EXP(v) <= - (2 * m + 3)); mpfr_add (s, s, v, GMP_RNDN); } /* m <= 1/2*Pi*e*z ensures that |v[m]| < 1/2^(2m+3) */ @@ -66,7 +66,7 @@ mpfr_modf (mpfr_ptr iop, mpfr_ptr fop, mpfr_srcptr op, mpfr_rnd_t rnd_mode) ope = MPFR_GET_EXP (op); opq = MPFR_PREC (op); - if (ope <= 0) /* 0 < |op| < 1 */ + if (ope <=0) /* 0 < |op| < 1 */ { inexact = (fop != op) ? mpfr_set (fop, op, rnd_mode) : 0; MPFR_SET_SAME_SIGN (iop, op); @@ -75,7 +75,7 @@ mpfr_modf (mpfr_ptr iop, mpfr_ptr fop, mpfr_srcptr op, mpfr_rnd_t rnd_mode) mpfr_check_range (fop, inexact, rnd_mode); /* set the underflow flag if needed */ MPFR_RET (MPFR_INT_SIGN (op) > 0 ? -2 : +2); } - else if ((mp_prec_t) ope >= opq) /* op has no fractional part */ + else if (ope >= opq) /* op has no fractional part */ { inexact = (iop != op)? mpfr_set (iop, op, rnd_mode) : 0; MPFR_SET_SAME_SIGN (fop, op); diff --git a/mpfr-impl.h b/mpfr-impl.h index f1debc4e2..488018222 100644 --- a/mpfr-impl.h +++ b/mpfr-impl.h @@ -887,7 +887,7 @@ extern unsigned char *mpfr_stack; * Undefined if x is 0 */ #if __MPFR_GNUC(2,95) || __MPFR_ICC(8,1,0) # define MPFR_INT_CEIL_LOG2(x) \ - (MPFR_UNLIKELY ((x) == 1) ? 0 : \ + (MPFR_UNLIKELY ((x) == 1) ? 
0 : \ __extension__ ({ int _b; mp_limb_t _limb; \ MPFR_ASSERTN ((x) > 1); \ _limb = (x) - 1; \ @@ -403,7 +403,7 @@ mpfr_mul (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mp_rnd_t rnd_mode) n = MPFR_LIMB_SIZE (a) + 1; n = MIN (n, cn); MPFR_ASSERTD (n >= 1 && 2*n <= k && n <= cn && n <= bn); - p = n * BITS_PER_MP_LIMB - MPFR_INT_CEIL_LOG2 ((mp_prec_t) n + 2); + p = n * BITS_PER_MP_LIMB - MPFR_INT_CEIL_LOG2 (n + 2); bp += bn - n; cp += cn - n; @@ -438,7 +438,7 @@ mpfr_mul (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mp_rnd_t rnd_mode) } /* We will compute with one extra limb */ n++; - p = n * BITS_PER_MP_LIMB - MPFR_INT_CEIL_LOG2 ((mp_prec_t) n + 2); + p = n * BITS_PER_MP_LIMB - MPFR_INT_CEIL_LOG2 (n + 2); /* Due to some nasty reasons we can have only 4 bits */ MPFR_ASSERTD (MPFR_PREC (a) <= p - 4); @@ -91,8 +91,7 @@ mpfr_sqrhigh_n (mp_ptr rp, mp_srcptr np, mp_size_t n) mp_size_t k; MPFR_ASSERTD (MPFR_SQRHIGH_TAB_SIZE > 4); - k = MPFR_LIKELY ((unsigned int) n < MPFR_SQRHIGH_TAB_SIZE) - ? sqrhigh_ktab[n] : 2*n/3; + k = MPFR_LIKELY (n < MPFR_SQRHIGH_TAB_SIZE) ? sqrhigh_ktab[n] : 2*n/3; MPFR_ASSERTD (k == -1 || k == 0 || (k > n/2 && k < n)); if (k < 0) /* we can't use mpn_sqr_basecase here, since it requires diff --git a/rec_sqrt.c b/rec_sqrt.c index 5a002cec0..2851cb272 100644 --- a/rec_sqrt.c +++ b/rec_sqrt.c @@ -488,7 +488,7 @@ mpfr_rec_sqrt (mpfr_ptr r, mpfr_srcptr u, mp_rnd_t rnd_mode) up to a full limb to maximize the chance of rounding, while avoiding to allocate extra space */ wp = rp + 11; - if (wp < (mp_prec_t) rn * BITS_PER_MP_LIMB) + if (wp < rn * BITS_PER_MP_LIMB) wp = rn * BITS_PER_MP_LIMB; for (;;) { @@ -139,7 +139,7 @@ mpfr_root (mpfr_ptr y, mpfr_srcptr x, unsigned long k, mp_rnd_t rnd_mode) /* we now multiply m by 2^(r+k*sh) so that root(m,k) will give exactly n bits: we want k*(n-1)+1 <= size_m + k*sh + r <= k*n i.e. 
sh = floor ((kn-size_m-r)/k) */ - if ((mp_exp_t) size_m + r > (mp_exp_t) k * (mp_exp_t) n) + if ((mp_exp_t) size_m + r > k * (mp_exp_t) n) sh = 0; /* we already have too many bits */ else sh = (k * (mp_exp_t) n - (mp_exp_t) size_m - r) / k; @@ -126,8 +126,7 @@ mpfr_tanh (mpfr_ptr y, mpfr_srcptr xt , mp_rnd_t rnd_mode) d = MAX(3, d + 1); err = Nt - (d + 1); - if (MPFR_LIKELY ((d <= (mp_exp_t) (Nt / 2)) - && MPFR_CAN_ROUND (t, err, Ny, rnd_mode))) + if (MPFR_LIKELY ((d <= Nt / 2) && MPFR_CAN_ROUND (t, err, Ny, rnd_mode))) { inexact = mpfr_set4 (y, t, rnd_mode, sign); break; diff --git a/vasprintf.c b/vasprintf.c index 11ae68a58..b5fb26312 100644 --- a/vasprintf.c +++ b/vasprintf.c @@ -1282,7 +1282,7 @@ regular_fg (struct number_parts *np, mpfr_srcptr p, np->fp_size = str_len; if (!spec_g && (spec.prec > 0) - && (np->fp_leading_zeros + (int) np->fp_size < spec.prec)) + && (np->fp_leading_zeros + np->fp_size < spec.prec)) /* add missing trailing zeros */ np->fp_trailing_zeros = spec.prec - np->fp_leading_zeros - np->fp_size; @@ -292,7 +292,7 @@ mpfr_yn (mpfr_ptr res, long n, mpfr_srcptr z, mp_rnd_t r) error on 2/Pi/z is less than 7ulp(y). The truncation error is less than 1/4, thus if ulp(y)>=1/4, the total error is less than 8ulp(y), otherwise it is less than 1/4+7/8 <= 2. */ - if ((mp_prec_t) MPFR_EXP(y) + 2 >= MPFR_PREC(y)) /* ulp(y) >= 1/4 */ + if (MPFR_EXP(y) + 2 >= MPFR_PREC(y)) /* ulp(y) >= 1/4 */ err1 = 3; else /* ulp(y) <= 1/8 */ err1 = (mp_exp_t) MPFR_PREC(y) - MPFR_EXP(y) + 1; |