summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--  ChangeLog                     12
-rw-r--r--  gmp-impl.h                     1
-rw-r--r--  mpbsd/min.c                    2
-rw-r--r--  mpbsd/xtom.c                   2
-rw-r--r--  mpf/get_str.c                  3
-rw-r--r--  mpf/set_str.c                  4
-rw-r--r--  mpn/generic/pre_divrem_1.c     2
-rw-r--r--  mpn/generic/set_str.c         16
-rw-r--r--  mpn/generic/sizeinbase.c       6
-rw-r--r--  mpq/get_str.c                  2
-rw-r--r--  mpz/inp_str.c                  5
-rw-r--r--  mpz/n_pow_ui.c                 2
-rw-r--r--  mpz/out_str.c                  2
-rw-r--r--  mpz/set_str.c                  4
-rw-r--r--  printf/doprntf.c               2
15 files changed, 38 insertions, 27 deletions
diff --git a/ChangeLog b/ChangeLog
index 3e0841ea2..7a15ab23c 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,13 @@
+2009-07-06 Torbjorn Granlund <tege@gmplib.org>
+
+ * gmp-impl.h (__mp_bases): Remove this alias.
+
+ * mpf/get_str.c: Use less overflow prone expression for computing limb
+ allocation.
+ * mpz/inp_str.c: Likewise.
+ * mpf/set_str.c: Likewise.
+ * mpz/set_str.c: Likewise.
+
2009-07-03 Niels Möller <nisse@lysator.liu.se>
* mpn/generic/gcd_1.c (mpn_gcd_1): Use masking tricks to reduce
@@ -5,6 +15,8 @@
2009-06-28 Torbjorn Granlund <tege@gmplib.org>
+ * demos/factorize.c (factor_using_pollard_rho): Rewrite.
+
* mpz/clears.c: New file.
* mpq/clears.c: New file.
* mpf/clears.c: New file.
diff --git a/gmp-impl.h b/gmp-impl.h
index 8382a863e..80373c7d9 100644
--- a/gmp-impl.h
+++ b/gmp-impl.h
@@ -2364,7 +2364,6 @@ struct bases
};
#define mp_bases __MPN(bases)
-#define __mp_bases __MPN(bases)
__GMP_DECLSPEC extern const struct bases mp_bases[257];
diff --git a/mpbsd/min.c b/mpbsd/min.c
index e5c1d7435..113cc173a 100644
--- a/mpbsd/min.c
+++ b/mpbsd/min.c
@@ -77,7 +77,7 @@ min (MINT *dest)
ungetc (c, stdin);
- dest_size = str_size / __mp_bases[10].chars_per_limb + 1;
+ dest_size = str_size / mp_bases[10].chars_per_limb + 1;
if (dest->_mp_alloc < dest_size)
_mp_realloc (dest, dest_size);
diff --git a/mpbsd/xtom.c b/mpbsd/xtom.c
index fe41865d0..75fa88a81 100644
--- a/mpbsd/xtom.c
+++ b/mpbsd/xtom.c
@@ -76,7 +76,7 @@ xtom (const char *str)
str_size = s - begs;
- xsize = str_size / __mp_bases[16].chars_per_limb + 1;
+ xsize = str_size / mp_bases[16].chars_per_limb + 1;
x->_mp_alloc = xsize;
x->_mp_d = (mp_ptr) (*__gmp_allocate_func) (xsize * BYTES_PER_MP_LIMB);
diff --git a/mpf/get_str.c b/mpf/get_str.c
index d4be6b21b..30b12e7c2 100644
--- a/mpf/get_str.c
+++ b/mpf/get_str.c
@@ -174,7 +174,8 @@ mpf_get_str (char *dbuf, mp_exp_t *exp, int base, size_t n_digits, mpf_srcptr u)
conversion.) */
tstr = (unsigned char *) TMP_ALLOC (n_digits + 2 * GMP_LIMB_BITS + 3);
- n_limbs_needed = 2 + ((mp_size_t) (n_digits / mp_bases[base].chars_per_bit_exactly)) / GMP_NUMB_BITS;
+ n_limbs_needed = 2 + (mp_size_t)
+ (n_digits / (GMP_NUMB_BITS * mp_bases[base].chars_per_bit_exactly));
if (ue <= n_limbs_needed)
{
diff --git a/mpf/set_str.c b/mpf/set_str.c
index 0417303a3..ce8a6bac3 100644
--- a/mpf/set_str.c
+++ b/mpf/set_str.c
@@ -250,8 +250,8 @@ mpf_set_str (mpf_ptr x, const char *str, int base)
str_size = n_chars_needed;
#endif
- ma = (((mp_size_t) (str_size / mp_bases[base].chars_per_bit_exactly))
- / GMP_NUMB_BITS + 2);
+ ma = 2 + (mp_size_t)
+ (str_size / (GMP_NUMB_BITS * mp_bases[base].chars_per_bit_exactly));
mp = TMP_ALLOC_LIMBS (ma);
mn = mpn_set_str (mp, (unsigned char *) begs, str_size, base);
diff --git a/mpn/generic/pre_divrem_1.c b/mpn/generic/pre_divrem_1.c
index 6badf6319..c5a028894 100644
--- a/mpn/generic/pre_divrem_1.c
+++ b/mpn/generic/pre_divrem_1.c
@@ -33,7 +33,7 @@ along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */
The main reason for a separate shift==0 case is that not all CPUs give
zero for "n0 >> BITS_PER_MP_LIMB" which would arise in the general case
- code used on shift==0. shift==0 is also reasonably common in __mp_bases
+ code used on shift==0. shift==0 is also reasonably common in mp_bases
big_base, for instance base==10 on a 64-bit limb.
Under shift!=0 it would be possible to call mpn_lshift to adjust the
diff --git a/mpn/generic/set_str.c b/mpn/generic/set_str.c
index 975cfb0da..db916e8c5 100644
--- a/mpn/generic/set_str.c
+++ b/mpn/generic/set_str.c
@@ -69,7 +69,7 @@ mpn_set_str (mp_ptr rp, const unsigned char *str, size_t str_len, int base)
int next_bitpos;
mp_limb_t res_digit;
mp_size_t size;
- int bits_per_indigit = __mp_bases[base].big_base;
+ int bits_per_indigit = mp_bases[base].big_base;
size = 0;
res_digit = 0;
@@ -107,7 +107,7 @@ mpn_set_str (mp_ptr rp, const unsigned char *str, size_t str_len, int base)
TMP_MARK;
- chars_per_limb = __mp_bases[base].chars_per_limb;
+ chars_per_limb = mp_bases[base].chars_per_limb;
un = str_len / chars_per_limb + 1;
@@ -139,9 +139,9 @@ mpn_set_str_compute_powtab (powers_t *powtab, mp_ptr powtab_mem, mp_size_t un, i
powtab_mem_ptr = powtab_mem;
- chars_per_limb = __mp_bases[base].chars_per_limb;
- big_base = __mp_bases[base].big_base;
- big_base_inverted = __mp_bases[base].big_base_inverted;
+ chars_per_limb = mp_bases[base].chars_per_limb;
+ big_base = mp_bases[base].big_base;
+ big_base_inverted = mp_bases[base].big_base_inverted;
count_leading_zeros (normalization_steps, big_base);
p = powtab_mem_ptr;
@@ -278,11 +278,11 @@ mpn_bc_set_str (mp_ptr rp, const unsigned char *str, size_t str_len, int base)
mp_limb_t res_digit;
ASSERT (base >= 2);
- ASSERT (base < numberof (__mp_bases));
+ ASSERT (base < numberof (mp_bases));
ASSERT (str_len >= 1);
- big_base = __mp_bases[base].big_base;
- chars_per_limb = __mp_bases[base].chars_per_limb;
+ big_base = mp_bases[base].big_base;
+ chars_per_limb = mp_bases[base].chars_per_limb;
size = 0;
for (i = chars_per_limb; i < str_len; i += chars_per_limb)
diff --git a/mpn/generic/sizeinbase.c b/mpn/generic/sizeinbase.c
index edd10b544..cfa57ea35 100644
--- a/mpn/generic/sizeinbase.c
+++ b/mpn/generic/sizeinbase.c
@@ -37,7 +37,7 @@ mpn_sizeinbase (mp_srcptr xp, mp_size_t xsize, int base)
ASSERT (xsize >= 0);
ASSERT (base >= 2);
- ASSERT (base < numberof (__mp_bases));
+ ASSERT (base < numberof (mp_bases));
/* Special case for X == 0. */
if (xsize == 0)
@@ -50,9 +50,9 @@ mpn_sizeinbase (mp_srcptr xp, mp_size_t xsize, int base)
if (POW2_P (base))
{
/* Special case for powers of 2, giving exact result. */
- lb_base = __mp_bases[base].big_base;
+ lb_base = mp_bases[base].big_base;
return (totbits + lb_base - 1) / lb_base;
}
else
- return (size_t) (totbits * __mp_bases[base].chars_per_bit_exactly) + 1;
+ return (size_t) (totbits * mp_bases[base].chars_per_bit_exactly) + 1;
}
diff --git a/mpq/get_str.c b/mpq/get_str.c
index 9b3f7ffbb..4079a933b 100644
--- a/mpq/get_str.c
+++ b/mpq/get_str.c
@@ -38,7 +38,7 @@ mpq_get_str (char *str, int base, mpq_srcptr q)
chars per bit of num and den. +3 for sign, slash and '\0'. */
str_alloc = ((size_t) ((ABS (q->_mp_num._mp_size) + q->_mp_den._mp_size)
* BITS_PER_MP_LIMB
- * __mp_bases[ABS(base)].chars_per_bit_exactly))
+ * mp_bases[ABS(base)].chars_per_bit_exactly))
+ 5;
str = (char *) (*__gmp_allocate_func) (str_alloc);
}
diff --git a/mpz/inp_str.c b/mpz/inp_str.c
index 4c1fe35b3..05c8cde90 100644
--- a/mpz/inp_str.c
+++ b/mpz/inp_str.c
@@ -151,9 +151,8 @@ mpz_inp_str_nowhite (mpz_ptr x, FILE *stream, int base, int c, size_t nread)
}
else
{
- xsize = (((mp_size_t)
- (str_size / __mp_bases[base].chars_per_bit_exactly))
- / GMP_NUMB_BITS + 2);
+ xsize = 2 + (mp_size_t)
+ (str_size / (GMP_NUMB_BITS * mp_bases[base].chars_per_bit_exactly));
MPZ_REALLOC (x, xsize);
/* Convert the byte array in base BASE to our bignum format. */
diff --git a/mpz/n_pow_ui.c b/mpz/n_pow_ui.c
index 4f3f4979b..1dfc0cc70 100644
--- a/mpz/n_pow_ui.c
+++ b/mpz/n_pow_ui.c
@@ -55,7 +55,7 @@ along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */
The initial powering for bsize==1 into blimb or blimb:blimb_low doesn't
form the biggest possible power of b that fits, only the biggest power of
2 power, ie. b^(2^n). It'd be possible to choose a bigger power, perhaps
- using __mp_bases[b].big_base for small b, and thereby get better value
+ using mp_bases[b].big_base for small b, and thereby get better value
from mpn_mul_1 or mpn_mul_2 in the bignum powering. It's felt that doing
so would be more complicated than it's worth, and could well end up being
a slowdown for small e. For big e on the other hand the algorithm is
diff --git a/mpz/out_str.c b/mpz/out_str.c
index 1b9622522..fbb7624fc 100644
--- a/mpz/out_str.c
+++ b/mpz/out_str.c
@@ -72,7 +72,7 @@ mpz_out_str (FILE *stream, int base, mpz_srcptr x)
TMP_MARK;
str_size = ((size_t) (x_size * BITS_PER_MP_LIMB
- * __mp_bases[base].chars_per_bit_exactly)) + 3;
+ * mp_bases[base].chars_per_bit_exactly)) + 3;
str = (unsigned char *) TMP_ALLOC (str_size);
/* Move the number to convert into temporary space, since mpn_get_str
diff --git a/mpz/set_str.c b/mpz/set_str.c
index 9af15b1b5..550c4866b 100644
--- a/mpz/set_str.c
+++ b/mpz/set_str.c
@@ -122,8 +122,8 @@ mpz_set_str (mpz_ptr x, const char *str, int base)
str_size = s - begs;
- xsize = (((mp_size_t) (str_size / __mp_bases[base].chars_per_bit_exactly))
- / GMP_NUMB_BITS + 2);
+ xsize = 2 + (mp_size_t)
+ (str_size / (GMP_NUMB_BITS * mp_bases[base].chars_per_bit_exactly));
MPZ_REALLOC (x, xsize);
/* Convert the byte array in base BASE to our bignum format. */
diff --git a/printf/doprntf.c b/printf/doprntf.c
index e713a21a0..15419dd1e 100644
--- a/printf/doprntf.c
+++ b/printf/doprntf.c
@@ -91,7 +91,7 @@ __gmp_doprnt_mpf (const struct doprnt_funs_t *funs,
digit and subtract that from prec. In either case add 2 so the
round to nearest can be applied accurately. */
ndigits = prec + 2
- + EXP(f) * (__mp_bases[ABS(p->base)].chars_per_limb + (EXP(f)>=0));
+ + EXP(f) * (mp_bases[ABS(p->base)].chars_per_limb + (EXP(f)>=0));
ndigits = MAX (ndigits, 1);
break;