path: root/randraw.c
author     tege <tege@gmplib.org>    2000-05-11 21:51:56 +0200
committer  tege <tege@gmplib.org>    2000-05-11 21:51:56 +0200
commit     090992c107965aa7eebe93bd4ddfeb85704c7b91 (patch)
tree       f3e7e43381baed0250c64334948ee90d2f6cdf8c /randraw.c
parent     cc38cd16fe1336312cfc6d63c0f1046963d408d1 (diff)
download   gmp-090992c107965aa7eebe93bd4ddfeb85704c7b91.tar.gz
(lc): Major overhaul (pending rewrite).
(_gmp_rand): Rewrite.
Diffstat (limited to 'randraw.c')
-rw-r--r--   randraw.c   240
1 file changed, 73 insertions, 167 deletions
diff --git a/randraw.c b/randraw.c
index ba570deeb..4118bda48 100644
--- a/randraw.c
+++ b/randraw.c
@@ -2,7 +2,7 @@
length NBITS in RP. RP must have enough space allocated to hold
NBITS.
-Copyright (C) 1999, 2000 Free Software Foundation, Inc.
+Copyright (C) 1999, 2000 Free Software Foundation, Inc.
This file is part of the GNU MP Library.
@@ -37,7 +37,7 @@ Third Edition, Addison Wesley, 1998, pp. 184-185.]
X is the seed and the result
a is chosen so that
- a mod 8 = 5 [3.2.1.2] and [3.2.1.3]
+ a mod 8 = 5 [3.2.1.2] and [3.2.1.3]
.01m < a < .99m
its binary or decimal digits is not a simple, regular pattern
it has no large quotients when Euclid's algorithm is used to find
@@ -47,7 +47,7 @@ Third Edition, Addison Wesley, 1998, pp. 184-185.]
c has no factor in common with m (c=1 or c=a can be good)
m is large (2^30)
is a power of 2 [3.2.1.1]
-
+
The least significant digits of the generated number are not very
random. It should be regarded as a random fraction X/m. To get a
random integer between 0 and n-1, multiply X/m by n and truncate.
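
A minimal sketch of the recurrence and of the truncation advice above.  The
multiplier and increment are placeholder values chosen only to satisfy the
listed conditions (a mod 8 = 5, c relatively prime to m); they are not the
parameters GMP uses, and m is taken as 2^32 so the reduction is implicit in
unsigned arithmetic.

#include <stdint.h>

/* One step of X = (a*X + c) mod m with m = 2^32. */
static uint32_t
lc_step (uint32_t x)
{
  const uint32_t a = 1103515245;   /* a mod 8 == 5 */
  const uint32_t c = 12345;        /* odd, hence relatively prime to 2^32 */
  return a * x + c;                /* the mod 2^32 is implicit */
}

/* Random integer in [0, n): regard X as the fraction X/m, multiply by n
   and truncate, i.e. keep the high bits rather than computing X % n.  */
static uint32_t
lc_uniform (uint32_t x, uint32_t n)
{
  return (uint32_t) (((uint64_t) x * n) >> 32);
}

Keeping the high bits this way sidesteps the weak least significant digits
mentioned above.
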
@@ -79,10 +79,10 @@ which gives
X = a * (X % ((long) m / a)) -
(m % a) * ((long) (X / ((long) m / a)))
-Since m is prime, the least-significant bits of X are just as random as
+Since m is prime, the least-significant bits of X are just as random as
the most-significant bits. */
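
The decomposition quoted above is Schrage's method for computing a*X mod m
without intermediate overflow.  A sketch with the classic Park-Miller
parameters, used here purely as example values:

#include <stdint.h>

/* X <- a*X mod m for prime m = 2^31 - 1, without overflowing 32 bits. */
static int32_t
lc_prime_step (int32_t x)
{
  const int32_t a = 16807;
  const int32_t m = 2147483647;   /* 2^31 - 1, prime */
  const int32_t q = m / a;        /* 127773 */
  const int32_t r = m % a;        /* 2836; r < q is what makes this work */

  x = a * (x % q) - r * (x / q);
  if (x < 0)
    x += m;                       /* fold a negative intermediate back into [0, m) */
  return x;
}
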
-/* Blum, Blum, and Shub.
+/* Blum, Blum, and Shub.
[Bruce Schneier, "Applied Cryptography", Second Edition, John Wiley
& Sons, Inc., 1996, pp. 417-418.]
@@ -91,15 +91,15 @@ the most-significant bits. */
modulo 4. The product of those numbers, n, is a blum integer.
Choose another random integer, x, which is relatively prime to n.
Compute
- x[0] = x^2 mod n
+ x[0] = x^2 mod n
That's the seed for the generator."
To generate a random bit, compute
- x[i] = x[i-1]^2 mod n
+ x[i] = x[i-1]^2 mod n
The least significant bit of x[i] is the one we want.
We can use more than one bit from x[i], namely the
- log2(bitlength of x[i])
+ log2(bitlength of x[i])
least significant bits of x[i].
So, for a 32-bit seed we get 5 bits per computation.
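
A toy rendering of the recurrence, only to make the squaring step and the
bit extraction concrete.  The modulus below is a product of two small primes
congruent to 3 mod 4 and is far too small for real use, where n would be a
large Blum integer handled with mpz arithmetic.

#include <stdint.h>

/* x[i] = x[i-1]^2 mod n, with a deliberately tiny n = 10007 * 10039. */
static uint64_t
bbs_step (uint64_t x)
{
  const uint64_t n = 10007ULL * 10039ULL;
  return (x * x) % n;
}

/* Keep the log2(bitlength) least significant bits; for a 32-bit x that
   is the 5 bits mentioned above.  */
static unsigned
bbs_bits (uint64_t x)
{
  return (unsigned) (x & 0x1f);
}
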
@@ -126,10 +126,9 @@ lc (rp, rstate)
{
mp_ptr tp, seedp, ap;
mp_size_t ta;
- mp_size_t tn, seedn, an, mn;
+ mp_size_t tn, seedn, an;
mp_size_t retval;
int shiftcount = 0;
- mp_limb_t tlimb;
unsigned long int m2exp;
mp_limb_t c;
TMP_DECL (mark);
@@ -175,92 +174,31 @@ lc (rp, rstate)
ASSERT_ALWAYS (m2exp != 0); /* FIXME. */
TMP_MARK (mark);
- mn = (m2exp + BITS_PER_MP_LIMB - 1) / BITS_PER_MP_LIMB;
- ta = MAX (an + seedn + 1, mn);
+ ta = an + seedn + 1;
tp = (mp_ptr) TMP_ALLOC (ta * BYTES_PER_MP_LIMB);
MPN_ZERO (tp, ta);
/* t = a * seed */
if (seedn >= an)
- tlimb = mpn_mul (tp, seedp, seedn, ap, an);
+ mpn_mul_basecase (tp, seedp, seedn, ap, an);
else
- tlimb = mpn_mul (tp, ap, an, seedp, seedn);
- tn = an + seedn - (tlimb == 0);
+ mpn_mul_basecase (tp, ap, an, seedp, seedn);
+ tn = an + seedn;
/* t = t + c */
- if (mpn_add_1 (tp, tp, tn, c))
- {
- tp[tn] = 1; /* Store carry. */
- tn++;
- }
+ mpn_incr_u (tp, c);
/* t = t % m */
if (m2exp != 0)
{
- /* M is a power of 2. There is no need for a division. Trim
- the size and clear some of the bits in the (new) most
- significant limb. */
-
- /* If M is bigger than the maximum possible size of T, nothing
- has to be done at all. */
- if (m2exp < tn * BITS_PER_MP_LIMB)
- {
- mp_size_t msli = m2exp / BITS_PER_MP_LIMB;
- mp_size_t savebits = m2exp % BITS_PER_MP_LIMB;
-
- if (savebits != 0)
- tp[msli] &= (((mp_limb_t) 1 << savebits) - 1);
- else
- msli--; /* Correction. */
+ /* M is a power of 2. The mod operation is trivial. */
- tn = msli + 1;
- }
+ tp[m2exp / BITS_PER_MP_LIMB] &= ((mp_limb_t) 1 << m2exp % BITS_PER_MP_LIMB) - 1;
+ tn = (m2exp + BITS_PER_MP_LIMB - 1) / BITS_PER_MP_LIMB;
}
else
{
- /* M is not a power of 2. The modulus operation is done with
- mpn_divrem() after normalizing M and compensating this by
- shifting 't2'. */
abort (); /* FIXME. */
-#if 0
- mp_ptr mp, mcopyp;
- mp_size_t mn;
-
- /* Store normalized copy of 'm' in 'mcopy'. */
- /* FIXME: Assumption: m != 0 */
- mp = PTR (rstate->algdata.lc->m);
- mn = SIZ (rstate->algdata.lc->m);
- count_leading_zeros (shiftcount, mp[mn - 1]);
- if (shiftcount != 0)
- {
- mcopyp = TMP_ALLOC (mn * BYTES_PER_MP_LIMB);
- mpn_lshift (mcopyp, mp, mn, shiftcount);
- }
- else
- mcopyp = mp;
-
- /* Shift t2 left the same amount that m was shifted. */
- if (shiftcount != 0)
- {
- tlimb = mpn_lshift (t2p, t2p, t2n, shiftcount);
- if (tlimb)
- {
- t2p[t2n] = tlimb;
- t2n++;
- }
- }
-
- mpn_divrem (t2p + mn, 0, t2p, t2n, mcopyp, mn);
- t2n = mn;
- /* Remainder is in t2. */
-
- /* Shift down remainder. */
- if (shiftcount != 0)
- {
- mpn_rshift (t2p, t2p, t2n, shiftcount);
- MPN_NORMALIZE (t2p, t2n);
- }
-#endif /* 0 */
}
/* Save result as next seed. */
@@ -270,14 +208,17 @@ lc (rp, rstate)
if (m2exp != 0)
{
/* Discard the lower half of the result. */
- mp_size_t discardb = m2exp / 2;
+ unsigned long int discardb = m2exp / 2;
mp_size_t discardl = discardb / BITS_PER_MP_LIMB;
tn -= discardl;
if (tn > 0)
{
if (discardb % BITS_PER_MP_LIMB != 0)
- mpn_rshift (rp, tp + discardl, tn, discardb % BITS_PER_MP_LIMB);
+ {
+ mpn_rshift (tp, tp + discardl, tn, discardb % BITS_PER_MP_LIMB);
+ MPN_COPY (rp, tp, (discardb + BITS_PER_MP_LIMB -1) / BITS_PER_MP_LIMB);
+ }
else /* Even limb boundary. */
MPN_COPY_INCR (rp, tp + discardl, tn);
}
@@ -291,7 +232,7 @@ lc (rp, rstate)
/* Return number of valid bits in the result. */
if (m2exp != 0)
- retval = m2exp / 2 + m2exp % 2;
+ retval = (m2exp + 1) / 2;
else
retval = SIZ (rstate->algdata.lc->m) * BITS_PER_MP_LIMB - shiftcount;
return retval;
@@ -315,7 +256,7 @@ lc_test (mp_ptr rp, gmp_randstate_t s, const int evenbits)
{
mpn_lshift (rp, rp, rn, 1);
if (f % 2 == ! evenbits)
- rp[0] += 1;
+ rp[0] += 1;
}
return nbits;
@@ -333,114 +274,79 @@ _gmp_rand (rp, rstate, nbits)
#endif
{
mp_size_t rn; /* Size of R. */
- mp_size_t nbits_stored; /* Bits stored in R so far. */
- mp_size_t ri; /* Index for current limb in R. */
-#ifdef RAWRANDEBUG
- static int evenbit = 0;
-
- evenbit = ! evenbit;
-#endif
rn = (nbits + BITS_PER_MP_LIMB - 1) / BITS_PER_MP_LIMB;
- MPN_ZERO (rp, rn); /* Clear destination. */
switch (rstate->alg)
{
case GMP_RAND_ALG_LC:
{
+ unsigned long int rbitpos;
+ int chunk_nbits;
mp_ptr tp;
mp_size_t tn;
- unsigned long int nrandbits;
- mp_size_t shiftc;
- mp_limb_t tlimb;
TMP_DECL (lcmark);
- /* Temp space. */
TMP_MARK (lcmark);
- tp = (mp_ptr) TMP_ALLOC ((rn + 1) * BYTES_PER_MP_LIMB);
-
- /* Main loop:
- Fill RP with NBITS random bits.
- RP is built from least significant limb and up.
- RN is the number of limbs in RP.
- NBITS_STORED is the number of bits stored in RP so far.
- RI is the index in RP to currently most significant limb,
- i.e. where next chunk of random bits are to be copied.
-
- i) Get random bits from lc() and store them in TP.
- NRANDBITS is the number of random bits in TP.
- TN is the number of limbs in TP.
-
- ii) Shift TP left as many steps as necessary for copying TP into
- RP.
- Mask in least significant limb of TP into (currently) most
- significant limb of RP.
-
- iii) Copy the rest of the limbs in TP to RP. */
-
- ri = 0;
- nbits_stored = 0;
- while (nbits_stored < nbits)
- {
-#ifndef RAWRANDEBUG
- nrandbits = lc (tp, rstate);
-#else
- nrandbits = lc_test (tp, rstate, evenbit);
-#endif
- tn = (nrandbits + BITS_PER_MP_LIMB - 1) / BITS_PER_MP_LIMB;
+ chunk_nbits = rstate->algdata.lc->m2exp / 2;
+ tn = (chunk_nbits + BITS_PER_MP_LIMB - 1) / BITS_PER_MP_LIMB;
- /* Shift the bits in place for copying into
- destination. */
- shiftc = nbits_stored % BITS_PER_MP_LIMB;
- if (shiftc != 0)
- {
- tlimb = mpn_lshift (tp, tp, tn, shiftc);
- if (tlimb != 0)
- tp[tn++] = tlimb;
- }
+ tp = (mp_ptr) TMP_ALLOC (tn * BYTES_PER_MP_LIMB);
- /* Mask in least significant limb. */
- rp[ri] |= tp[0];
+ rbitpos = 0;
+ while (rbitpos + chunk_nbits <= nbits)
+ {
+ mp_ptr r2p = rp + rbitpos / BITS_PER_MP_LIMB;
- /* Is rp[ri] full yet? */
- if (nrandbits >= BITS_PER_MP_LIMB - shiftc)
+ if (rbitpos % BITS_PER_MP_LIMB != 0)
{
- ri++;
- nbits_stored += BITS_PER_MP_LIMB - shiftc;
- nrandbits -= BITS_PER_MP_LIMB - shiftc;
+ mp_limb_t savelimb, rcy;
+ /* Target of new chunk is not bit aligned. Use temp space
+ and align things by shifting it up. */
+ lc (tp, rstate);
+ savelimb = r2p[0];
+ rcy = mpn_lshift (r2p, tp, tn, rbitpos % BITS_PER_MP_LIMB);
+ r2p[0] |= savelimb;
+/* bogus */ if ((chunk_nbits % BITS_PER_MP_LIMB + rbitpos % BITS_PER_MP_LIMB)
+ > BITS_PER_MP_LIMB)
+ r2p[tn] = rcy;
}
else
{
- nbits_stored += nrandbits;
- nrandbits = 0;
- }
-
- if (nbits_stored >= nbits)
- break;
-
- /* More bits to store? */
- if (nrandbits > 0)
- {
- mp_size_t storec;
-
- /* Don't store more than the caller has asked for. */
- if (nrandbits > BITS_PER_MP_LIMB * (rn - ri))
- nrandbits = BITS_PER_MP_LIMB * (rn - ri);
- storec = nrandbits / BITS_PER_MP_LIMB
- + (nrandbits % BITS_PER_MP_LIMB != 0);
-
- MPN_COPY (rp + ri, tp + 1, storec);
- ri += storec;
- nbits_stored += nrandbits;
+ /* Target of new chunk is bit aligned. Let `lc' put bits
+ directly into our target variable. */
+ lc (r2p, rstate);
}
+ rbitpos += chunk_nbits;
}
- /* Clear excess bits (most significant ones). */
- if (nbits_stored > nbits)
+ /* Handle last [0..chunk_nbits) bits. */
+ if (rbitpos != nbits)
{
- mp_size_t nsave = nbits % BITS_PER_MP_LIMB;
- rp[rn - 1] &= (((mp_limb_t) 1 << nsave) - 1);
+ mp_ptr r2p = rp + rbitpos / BITS_PER_MP_LIMB;
+ int last_nbits = nbits - rbitpos;
+ tn = (last_nbits + BITS_PER_MP_LIMB - 1) / BITS_PER_MP_LIMB;
+ lc (tp, rstate);
+ if (rbitpos % BITS_PER_MP_LIMB != 0)
+ {
+ mp_limb_t savelimb, rcy;
+ /* Target of new chunk is not bit aligned. Use temp space
+ and align things by shifting it up. */
+ savelimb = r2p[0];
+ rcy = mpn_lshift (r2p, tp, tn, rbitpos % BITS_PER_MP_LIMB);
+ r2p[0] |= savelimb;
+ if (rbitpos + tn * BITS_PER_MP_LIMB - rbitpos % BITS_PER_MP_LIMB < nbits)
+ r2p[tn] = rcy;
+ }
+ else
+ {
+ MPN_COPY (r2p, tp, tn);
+ }
+ /* Mask off top bits if needed. */
+ if (nbits % BITS_PER_MP_LIMB != 0)
+ rp[nbits / BITS_PER_MP_LIMB]
+ &= ~ ((~(mp_limb_t) 0) << nbits % BITS_PER_MP_LIMB);
}
TMP_FREE (lcmark);