author     Marco Bodrato <bodrato@mail.dm.unipi.it>  2019-11-19 15:47:28 +0100
committer  Marco Bodrato <bodrato@mail.dm.unipi.it>  2019-11-19 15:47:28 +0100
commit     b741989fd983b2fbc2fc1529cfc02634b29d89fe (patch)
tree       19484c2d792105ea386b01deb5c7b429e0f0eefd
parent     83db8ace04f95ec835a0445e933bba305b48406f (diff)
download   gmp-b741989fd983b2fbc2fc1529cfc02634b29d89fe.tar.gz

mini-gmp/mini-gmp.c: Indent

-rw-r--r--  mini-gmp/mini-gmp.c  | 210
1 file changed, 105 insertions, 105 deletions
diff --git a/mini-gmp/mini-gmp.c b/mini-gmp/mini-gmp.c
index a1e9047c7..cba05c504 100644
--- a/mini-gmp/mini-gmp.c
+++ b/mini-gmp/mini-gmp.c
@@ -145,27 +145,27 @@ see https://www.gnu.org/licenses/. */
         w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS); \
       } \
     else { \
-    mp_limb_t __x0, __x1, __x2, __x3; \
-    unsigned __ul, __vl, __uh, __vh; \
-    mp_limb_t __u = (u), __v = (v); \
+      mp_limb_t __x0, __x1, __x2, __x3; \
+      unsigned __ul, __vl, __uh, __vh; \
+      mp_limb_t __u = (u), __v = (v); \
 \
-    __ul = __u & GMP_LLIMB_MASK; \
-    __uh = __u >> (GMP_LIMB_BITS / 2); \
-    __vl = __v & GMP_LLIMB_MASK; \
-    __vh = __v >> (GMP_LIMB_BITS / 2); \
+      __ul = __u & GMP_LLIMB_MASK; \
+      __uh = __u >> (GMP_LIMB_BITS / 2); \
+      __vl = __v & GMP_LLIMB_MASK; \
+      __vh = __v >> (GMP_LIMB_BITS / 2); \
 \
-    __x0 = (mp_limb_t) __ul * __vl; \
-    __x1 = (mp_limb_t) __ul * __vh; \
-    __x2 = (mp_limb_t) __uh * __vl; \
-    __x3 = (mp_limb_t) __uh * __vh; \
+      __x0 = (mp_limb_t) __ul * __vl; \
+      __x1 = (mp_limb_t) __ul * __vh; \
+      __x2 = (mp_limb_t) __uh * __vl; \
+      __x3 = (mp_limb_t) __uh * __vh; \
 \
-    __x1 += __x0 >> (GMP_LIMB_BITS / 2); /* this can't give carry */ \
-    __x1 += __x2;                        /* but this indeed can */ \
-    if (__x1 < __x2)                     /* did we get it? */ \
-      __x3 += GMP_HLIMB_BIT;             /* yes, add it in the proper pos. */ \
+      __x1 += __x0 >> (GMP_LIMB_BITS / 2); /* this can't give carry */ \
+      __x1 += __x2;                        /* but this indeed can */ \
+      if (__x1 < __x2)                     /* did we get it? */ \
+        __x3 += GMP_HLIMB_BIT;             /* yes, add it in the proper pos. */ \
 \
-    (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \
-    (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \
+      (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \
+      (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \
     } \
   } while (0)
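
For readers skimming the patch, the re-indented else branch above is the portable fallback of a widening multiply (the w1, w0, u, v parameters and GMP_HLIMB_BIT match the gmp_umul_ppmm call visible in the second hunk): each operand is split into half-limbs, the four cross products are formed, and the one possible carry between the middle terms is propagated by hand. Below is a minimal standalone sketch of the same technique, hard-coded to a 32-bit limb so it compiles and runs anywhere; limb_t, HALF_BITS, LO_MASK, HI_BIT and umul_ppmm_sketch are illustrative names standing in for mp_limb_t, GMP_LIMB_BITS / 2, GMP_LLIMB_MASK and GMP_HLIMB_BIT, not mini-gmp's own.

/* Sketch only: half-limb widening multiply, same scheme as the
   macro fallback above, fixed to a 32-bit limb. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t limb_t;
#define LIMB_BITS 32
#define HALF_BITS (LIMB_BITS / 2)
#define LO_MASK   (((limb_t) 1 << HALF_BITS) - 1)  /* like GMP_LLIMB_MASK */
#define HI_BIT    ((limb_t) 1 << HALF_BITS)        /* like GMP_HLIMB_BIT */

static void
umul_ppmm_sketch (limb_t *w1, limb_t *w0, limb_t u, limb_t v)
{
  limb_t ul = u & LO_MASK, uh = u >> HALF_BITS;
  limb_t vl = v & LO_MASK, vh = v >> HALF_BITS;

  limb_t x0 = ul * vl;            /* low  x low  */
  limb_t x1 = ul * vh;            /* low  x high */
  limb_t x2 = uh * vl;            /* high x low  */
  limb_t x3 = uh * vh;            /* high x high */

  x1 += x0 >> HALF_BITS;          /* cannot carry */
  x1 += x2;                       /* this one can */
  if (x1 < x2)                    /* carry out of the middle sum? */
    x3 += HI_BIT;                 /* add it in the proper position */

  *w1 = x3 + (x1 >> HALF_BITS);
  *w0 = (x1 << HALF_BITS) + (x0 & LO_MASK);
}

int
main (void)
{
  limb_t hi, lo;
  umul_ppmm_sketch (&hi, &lo, 0xffffffff, 0xffffffff);
  /* (2^32 - 1)^2 = 0xfffffffe00000001 */
  printf ("%08x%08x\n", (unsigned) hi, (unsigned) lo);
  return 0;
}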
@@ -784,116 +784,116 @@ mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0)
         (((unsigned long) u1 << GMP_LIMB_BITS_MUL_3 / 3) + u0);
     }
   else {
-  mp_limb_t r, m;
+    mp_limb_t r, m;

-  if (GMP_ULONG_BITS >= GMP_LIMB_BITS * 2)
-    {
-      /* Set m to the 2/1 inverse of u1. */
-      m = ~((unsigned long) u1 << GMP_LIMB_BITS_MUL_3 / 3) / u1;
-      r = ~(m * u1);
-    }
-  else
-    {
-      mp_limb_t p, ql;
-      unsigned ul, uh, qh;
+    if (GMP_ULONG_BITS >= GMP_LIMB_BITS * 2)
+      {
+        /* Set m to the 2/1 inverse of u1. */
+        m = ~((unsigned long) u1 << GMP_LIMB_BITS_MUL_3 / 3) / u1;
+        r = ~(m * u1);
+      }
+    else
+      {
+        mp_limb_t p, ql;
+        unsigned ul, uh, qh;

-      /* For notation, let b denote the half-limb base, so that B = b^2.
-         Split u1 = b uh + ul. */
-      ul = u1 & GMP_LLIMB_MASK;
-      uh = u1 >> (GMP_LIMB_BITS / 2);
+        /* For notation, let b denote the half-limb base, so that B = b^2.
+           Split u1 = b uh + ul. */
+        ul = u1 & GMP_LLIMB_MASK;
+        uh = u1 >> (GMP_LIMB_BITS / 2);

-      /* Approximation of the high half of quotient. Differs from the 2/1
-         inverse of the half limb uh, since we have already subtracted
-         u0. */
-      qh = ~u1 / uh;
+        /* Approximation of the high half of quotient. Differs from the 2/1
+           inverse of the half limb uh, since we have already subtracted
+           u0. */
+        qh = ~u1 / uh;

-      /* Adjust to get a half-limb 3/2 inverse, i.e., we want
+        /* Adjust to get a half-limb 3/2 inverse, i.e., we want

-         qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u)
-             = floor( (b (~u) + b-1) / u),
+           qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u)
+               = floor( (b (~u) + b-1) / u),

-         and the remainder
+           and the remainder

-         r = b (~u) + b-1 - qh (b uh + ul)
-           = b (~u - qh uh) + b-1 - qh ul
+           r = b (~u) + b-1 - qh (b uh + ul)
+             = b (~u - qh uh) + b-1 - qh ul

-         Subtraction of qh ul may underflow, which implies adjustments.
-         But by normalization, 2 u >= B > qh ul, so we need to adjust by
-         at most 2.
-      */
+           Subtraction of qh ul may underflow, which implies adjustments.
+           But by normalization, 2 u >= B > qh ul, so we need to adjust by
+           at most 2.
+        */

-      r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK;
+        r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK;

-      p = (mp_limb_t) qh * ul;
-      /* Adjustment steps taken from udiv_qrnnd_c */
-      if (r < p)
-        {
-          qh--;
-          r += u1;
-          if (r >= u1) /* i.e. we didn't get carry when adding to r */
-            if (r < p)
-              {
-                qh--;
-                r += u1;
-              }
-        }
-      r -= p;
+        p = (mp_limb_t) qh * ul;
+        /* Adjustment steps taken from udiv_qrnnd_c */
+        if (r < p)
+          {
+            qh--;
+            r += u1;
+            if (r >= u1) /* i.e. we didn't get carry when adding to r */
+              if (r < p)
+                {
+                  qh--;
+                  r += u1;
+                }
+          }
+        r -= p;

-      /* Low half of the quotient is
+        /* Low half of the quotient is

-         ql = floor ( (b r + b-1) / u1).
+           ql = floor ( (b r + b-1) / u1).

-         This is a 3/2 division (on half-limbs), for which qh is a
-         suitable inverse. */
+           This is a 3/2 division (on half-limbs), for which qh is a
+           suitable inverse. */

-      p = (r >> (GMP_LIMB_BITS / 2)) * qh + r;
-      /* Unlike full-limb 3/2, we can add 1 without overflow. For this to
-         work, it is essential that ql is a full mp_limb_t. */
-      ql = (p >> (GMP_LIMB_BITS / 2)) + 1;
+        p = (r >> (GMP_LIMB_BITS / 2)) * qh + r;
+        /* Unlike full-limb 3/2, we can add 1 without overflow. For this to
+           work, it is essential that ql is a full mp_limb_t. */
+        ql = (p >> (GMP_LIMB_BITS / 2)) + 1;

-      /* By the 3/2 trick, we don't need the high half limb. */
-      r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1;
+        /* By the 3/2 trick, we don't need the high half limb. */
+        r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1;

-      if (r >= (p << (GMP_LIMB_BITS / 2)))
-        {
-          ql--;
-          r += u1;
-        }
-      m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql;
-      if (r >= u1)
-        {
-          m++;
-          r -= u1;
-        }
-    }
+        if (r >= (p << (GMP_LIMB_BITS / 2)))
+          {
+            ql--;
+            r += u1;
+          }
+        m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql;
+        if (r >= u1)
+          {
+            m++;
+            r -= u1;
+          }
+      }

-  /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a
-     3/2 inverse. */
-  if (u0 > 0)
-    {
-      mp_limb_t th, tl;
-      r = ~r;
-      r += u0;
-      if (r < u0)
-        {
-          m--;
-          if (r >= u1)
-            {
-              m--;
-              r -= u1;
-            }
-          r -= u1;
-        }
-      gmp_umul_ppmm (th, tl, u0, m);
-      r += th;
-      if (r < th)
-        {
-          m--;
-          m -= ((r > u1) | ((r == u1) & (tl > u0)));
-        }
-    }
+    /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a
+       3/2 inverse. */
+    if (u0 > 0)
+      {
+        mp_limb_t th, tl;
+        r = ~r;
+        r += u0;
+        if (r < u0)
+          {
+            m--;
+            if (r >= u1)
+              {
+                m--;
+                r -= u1;
+              }
+            r -= u1;
+          }
+        gmp_umul_ppmm (th, tl, u0, m);
+        r += th;
+        if (r < th)
+          {
+            m--;
+            m -= ((r > u1) | ((r == u1) & (tl > u0)));
+          }
+      }

-  return m;
+    return m;
   }
 }
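
The comments in this hunk pin down what mpn_invert_3by2 returns: with B = 2^GMP_LIMB_BITS and a normalized u1 (high bit set), the 3/2 inverse is m = floor((B^3 - 1) / (B u1 + u0)) - B, and the 2/1 inverse is the u0 = 0 special case. A brute-force check of that defining property is sketched below with an assumed 16-bit limb, so the exact quotient fits in 64-bit arithmetic; invert_3by2_ref is a hypothetical reference, not a mini-gmp function, and a port of mpn_invert_3by2 to 16-bit limbs should agree with it.

/* Sketch only: reference value of the 3/2 inverse by direct division,
   using a 16-bit limb so B^3 - 1 fits in a uint64_t. */
#include <stdint.h>
#include <stdio.h>

#define LIMB_BITS 16
#define B ((uint64_t) 1 << LIMB_BITS)

static uint16_t
invert_3by2_ref (uint16_t u1, uint16_t u0)
{
  uint64_t u = B * u1 + u0;          /* the two-limb divisor */
  uint64_t q = (B * B * B - 1) / u;  /* floor ((B^3 - 1) / u), in [B, 2B) */
  return (uint16_t) (q - B);         /* drop the implicit leading limb */
}

int
main (void)
{
  /* u1 must be normalized, i.e. have its high bit set. */
  printf ("%#x\n", (unsigned) invert_3by2_ref (0x8000, 0x0000)); /* 0xffff */
  printf ("%#x\n", (unsigned) invert_3by2_ref (0xffff, 0xffff)); /* 0 */
  return 0;
}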
@@ -3345,7 +3345,7 @@ mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k)
   mpz_fac_ui (t, k);
   for (; k > 0; --k)
-  mpz_mul_ui (r, r, n--);
+    mpz_mul_ui (r, r, n--);
   mpz_divexact (r, r, t);
   mpz_clear (t);
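
For context, the loop touched by this hunk is the tail of mpz_bin_uiui: it multiplies r by n, n-1, ..., n-k+1 (the falling factorial), and mpz_divexact then divides by t = k!, which is exact because binomial(n, k) is an integer. A plain 64-bit sketch of the same scheme, assuming n >= k and values small enough that nothing overflows; bin_uiui_sketch is an illustrative name, not a GMP entry point.

/* Sketch only: binomial(n, k) as a falling factorial divided by k!,
   mirroring the mpz_bin_uiui loop above in fixed 64-bit arithmetic. */
#include <stdint.h>
#include <stdio.h>

static uint64_t
bin_uiui_sketch (unsigned long n, unsigned long k)
{
  uint64_t r = 1, t = 1;
  unsigned long i;

  for (i = 2; i <= k; i++)   /* t = k! */
    t *= i;
  for (; k > 0; --k)         /* r = n * (n-1) * ... * (n-k+1) */
    r *= n--;
  return r / t;              /* exact, like mpz_divexact (r, r, t) */
}

int
main (void)
{
  printf ("%llu\n", (unsigned long long) bin_uiui_sketch (10, 3)); /* 120 */
  return 0;
}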