summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--AUTHORS8
-rw-r--r--Makefile.am2
-rw-r--r--gmp-impl.h48
-rw-r--r--mpn/powerpc32/invert_limb.asm2
-rw-r--r--mpn/powerpc64/mode64/sqr_basecase.asm2
-rw-r--r--mpn/s390_32/logops_n.asm2
-rw-r--r--mpn/s390_64/logops_n.asm2
-rw-r--r--mpn/x86/atom/aorrlshC_n.asm2
-rw-r--r--mpn/x86/atom/aorslshC_n.asm6
-rw-r--r--mpn/x86_64/k8/aorrlsh_n.asm6
-rw-r--r--mpn/x86_64/mod_1_1.asm4
-rw-r--r--mpn/x86_64/mullo_basecase.asm4
-rw-r--r--mpz/jacobi.c2
-rw-r--r--tests/mpz/reuse.c6
-rw-r--r--tests/refmpn.c12
-rw-r--r--tune/speed.h32
16 files changed, 70 insertions, 70 deletions
diff --git a/AUTHORS b/AUTHORS
index f399ce345..23c5cc75c 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -51,11 +51,11 @@ Marco Bodrato mpn/generic/toom44_mul.c, toom4_sqr.c, toom53_mul.c,
mullo_n.c, invert.c, invertappr.c.
David Harvey mpn/generic/add_err1_n.c, add_err2_n.c,
- add_err3_n.c, sub_err1_n.c, sub_err2_n.c,
- sub_err3_n.c, mulmid_basecase.c, mulmid_n.c,
- toom42_mulmid.c,
+ add_err3_n.c, sub_err1_n.c, sub_err2_n.c,
+ sub_err3_n.c, mulmid_basecase.c, mulmid_n.c,
+ toom42_mulmid.c,
mpn/x86_64/mul_basecase.asm, aors_err1_n.asm,
- aors_err2_n.asm, aors_err3_n.asm,
+ aors_err2_n.asm, aors_err3_n.asm,
mulmid_basecase.asm,
mpn/x86_64/core2/aors_err1_n.asm.
diff --git a/Makefile.am b/Makefile.am
index d535bd56f..07e4c4e00 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -167,7 +167,7 @@ MPZ_OBJECTS = mpz/abs$U.lo mpz/add$U.lo mpz/add_ui$U.lo \
mpz/gcd_ui$U.lo mpz/gcdext$U.lo mpz/get_d$U.lo mpz/get_d_2exp$U.lo \
mpz/get_si$U.lo mpz/get_str$U.lo mpz/get_ui$U.lo mpz/getlimbn$U.lo \
mpz/hamdist$U.lo \
- mpz/import$U.lo mpz/init$U.lo mpz/init2$U.lo mpz/inits$U.lo \
+ mpz/import$U.lo mpz/init$U.lo mpz/init2$U.lo mpz/inits$U.lo \
mpz/inp_raw$U.lo mpz/inp_str$U.lo mpz/invert$U.lo \
mpz/ior$U.lo mpz/iset$U.lo mpz/iset_d$U.lo mpz/iset_si$U.lo \
mpz/iset_str$U.lo mpz/iset_ui$U.lo mpz/jacobi$U.lo mpz/kronsz$U.lo \
diff --git a/gmp-impl.h b/gmp-impl.h
index 51449edae..737c34c2d 100644
--- a/gmp-impl.h
+++ b/gmp-impl.h
@@ -314,9 +314,9 @@ extern "C" {
/* Usage: TMP_DECL;
- TMP_MARK;
- ptr = TMP_ALLOC (bytes);
- TMP_FREE;
+ TMP_MARK;
+ ptr = TMP_ALLOC (bytes);
+ TMP_FREE;
Small allocations should use TMP_SALLOC, big allocations should use
TMP_BALLOC. Allocations that might be small or big should use TMP_ALLOC.
@@ -1820,11 +1820,11 @@ __GMP_DECLSPEC void mpn_copyd (mp_ptr, mp_srcptr, mp_size_t);
std/repe/scasl/cld and cld/repe/scasl (the latter would be for stripping
low zeros).
- std cld
- P5 18 16
- P6 46 38
- K6 36 13
- K7 21 20
+ std cld
+ P5 18 16
+ P6 46 38
+ K6 36 13
+ K7 21 20
*/
#ifndef MPN_NORMALIZE
#define MPN_NORMALIZE(DST, NLIMBS) \
@@ -2166,7 +2166,7 @@ __GMP_DECLSPEC mp_limb_t gmp_primesieve (mp_ptr, mp_limb_t);
where FFT_FIRST_K+1 should be used, the second FFT_FIRST_K+2,
etc. See mpn_fft_best_k(). */
#ifndef MUL_FFT_TABLE
-#define MUL_FFT_TABLE \
+#define MUL_FFT_TABLE \
{ MUL_TOOM33_THRESHOLD * 4, /* k=5 */ \
MUL_TOOM33_THRESHOLD * 8, /* k=6 */ \
MUL_TOOM33_THRESHOLD * 16, /* k=7 */ \
@@ -2176,7 +2176,7 @@ __GMP_DECLSPEC mp_limb_t gmp_primesieve (mp_ptr, mp_limb_t);
0 }
#endif
#ifndef SQR_FFT_TABLE
-#define SQR_FFT_TABLE \
+#define SQR_FFT_TABLE \
{ SQR_TOOM3_THRESHOLD * 4, /* k=5 */ \
SQR_TOOM3_THRESHOLD * 8, /* k=6 */ \
SQR_TOOM3_THRESHOLD * 16, /* k=7 */ \
@@ -2941,7 +2941,7 @@ __GMP_DECLSPEC mp_limb_t mpn_invert_limb (mp_limb_t) ATTRIBUTE_CONST;
_p += _t1; \
if (_p < _t1) \
{ \
- _v--; \
+ _v--; \
if (UNLIKELY (_p >= (d1))) \
{ \
if (_p > (d1) || _t0 >= (d0)) \
@@ -3221,17 +3221,17 @@ __GMP_DECLSPEC extern const unsigned char binvert_limb_table[128];
\
if ((a) <= (d)) \
{ \
- /* small a is reasonably likely */ \
- (r) = (d) - (a); \
+ /* small a is reasonably likely */ \
+ (r) = (d) - (a); \
} \
else \
{ \
- unsigned __twos; \
- mp_limb_t __dnorm; \
- count_leading_zeros (__twos, d); \
- __twos -= GMP_NAIL_BITS; \
- __dnorm = (d) << __twos; \
- (r) = ((a) <= __dnorm ? __dnorm : 2*__dnorm) - (a); \
+ unsigned __twos; \
+ mp_limb_t __dnorm; \
+ count_leading_zeros (__twos, d); \
+ __twos -= GMP_NAIL_BITS; \
+ __dnorm = (d) << __twos; \
+ (r) = ((a) <= __dnorm ? __dnorm : 2*__dnorm) - (a); \
} \
\
ASSERT_LIMB (r); \
@@ -3304,8 +3304,8 @@ __GMP_DECLSPEC extern const unsigned char binvert_limb_table[128];
int __p = 0; \
do \
{ \
- __p ^= 0x96696996L >> (__n & 0x1F); \
- __n >>= 5; \
+ __p ^= 0x96696996L >> (__n & 0x1F); \
+ __n >>= 5; \
} \
while (__n != 0); \
\
@@ -3378,7 +3378,7 @@ __GMP_DECLSPEC extern const unsigned char binvert_limb_table[128];
#if ! defined (BSWAP_LIMB)
#if GMP_LIMB_BITS == 8
-#define BSWAP_LIMB(dst, src) \
+#define BSWAP_LIMB(dst, src) \
do { (dst) = (src); } while (0)
#endif
#if GMP_LIMB_BITS == 16
@@ -4888,11 +4888,11 @@ extern mp_size_t set_str_precompute_threshold;
#undef FAC_ODD_THRESHOLD
#define FAC_ODD_THRESHOLD fac_odd_threshold
-extern mp_size_t fac_odd_threshold;
+extern mp_size_t fac_odd_threshold;
#undef FAC_DSC_THRESHOLD
#define FAC_DSC_THRESHOLD fac_dsc_threshold
-extern mp_size_t fac_dsc_threshold;
+extern mp_size_t fac_dsc_threshold;
#undef FFT_TABLE_ATTRS
#define FFT_TABLE_ATTRS
diff --git a/mpn/powerpc32/invert_limb.asm b/mpn/powerpc32/invert_limb.asm
index a4a3c9040..84c5de3e3 100644
--- a/mpn/powerpc32/invert_limb.asm
+++ b/mpn/powerpc32/invert_limb.asm
@@ -33,7 +33,7 @@ EXTERN(approx_tab)
ASM_START()
PROLOGUE(mpn_invert_limb)
rlwinm r6, r3, 11, 22, 30 C extract bits 30..22 to pos 2^1
- srwi r10, r3, 11 C extract bits 31..11
+ srwi r10, r3, 11 C extract bits 31..11
LEA( r9, approx_tab) C N.B. clobbers r0 for ELF and Darwin
lhzx r9, r9, r6 C w2
addi r0, r10, 1
diff --git a/mpn/powerpc64/mode64/sqr_basecase.asm b/mpn/powerpc64/mode64/sqr_basecase.asm
index 72ac2d318..d32ef7e35 100644
--- a/mpn/powerpc64/mode64/sqr_basecase.asm
+++ b/mpn/powerpc64/mode64/sqr_basecase.asm
@@ -673,7 +673,7 @@ define(`climb', `r0')
rldicl. r0, n, 0,62 C r0 = n & 3, set cr0
cmpdi cr6, r0, 2
- addi n, n, 2 C compute count...
+ addi n, n, 2 C compute count...
srdi n, n, 2 C ...for ctr
mtctr n C put loop count into ctr
beq cr0, L(xb0)
diff --git a/mpn/s390_32/logops_n.asm b/mpn/s390_32/logops_n.asm
index ed4c0c858..61472acf2 100644
--- a/mpn/s390_32/logops_n.asm
+++ b/mpn/s390_32/logops_n.asm
@@ -84,7 +84,7 @@ ifdef(`VARIANT_1',`
ltr %r1, %r1 C < 256 bytes to copy?
je L(1)
-L(tp): LOGOPC 0(256, rp), 0(vp)
+L(tp): LOGOPC 0(256, rp), 0(vp)
la rp, 256(rp)
la vp, 256(vp)
brct %r1, L(tp)
diff --git a/mpn/s390_64/logops_n.asm b/mpn/s390_64/logops_n.asm
index 10d2ed36d..ae1a1abab 100644
--- a/mpn/s390_64/logops_n.asm
+++ b/mpn/s390_64/logops_n.asm
@@ -83,7 +83,7 @@ ifdef(`VARIANT_1',`
ltgr %r1, %r1 C < 256 bytes to copy?
je L(1)
-L(tp): LOGOPC 0(256, rp), 0(vp)
+L(tp): LOGOPC 0(256, rp), 0(vp)
la rp, 256(rp)
la vp, 256(vp)
brctg %r1, L(tp)
diff --git a/mpn/x86/atom/aorrlshC_n.asm b/mpn/x86/atom/aorrlshC_n.asm
index 337d5e3db..c24dcd8bf 100644
--- a/mpn/x86/atom/aorrlshC_n.asm
+++ b/mpn/x86/atom/aorrlshC_n.asm
@@ -112,7 +112,7 @@ L(oop):
shr $RSH, %ecx
mov 4(vp), %eax
shr %edx
- lea 8(vp), vp
+ lea 8(vp), vp
M4_inst (up), %ebp
lea (%ecx,%eax,M), %edx
mov %ebp, (rp)
diff --git a/mpn/x86/atom/aorslshC_n.asm b/mpn/x86/atom/aorslshC_n.asm
index 4ec0bc8d3..5d6ac986c 100644
--- a/mpn/x86/atom/aorslshC_n.asm
+++ b/mpn/x86/atom/aorslshC_n.asm
@@ -120,7 +120,7 @@ L(oop):
shr $RSH, %eax
mov 4(up), %ecx
add %edx, %edx
- lea 8(up), up
+ lea 8(up), up
M4_inst %ebp, (rp)
lea (%eax,%ecx,M), %eax
@@ -208,12 +208,12 @@ L(oop2):
mov %edx, (rp)
L(enteven2):
mov 4(%ebx), %edx
- lea 8(%ebx), %ebx
+ lea 8(%ebx), %ebx
M4_inst %eax, %edx
mov %edx, 4(rp)
sbb %edx, %edx
shr $RSH, %ecx
- lea 8(rp), rp
+ lea 8(rp), rp
L(entry2):
mov (up), %eax
decl GPARAM_SIZE
diff --git a/mpn/x86_64/k8/aorrlsh_n.asm b/mpn/x86_64/k8/aorrlsh_n.asm
index 3d9c0ae22..dc02bbaf1 100644
--- a/mpn/x86_64/k8/aorrlsh_n.asm
+++ b/mpn/x86_64/k8/aorrlsh_n.asm
@@ -59,8 +59,8 @@ ABI_SUPPORT(DOS64)
ABI_SUPPORT(STD64)
ASM_START()
- TEXT
- ALIGN(16)
+ TEXT
+ ALIGN(16)
PROLOGUE(func)
FUNC_ENTRY(4)
IFDOS(` mov 56(%rsp), %r8d ')
@@ -83,7 +83,7 @@ IFDOS(` mov 56(%rsp), %r8d ')
mov $1, R32(%r8)
shl R8(%rcx), %r8
- mul %r8 C initial multiply
+ mul %r8 C initial multiply
and $3, R32(%r9)
jz L(b0)
diff --git a/mpn/x86_64/mod_1_1.asm b/mpn/x86_64/mod_1_1.asm
index fbd3ba808..0167c35ee 100644
--- a/mpn/x86_64/mod_1_1.asm
+++ b/mpn/x86_64/mod_1_1.asm
@@ -111,8 +111,8 @@ L(top): and B2modb, r2
add %rax, r0
mov r2, %rax
adc %rdx, %rax
- sbb r2, r2
- sub $1, n
+ sbb r2, r2
+ sub $1, n
jnc L(top)
L(reduce_three):
diff --git a/mpn/x86_64/mullo_basecase.asm b/mpn/x86_64/mullo_basecase.asm
index 2c2f832a2..95ccdb5fc 100644
--- a/mpn/x86_64/mullo_basecase.asm
+++ b/mpn/x86_64/mullo_basecase.asm
@@ -90,7 +90,7 @@ L(1): imul %r8, %rax
L(2): mov 8(vp_param), %r11
imul %rax, %r11 C u0 x v1
- mul %r8 C u0 x v0
+ mul %r8 C u0 x v0
mov %rax, (rp)
imul 8(up), %r8 C u1 x v0
lea (%r11, %rdx), %rax
@@ -117,7 +117,7 @@ L(3): mov 8(vp_param), %r9 C v1
add %rax, %rcx
adc %rdx, %r9
mov %r11, %rax
- imul (up), %rax C u0 x v2 -> r2
+ imul (up), %rax C u0 x v2 -> r2
add %rax, %r9
mov %rcx, 8(rp)
mov %r9, 16(rp)
diff --git a/mpz/jacobi.c b/mpz/jacobi.c
index 459aed27a..ff3fd50fd 100644
--- a/mpz/jacobi.c
+++ b/mpz/jacobi.c
@@ -131,7 +131,7 @@ mpz_jacobi (mpz_srcptr a, mpz_srcptr b)
+ btwos > 0, in which case A is always odd
+ asize == bsize == 1, in which case this code path is never
- taken. */
+ taken. */
count_trailing_zeros (btwos, blow);
blow >>= btwos;
diff --git a/tests/mpz/reuse.c b/tests/mpz/reuse.c
index aaef6e1cc..c3beb0e26 100644
--- a/tests/mpz/reuse.c
+++ b/tests/mpz/reuse.c
@@ -102,8 +102,8 @@ struct {
{
/* Don't change order here without changing the code in main(). */
{ mpz_add_ui, "mpz_add_ui", 0 },
- { mpz_mul_ui, "mpz_mul_ui", 0 },
- { mpz_sub_ui, "mpz_sub_ui", 0 },
+ { mpz_mul_ui, "mpz_mul_ui", 0 },
+ { mpz_sub_ui, "mpz_sub_ui", 0 },
{ mpz_fdiv_q_2exp, "mpz_fdiv_q_2exp", 0x1000 },
{ mpz_fdiv_r_2exp, "mpz_fdiv_r_2exp", 0x1000 },
{ mpz_cdiv_q_2exp, "mpz_cdiv_q_2exp", 0x1000 },
@@ -111,7 +111,7 @@ struct {
{ mpz_tdiv_q_2exp, "mpz_tdiv_q_2exp", 0x1000 },
{ mpz_tdiv_r_2exp, "mpz_tdiv_r_2exp", 0x1000 },
{ mpz_mul_2exp, "mpz_mul_2exp", 0x100 },
- { mpz_pow_ui, "mpz_pow_ui", 0x10 }
+ { mpz_pow_ui, "mpz_pow_ui", 0x10 }
};
struct {
diff --git a/tests/refmpn.c b/tests/refmpn.c
index abf82b1de..094f71bd3 100644
--- a/tests/refmpn.c
+++ b/tests/refmpn.c
@@ -624,7 +624,7 @@ refmpn_subcnd_n (mp_ptr rp, mp_srcptr s1p, mp_srcptr s2p, mp_size_t size, mp_lim
{ \
mp_size_t i; \
mp_limb_t carry2; \
- \
+ \
ASSERT (refmpn_overlap_fullonly_p (rp, s1p, size)); \
ASSERT (refmpn_overlap_fullonly_p (rp, s2p, size)); \
ASSERT (! refmpn_overlap_p (rp, size, yp, size)); \
@@ -640,7 +640,7 @@ refmpn_subcnd_n (mp_ptr rp, mp_srcptr s1p, mp_srcptr s2p, mp_size_t size, mp_lim
ASSERT_MPN (yp, size); \
\
ep[0] = ep[1] = CNST_LIMB(0); \
- \
+ \
for (i = 0; i < size; i++) \
{ \
carry = operation (&rp[i], s1p[i], s2p[i], carry); \
@@ -674,7 +674,7 @@ refmpn_sub_err1_n (mp_ptr rp, mp_srcptr s1p, mp_srcptr s2p,
{ \
mp_size_t i; \
mp_limb_t carry2; \
- \
+ \
ASSERT (refmpn_overlap_fullonly_p (rp, s1p, size)); \
ASSERT (refmpn_overlap_fullonly_p (rp, s2p, size)); \
ASSERT (! refmpn_overlap_p (rp, size, y1p, size)); \
@@ -694,7 +694,7 @@ refmpn_sub_err1_n (mp_ptr rp, mp_srcptr s1p, mp_srcptr s2p,
\
ep[0] = ep[1] = CNST_LIMB(0); \
ep[2] = ep[3] = CNST_LIMB(0); \
- \
+ \
for (i = 0; i < size; i++) \
{ \
carry = operation (&rp[i], s1p[i], s2p[i], carry); \
@@ -731,7 +731,7 @@ refmpn_sub_err2_n (mp_ptr rp, mp_srcptr s1p, mp_srcptr s2p,
{ \
mp_size_t i; \
mp_limb_t carry2; \
- \
+ \
ASSERT (refmpn_overlap_fullonly_p (rp, s1p, size)); \
ASSERT (refmpn_overlap_fullonly_p (rp, s2p, size)); \
ASSERT (! refmpn_overlap_p (rp, size, y1p, size)); \
@@ -755,7 +755,7 @@ refmpn_sub_err2_n (mp_ptr rp, mp_srcptr s1p, mp_srcptr s2p,
ep[0] = ep[1] = CNST_LIMB(0); \
ep[2] = ep[3] = CNST_LIMB(0); \
ep[4] = ep[5] = CNST_LIMB(0); \
- \
+ \
for (i = 0; i < size; i++) \
{ \
carry = operation (&rp[i], s1p[i], s2p[i], carry); \
diff --git a/tune/speed.h b/tune/speed.h
index 8a4536219..ddd2a6473 100644
--- a/tune/speed.h
+++ b/tune/speed.h
@@ -2691,24 +2691,24 @@ int speed_routine_count_zeros_setup (struct speed_params *, mp_ptr, int, int);
unsigned i; \
double t; \
TMP_DECL; \
- \
+ \
if (s->size < 2) \
return -1; \
- \
+ \
TMP_MARK; \
- \
+ \
SPEED_TMP_ALLOC_LIMBS (ap, s->size + 1, s->align_xp); \
SPEED_TMP_ALLOC_LIMBS (bp, s->size + 1, s->align_yp); \
- \
+ \
s->xp[s->size - 1] |= 1; \
s->yp[s->size - 1] |= 1; \
- \
+ \
hgcd_init_itch = MPN_HGCD_MATRIX_INIT_ITCH (s->size); \
hgcd_itch = itchfunc (s->size); \
- \
+ \
SPEED_TMP_ALLOC_LIMBS (tmp1, hgcd_init_itch, s->align_wp); \
SPEED_TMP_ALLOC_LIMBS (wp, hgcd_itch, s->align_wp); \
- \
+ \
speed_operand_src (s, s->xp, s->size); \
speed_operand_src (s, s->yp, s->size); \
speed_operand_dst (s, ap, s->size + 1); \
@@ -2716,7 +2716,7 @@ int speed_routine_count_zeros_setup (struct speed_params *, mp_ptr, int, int);
speed_operand_dst (s, wp, hgcd_itch); \
speed_operand_dst (s, tmp1, hgcd_init_itch); \
speed_cache_fill (s); \
- \
+ \
speed_starttime (); \
i = s->reps; \
do \
@@ -2742,24 +2742,24 @@ int speed_routine_count_zeros_setup (struct speed_params *, mp_ptr, int, int);
unsigned i; \
double t; \
TMP_DECL; \
- \
+ \
if (s->size < 2) \
return -1; \
- \
+ \
TMP_MARK; \
- \
+ \
SPEED_TMP_ALLOC_LIMBS (ap, s->size + 1, s->align_xp); \
SPEED_TMP_ALLOC_LIMBS (bp, s->size + 1, s->align_yp); \
- \
+ \
s->xp[s->size - 1] |= 1; \
s->yp[s->size - 1] |= 1; \
- \
+ \
hgcd_init_itch = MPN_HGCD_MATRIX_INIT_ITCH (s->size); \
hgcd_step_itch = itchfunc (s->size, p); \
- \
+ \
SPEED_TMP_ALLOC_LIMBS (tmp1, hgcd_init_itch, s->align_wp); \
SPEED_TMP_ALLOC_LIMBS (wp, hgcd_step_itch, s->align_wp); \
- \
+ \
speed_operand_src (s, s->xp, s->size); \
speed_operand_src (s, s->yp, s->size); \
speed_operand_dst (s, ap, s->size + 1); \
@@ -2767,7 +2767,7 @@ int speed_routine_count_zeros_setup (struct speed_params *, mp_ptr, int, int);
speed_operand_dst (s, wp, hgcd_step_itch); \
speed_operand_dst (s, tmp1, hgcd_init_itch); \
speed_cache_fill (s); \
- \
+ \
speed_starttime (); \
i = s->reps; \
do \