summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--ChangeLog34
-rw-r--r--acinclude.m42
-rw-r--r--doc/configuration2
-rw-r--r--doc/gmp.texi4
-rw-r--r--doc/projects.html8
-rw-r--r--doc/tasks.html4
-rw-r--r--mini-gmp/mini-gmp.c6
-rw-r--r--mpn/cray/README2
-rw-r--r--mpn/generic/broot.c2
-rw-r--r--mpn/generic/divis.c6
-rw-r--r--mpn/generic/gcdext_1.c2
-rw-r--r--mpn/generic/hgcd.c2
-rw-r--r--mpn/generic/hgcd_appr.c17
-rw-r--r--mpn/generic/hgcd_jacobi.c2
-rw-r--r--mpn/generic/hgcd_step.c2
-rw-r--r--mpn/generic/invertappr.c2
-rw-r--r--mpn/generic/mod_1.c2
-rw-r--r--mpn/generic/sbpi1_div_sec.c4
-rw-r--r--mpn/generic/toom42_mulmid.c2
-rw-r--r--mpn/generic/toom_interpolate_12pts.c2
-rw-r--r--mpn/generic/toom_interpolate_16pts.c2
-rw-r--r--mpn/generic/toom_interpolate_5pts.c2
-rw-r--r--mpn/ia64/ia64-defs.m42
-rw-r--r--mpn/mips64/README2
-rw-r--r--mpn/powerpc64/README2
-rw-r--r--mpn/powerpc64/mode64/p6/aorsmul_1.asm2
-rw-r--r--mpn/s390_32/esame/addmul_1.asm2
-rw-r--r--mpn/vax/gmp-mparam.h2
-rw-r--r--mpn/x86/fat/fat.c8
-rw-r--r--mpn/x86/k7/mmx/divrem_1.asm2
-rw-r--r--mpn/x86/pentium4/sse2/popcount.asm2
-rw-r--r--mpn/x86_64/aors_err1_n.asm2
-rw-r--r--mpn/x86_64/missing-call.m42
-rw-r--r--mpn/x86_64/missing-inline.m42
-rw-r--r--mpn/x86_64/missing.asm3
-rw-r--r--mpn/x86_64/mullo_basecase.asm2
-rw-r--r--mpn/x86_64/mulx/adx/addmul_1.asm2
-rw-r--r--mpz/2fac_ui.c2
-rw-r--r--mpz/bin_uiui.c2
-rw-r--r--mpz/prodlimbs.c2
-rw-r--r--mpz/set_f.c2
-rw-r--r--printf/doprnt.c2
-rw-r--r--tests/mpf/t-eq.c2
-rw-r--r--tests/mpf/t-sub.c2
-rw-r--r--tests/mpq/t-cmp.c2
-rw-r--r--tests/mpq/t-cmp_ui.c2
-rw-r--r--tests/mpz/t-jac.c4
-rw-r--r--tests/spinner.c2
-rw-r--r--tune/README2
-rw-r--r--tune/common.c2
-rw-r--r--tune/powerpc.asm2
-rw-r--r--tune/powerpc64.asm2
-rw-r--r--tune/speed.h2
-rw-r--r--tune/time.c2
-rw-r--r--tune/tuneup.c2
55 files changed, 93 insertions, 93 deletions
diff --git a/ChangeLog b/ChangeLog
index 042c94b10..da892efd7 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1286,7 +1286,7 @@
2013-01-10 Torbjorn Granlund <tege@gmplib.org>
- * mini-gmp/tests/t-import.c (main): Don't drop off functon end.
+ * mini-gmp/tests/t-import.c (main): Don't drop off function end.
* Makefile.am (check-mini-gmp): Set LD_LIBRARY_PATH to allow testing
with dynamic main GMP build.
@@ -1314,7 +1314,7 @@
* mini-gmp/README: Document base limitation for conversions.
* mini-gmp/mini-gmp.c (mpz_set_str): Remove goto.
- (mpz_import, mpz_export): Correctly use order/endianess.
+ (mpz_import, mpz_export): Correctly use order/endianness.
2013-01-05 Torbjorn Granlund <tege@gmplib.org>
@@ -2294,7 +2294,7 @@
2012-04-02 Marco Bodrato <bodrato@mail.dm.unipi.it>
- * mpz/oddfac_1.c: Initalize size for ASSERT.
+ * mpz/oddfac_1.c: Initialize size for ASSERT.
2012-04-02 Torbjorn Granlund <tege@gmplib.org>
@@ -2827,7 +2827,7 @@
2012-02-10 Niels Möller <nisse@lysator.liu.se>
- * tests/mpz/t-gcd.c (gcdext_valid_p): Enforce sligthly stricter
+ * tests/mpz/t-gcd.c (gcdext_valid_p): Enforce slightly stricter
bound for cofactors.
* mpn/generic/gcdext_lehmer.c (mpn_gcdext_hook): Corrected
@@ -3987,7 +3987,7 @@
2011-05-23 Niels Möller <nisse@lysator.liu.se>
- * mpz/jacobi.c (mpz_jacobi): Simplied by swapping operands when
+ * mpz/jacobi.c (mpz_jacobi): Simplified by swapping operands when
needed, to get asize >= bsize. Use the reciprocity law generalized
to work when one operand is even.
@@ -4615,7 +4615,7 @@
* tune/tuneup.c (tune_mod_1): Record result of MOD_1_1P_METHOD
measurement for use by mpn_mod_1_tune. And omit measurement if
- mpn_mod_1_1p is native assebly code.
+ mpn_mod_1_1p is native assembly code.
* mpn/generic/mod_1.c (mpn_mod_1_1p) [TUNE_PROGRAM_BUILD]: Macro
to check mod_1_1p_method and call the right function.
@@ -5658,7 +5658,7 @@
* configure.in (powerpc64): Support CPU specific mode-less subdirs.
* mpn/powerpc64/aix.m4 (PROLOGUE_cpu): Use "named csect" making
- requested aignment actually honoured.
+ requested alignment actually honoured.
2010-04-30 Niels Möller <nisse@lysator.liu.se>
@@ -12883,7 +12883,7 @@
numbers.
(hgcd_tdiv): New function.
(gcd_lehmer): New function, currently #if:ed out.
- (hgcd_start_row_p): New function, duplicatd from hgcd.c.
+ (hgcd_start_row_p): New function, duplicated from hgcd.c.
(gcd_schoenhage_itch): New function.
(gcd_schoenhage): New function.
(mpn_gcd): New advertised gcd function, which calls
@@ -13756,7 +13756,7 @@
2003-09-24 Torbjorn Granlund <tege@swox.com>
- * tune/speed.c (routine): Add entires for mpn_addlsh1_n, mpn_sublsh1_n,
+ * tune/speed.c (routine): Add entries for mpn_addlsh1_n, mpn_sublsh1_n,
mpn_rsh1add_n, and mpn_rsh1sub_n.
* tune/speed.h: Declare speed_mpn_addlsh1_n, speed_mpn_sublsh1_n,
@@ -13850,7 +13850,7 @@
mpn_addlsh1_n. Avoid all copying, at the expense of some additional
workspace.
- * gmp-impl.h (MPN_TOOM3_MUL_N_TSIZE, MPN_TOOM3_SQR_N_TSIZE): Accomodate
+ * gmp-impl.h (MPN_TOOM3_MUL_N_TSIZE, MPN_TOOM3_SQR_N_TSIZE): Accommodate
latest toom3 code.
2003-09-19 Kevin Ryde <kevin@swox.se>
@@ -14715,7 +14715,7 @@
* mpz/get_str.c: Likewise.
* mpf/get_str.c: Likewise.
- * mpz/inp_str.c: Restucture to allocate more string space just
+ * mpz/inp_str.c: Restructure to allocate more string space just
before needed.
* mpbsd/min.c: Likewise.
@@ -17035,7 +17035,7 @@
* mpf/fits_u.h: Rewrite - nailify.
* mpf/fits_s.h: Likewise.
- * mpz/mod.c: Disambiguate if-statament with extra {}.
+ * mpz/mod.c: Disambiguate if-statement with extra {}.
* mpf/int_p.c: Fix type of size variables.
* mpf/get_ui: Likewise.
@@ -18193,7 +18193,7 @@
* mpn/mp_bases.c: Add entries for base 256. Remove __ prefix from
table name.
- * gmp-impl.h (__mp_bases): Remove superflous `mp_' part of name, making
+ * gmp-impl.h (__mp_bases): Remove superfluous `mp_' part of name, making
it __gmpn_bases instead of __gmpn_mp_bases.
(mp_bases): New #define.
* tune/speed.h (SPEED_ROUTINE_MPN_SET_STR): Allow bases up to 256.
@@ -25273,7 +25273,7 @@
2000-04-16 Torbjorn Granlund <tege@swox.com>
- * mpn/generic/dump.c: Suppress output of leadign zeros.
+ * mpn/generic/dump.c: Suppress output of leading zeros.
* mpz/inp_str.c: Fix memory leakage.
@@ -26180,7 +26180,7 @@
tests/rand/Makefile.
* acinclude.m4 (AC_CANONICAL_BUILD): Define to
- `_AC_CANONICAL_BUILD' to deal with incompabilities between
+ `_AC_CANONICAL_BUILD' to deal with incompatibilities between
Autoconf and Libtool.
(AC_CHECK_TOOL_PREFIX): Likewise.
@@ -27027,7 +27027,7 @@
appropriate.
(athlon-*-*): Fix typo.
- * config.guess: Update x86 recog code to intiallly match
+ * config.guess: Update x86 recog code to initially match
more than just i386.
Call K6-2 and K6-III for "k62" and "k63" respectively.
@@ -27981,7 +27981,7 @@ Wed Jan 28 20:28:19 1998 Torbjorn Granlund <tege@sophie.matematik.su.se>
Mon Jan 26 01:39:02 1998 Torbjorn Granlund <tege@tunnis.tmg.se>
* mpz/pprime_p.c (mpz_probab_prime_p): Major overhaul: Check small
- numers specifically; check small factors, then perform a fermat test.
+ numbers specifically; check small factors, then perform a fermat test.
Tue Jan 13 14:58:28 1998 Torbjorn Granlund <tege@tunnis.tmg.se>
diff --git a/acinclude.m4 b/acinclude.m4
index 225afe20d..e95c376cc 100644
--- a/acinclude.m4
+++ b/acinclude.m4
@@ -1831,7 +1831,7 @@ AC_CACHE_CHECK([if the .align directive accepts an 0x90 fill in .text],
.byte 0
.align 4, 0x90],
[if grep "Warning: Fill parameter ignored for executable section" conftest.out >/dev/null; then
- echo "Supressing this warning by omitting 0x90" 1>&AC_FD_CC
+ echo "Suppressing this warning by omitting 0x90" 1>&AC_FD_CC
gmp_cv_asm_align_fill_0x90=no
else
gmp_cv_asm_align_fill_0x90=yes
diff --git a/doc/configuration b/doc/configuration
index 903067cc5..9e0abd780 100644
--- a/doc/configuration
+++ b/doc/configuration
@@ -46,7 +46,7 @@ The same applies to mpf, mpq, scanf and printf.
The way we build libmpn (in the `mpn' subdirectory) is quite special.
-Currently only mpn/mp_bases.c is truely generic and included in every
+Currently only mpn/mp_bases.c is truly generic and included in every
configuration. All other files are linked at build time into the mpn
build directory from one of the CPU specific sub-directories, or from
the mpn/generic directory.
diff --git a/doc/gmp.texi b/doc/gmp.texi
index 61b20fd27..0e4440618 100644
--- a/doc/gmp.texi
+++ b/doc/gmp.texi
@@ -8017,10 +8017,10 @@ a multiple of 4 points, that's why for higher degree Toom'n'half is used.
Toom'n'half means that the existence of one more piece is considered for a
single operand. It can be virtual, i.e. zero, or real, when the two operand
-are not exactly balanced. By chosing an even @math{r},
+are not exactly balanced. By choosing an even @math{r},
Toom-@m{r{1\over2},r+1/2} requires @math{2r} points, a multiple of four.
-The four-plets of points inlcude 0, @m{\infty,inf}, +1, -1 and
+The four-plets of points include 0, @m{\infty,inf}, +1, -1 and
@m{\pm2^i,+-2^i}, @m{\pm2^{-i},+-2^-i} . Each of them giving shortcuts for the
evaluation phase and for some steps in the interpolation phase. Further tricks
are used to reduce the memory footprint of the whole multiplication algorithm
diff --git a/doc/projects.html b/doc/projects.html
index 35caf59fa..02540a931 100644
--- a/doc/projects.html
+++ b/doc/projects.html
@@ -37,7 +37,7 @@ along with the GNU MP Library. If not, see http://www.gnu.org/licenses/.
<hr>
<!-- NB. timestamp updated automatically by emacs -->
- This file current as of 5 Dec 2011. An up-to-date version is available at
+ This file current as of 21 Jul 2013. An up-to-date version is available at
<a href="http://gmplib.org/projects.html">http://gmplib.org/projects.html</a>.
Please send comments about this page to gmp-devel<font>@</font>gmplib.org.
@@ -151,7 +151,7 @@ along with the GNU MP Library. If not, see http://www.gnu.org/licenses/.
<p> Add more functions to the set of fat functions.
- <p> The speed of multipliciaton is today highly dependent on combination
+ <p> The speed of multiplication is today highly dependent on combination
functions like <code>addlsh1_n</code>. A fat binary will never use any such
functions, since they are classified as optional. Ideally, we should use
them, but making the current compile-time selections of optional functions
@@ -323,12 +323,12 @@ along with the GNU MP Library. If not, see http://www.gnu.org/licenses/.
<li> <strong>Factorial</strong>
- <p> Rewrite for simplicty and speed. Work is in progress.
+ <p> Rewrite for simplicity and speed. Work is in progress.
<li> <strong>Binomial Coefficients</strong>
- <p> Rewrite for simplicty and speed. Work is in progress.
+ <p> Rewrite for simplicity and speed. Work is in progress.
<li> <strong>Prime Testing</strong>
diff --git a/doc/tasks.html b/doc/tasks.html
index da4dfe014..5b74f980c 100644
--- a/doc/tasks.html
+++ b/doc/tasks.html
@@ -37,7 +37,7 @@ along with the GNU MP Library. If not, see http://www.gnu.org/licenses/.
<hr>
<!-- NB. timestamp updated automatically by emacs -->
- This file current as of 5 Dec 2011. An up-to-date version is available at
+ This file current as of 21 Jul 2013. An up-to-date version is available at
<a href="http://gmplib.org/tasks.html">http://gmplib.org/tasks.html</a>.
Please send comments about this page to gmp-devel<font>@</font>gmplib.org.
@@ -704,7 +704,7 @@ either already been taken care of, or have become irrelevant.
Consider making these variant <code>mpz_set_str</code> etc forms
available for <code>mpz_t</code> too, not just <code>mpz_class</code>
etc.
-<li> <code>mpq_class operator+=</code>: Don't emit an unnecssary
+<li> <code>mpq_class operator+=</code>: Don't emit an unnecessary
<code>mpq_set(q,q)</code> before <code>mpz_addmul</code> etc.
<li> Put various bits of gmpxx.h into libgmpxx, to avoid excessive inlining.
Candidates for this would be,
diff --git a/mini-gmp/mini-gmp.c b/mini-gmp/mini-gmp.c
index 3d193cfea..e3db9e029 100644
--- a/mini-gmp/mini-gmp.c
+++ b/mini-gmp/mini-gmp.c
@@ -1348,7 +1348,7 @@ mpz_init (mpz_t r)
}
/* The utility of this function is a bit limited, since many functions
- assings the result variable using mpz_swap. */
+ assign the result variable using mpz_swap. */
void
mpz_init2 (mpz_t r, mp_bitcnt_t bits)
{
@@ -3002,7 +3002,7 @@ mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m)
if (e->_mp_size < 0)
{
if (!mpz_invert (base, b, m))
- gmp_die ("mpz_powm: Negative exponent and non-invertibe base.");
+ gmp_die ("mpz_powm: Negative exponent and non-invertible base.");
}
else
{
@@ -3200,7 +3200,7 @@ mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k)
/* Numbers are treated as if represented in two's complement (and
infinitely sign extended). For a negative values we get the two's
- complement from -x = ~x + 1, where ~ is bitwise complementt.
+ complement from -x = ~x + 1, where ~ is bitwise complement.
Negation transforms
xxxx10...0
diff --git a/mpn/cray/README b/mpn/cray/README
index ccd743997..ab7616f58 100644
--- a/mpn/cray/README
+++ b/mpn/cray/README
@@ -107,4 +107,4 @@ down to 2.5 cycles/limb and mpn_addmul_1 times to 4 cycles/limb. By
storing even fewer bits per limb, perhaps 56, it would be possible to
write a mul_mul_basecase that would run at effectively 1 cycle/limb.
(Use VM here to better handle the romb-shaped multiply area, perhaps
-rouding operand sizes up to the next power of 2.)
+rounding operand sizes up to the next power of 2.)
diff --git a/mpn/generic/broot.c b/mpn/generic/broot.c
index cc1a4c038..4554226cf 100644
--- a/mpn/generic/broot.c
+++ b/mpn/generic/broot.c
@@ -59,7 +59,7 @@ powlimb (mp_limb_t a, mp_limb_t e)
r' = r - (a^{k-1} r^{k+1} - r) / k
- where we still have cancelation of low limbs.
+ where we still have cancellation of low limbs.
*/
void
diff --git a/mpn/generic/divis.c b/mpn/generic/divis.c
index e6d08f7c9..3e18c17fe 100644
--- a/mpn/generic/divis.c
+++ b/mpn/generic/divis.c
@@ -41,9 +41,9 @@ along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */
there's no addback, but it would need a multi-precision inverse and so
might be slower than the plain method (on small sizes at least).
- When D must be normalized (shifted to low bit set), it's possible to supress
- the bit-shifting of A down, as long as it's already been checked that A has
- at least as many trailing zero bits as D. */
+ When D must be normalized (shifted to low bit set), it's possible to
+ suppress the bit-shifting of A down, as long as it's already been checked
+ that A has at least as many trailing zero bits as D. */
int
mpn_divisible_p (mp_srcptr ap, mp_size_t an,
diff --git a/mpn/generic/gcdext_1.c b/mpn/generic/gcdext_1.c
index 3bb4d21df..20d818e7a 100644
--- a/mpn/generic/gcdext_1.c
+++ b/mpn/generic/gcdext_1.c
@@ -55,7 +55,7 @@ mpn_gcdext_1 (mp_limb_signed_t *sp, mp_limb_signed_t *tp,
V = s1 u + s0 v
where U, V are the inputs (without any shared power of two),
- and the matris has determinant ± 2^{shift}.
+ and the matrix has determinant ± 2^{shift}.
*/
mp_limb_t s0 = 1;
mp_limb_t t0 = 0;
diff --git a/mpn/generic/hgcd.c b/mpn/generic/hgcd.c
index f51bbde81..5f8503bc0 100644
--- a/mpn/generic/hgcd.c
+++ b/mpn/generic/hgcd.c
@@ -98,7 +98,7 @@ mpn_hgcd (mp_ptr ap, mp_ptr bp, mp_size_t n,
success = 1;
}
- /* NOTE: It apppears this loop never runs more than once (at
+ /* NOTE: It appears this loop never runs more than once (at
least when not recursing to hgcd_appr). */
while (n > n2)
{
diff --git a/mpn/generic/hgcd_appr.c b/mpn/generic/hgcd_appr.c
index bb8536ae6..7ce513cdb 100644
--- a/mpn/generic/hgcd_appr.c
+++ b/mpn/generic/hgcd_appr.c
@@ -97,7 +97,7 @@ mpn_hgcd_appr (mp_ptr ap, mp_ptr bp, mp_size_t n,
sbits <-- sbits + 1 - p,
rather than just sbits <-- sbits - p. This adjustment makes
- the produced matrix sligthly smaller than it could be. */
+ the produced matrix slightly smaller than it could be. */
if (GMP_NUMB_BITS * (n + 1) + 2 * extra_bits <= 2*GMP_NUMB_BITS * s)
{
@@ -130,14 +130,13 @@ mpn_hgcd_appr (mp_ptr ap, mp_ptr bp, mp_size_t n,
if (extra_bits > 0)
{
- /* We can get here only of we have dropped at least one of the
- least significant bits, so we can decrement ap and bp. We can
- then shift left extra bits using mpn_shiftr. */
- /* NOTE: In the unlikely case that n is large, it would be
- preferable to do an initial subdiv step to reduce the size
- before shifting, but that would mean daplicating
- mpn_gcd_subdiv_step with a bit count rather than a limb
- count. */
+ /* We can get here only if we have dropped at least one of the least
+ significant bits, so we can decrement ap and bp. We can then shift
+ left extra bits using mpn_rshift. */
+ /* NOTE: In the unlikely case that n is large, it would be preferable
+ to do an initial subdiv step to reduce the size before shifting,
+ but that would mean duplicating mpn_gcd_subdiv_step with a bit
+ count rather than a limb count. */
ap--; bp--;
ap[0] = mpn_rshift (ap+1, ap+1, n, GMP_NUMB_BITS - extra_bits);
bp[0] = mpn_rshift (bp+1, bp+1, n, GMP_NUMB_BITS - extra_bits);
diff --git a/mpn/generic/hgcd_jacobi.c b/mpn/generic/hgcd_jacobi.c
index 728755a62..177b8be29 100644
--- a/mpn/generic/hgcd_jacobi.c
+++ b/mpn/generic/hgcd_jacobi.c
@@ -60,7 +60,7 @@ hgcd_jacobi_hook (void *p, mp_srcptr gp, mp_size_t gn,
below the given size s. Return new size for a and b, or 0 if no
more steps are possible.
- If hgcd2 succeds, needs temporary space for hgcd_matrix_mul_1, M->n
+ If hgcd2 succeeds, needs temporary space for hgcd_matrix_mul_1, M->n
limbs, and hgcd_mul_matrix1_inverse_vector, n limbs. If hgcd2
fails, needs space for the quotient, qn <= n - s + 1 limbs, for and
hgcd_matrix_update_q, qn + (size of the appropriate column of M) <=
diff --git a/mpn/generic/hgcd_step.c b/mpn/generic/hgcd_step.c
index 740c56b28..fbb07922c 100644
--- a/mpn/generic/hgcd_step.c
+++ b/mpn/generic/hgcd_step.c
@@ -51,7 +51,7 @@ hgcd_hook (void *p, mp_srcptr gp, mp_size_t gn,
below the given size s. Return new size for a and b, or 0 if no
more steps are possible.
- If hgcd2 succeds, needs temporary space for hgcd_matrix_mul_1, M->n
+ If hgcd2 succeeds, needs temporary space for hgcd_matrix_mul_1, M->n
limbs, and hgcd_mul_matrix1_inverse_vector, n limbs. If hgcd2
fails, needs space for the quotient, qn <= n - s limbs, for and
hgcd_matrix_update_q, qn + (size of the appropriate column of M) <=
diff --git a/mpn/generic/invertappr.c b/mpn/generic/invertappr.c
index 6430d2ea3..747c42072 100644
--- a/mpn/generic/invertappr.c
+++ b/mpn/generic/invertappr.c
@@ -37,7 +37,7 @@ along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */
#include "gmp-impl.h"
#include "longlong.h"
-/* FIXME: The iterative version splits the operand in two slighty unbalanced
+/* FIXME: The iterative version splits the operand in two slightly unbalanced
parts, the use of log_2 (or counting the bits) underestimate the maximum
number of iterations. */
diff --git a/mpn/generic/mod_1.c b/mpn/generic/mod_1.c
index 66c332ea3..0474c8ba6 100644
--- a/mpn/generic/mod_1.c
+++ b/mpn/generic/mod_1.c
@@ -59,7 +59,7 @@ along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */
#endif
#if TUNE_PROGRAM_BUILD && !HAVE_NATIVE_mpn_mod_1_1p
-/* Duplicates declaratinos in tune/speed.h */
+/* Duplicates declarations in tune/speed.h */
mp_limb_t mpn_mod_1_1p_1 (mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t [4]);
mp_limb_t mpn_mod_1_1p_2 (mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t [4]);
diff --git a/mpn/generic/sbpi1_div_sec.c b/mpn/generic/sbpi1_div_sec.c
index aaa1b4bb3..2b9a91233 100644
--- a/mpn/generic/sbpi1_div_sec.c
+++ b/mpn/generic/sbpi1_div_sec.c
@@ -36,7 +36,7 @@ with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */
too large (B is the limb base, D is the divisor, and i is the induction
variable); the subsequent step will handle the extra partial remainder bits.
- WIth that partial remainder reduction, each step generates a quotient "half
+ With that partial remainder reduction, each step generates a quotient "half
limb". The outer loop generates two quotient half limbs, an upper (q1h) and
a lower (q0h) which are stored sparsely in separate limb arrays. These
arrays are added at the end; using separate arrays avoids data-dependent
@@ -48,7 +48,7 @@ with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */
remainders, which we reduce later, as described above.
In order to keep quotients from getting too big, corresponding to a negative
- partial remainder, we use an inverse which is sligtly smaller than usually.
+ partial remainder, we use an inverse which is slightly smaller than usually.
*/
#if OPERATION_sbpi1_div_qr_sec
diff --git a/mpn/generic/toom42_mulmid.c b/mpn/generic/toom42_mulmid.c
index c77b00f95..30061a82a 100644
--- a/mpn/generic/toom42_mulmid.c
+++ b/mpn/generic/toom42_mulmid.c
@@ -219,7 +219,7 @@ mpn_toom42_mulmid (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n,
/* FIXME: should probably define an interface "mpn_mulmid_diag_1"
that can handle the sum below. Currently we're relying on
mulmid_basecase being pretty fast for a diagonal sum like this,
- which is true at least for the K8 asm verion, but surely false
+ which is true at least for the K8 asm version, but surely false
for the generic version. */
mpn_mulmid_basecase (e, ap + n - 1, n - 1, bp, n - 1);
mpn_add_n (rp + n - 1, rp + n - 1, e, 3);
diff --git a/mpn/generic/toom_interpolate_12pts.c b/mpn/generic/toom_interpolate_12pts.c
index e4a765bed..577082f59 100644
--- a/mpn/generic/toom_interpolate_12pts.c
+++ b/mpn/generic/toom_interpolate_12pts.c
@@ -1,4 +1,4 @@
-/* Interpolaton for the algorithm Toom-Cook 6.5-way.
+/* Interpolation for the algorithm Toom-Cook 6.5-way.
Contributed to the GNU project by Marco Bodrato.
diff --git a/mpn/generic/toom_interpolate_16pts.c b/mpn/generic/toom_interpolate_16pts.c
index 445479cd4..a90913f65 100644
--- a/mpn/generic/toom_interpolate_16pts.c
+++ b/mpn/generic/toom_interpolate_16pts.c
@@ -1,4 +1,4 @@
-/* Interpolaton for the algorithm Toom-Cook 8.5-way.
+/* Interpolation for the algorithm Toom-Cook 8.5-way.
Contributed to the GNU project by Marco Bodrato.
diff --git a/mpn/generic/toom_interpolate_5pts.c b/mpn/generic/toom_interpolate_5pts.c
index 8416b641c..958e30cbb 100644
--- a/mpn/generic/toom_interpolate_5pts.c
+++ b/mpn/generic/toom_interpolate_5pts.c
@@ -141,7 +141,7 @@ mpn_toom_interpolate_5pts (mp_ptr c, mp_ptr v2, mp_ptr vm1,
1 0 1 0 0; v1
0 1 0 1 0; vm1
0 0 0 0 1] v0
- Some vaues already are in-place (we added vm1 in the correct position)
+ Some values already are in-place (we added vm1 in the correct position)
| vinf| v1 | v0 |
| vm1 |
One still is in a separated area
diff --git a/mpn/ia64/ia64-defs.m4 b/mpn/ia64/ia64-defs.m4
index 419adc437..e32ac14f5 100644
--- a/mpn/ia64/ia64-defs.m4
+++ b/mpn/ia64/ia64-defs.m4
@@ -88,7 +88,7 @@ m4_assert_defined(`IA64_ALIGN_OK')
dnl Usage: ASSERT([pr] [,code])
dnl
-dnl Require that the given predictate register is true after executing the
+dnl Require that the given predicate register is true after executing the
dnl test code. For example,
dnl
dnl ASSERT(p6,
diff --git a/mpn/mips64/README b/mpn/mips64/README
index 65a1af166..571604169 100644
--- a/mpn/mips64/README
+++ b/mpn/mips64/README
@@ -30,7 +30,7 @@ RELEVANT OPTIMIZATION ISSUES
On the R4600, branches takes a single cycle
- On the R8000, branches often take no noticable cycles, as they are
+ On the R8000, branches often take no noticeable cycles, as they are
executed in a separate function unit..
2. The R4000 and R4400 have a load latency of 4 cycles.
diff --git a/mpn/powerpc64/README b/mpn/powerpc64/README
index 020ad23cd..25d313077 100644
--- a/mpn/powerpc64/README
+++ b/mpn/powerpc64/README
@@ -139,7 +139,7 @@ Problem is to get 32-bit or 16-bit words to the fp registers. Only 64-bit fp
memops copies bits without fiddling with them. We might therefore need to
load to integer registers with zero extension, store as 64 bits into temp
space, and then load to fp regs. Alternatively, load directly to fp space
-and add well-chosen constants to get cancelation. (Other part after given by
+and add well-chosen constants to get cancellation. (Other part after given by
subsequent subtraction.)
Possible code mix for load-via-intregs variant:
diff --git a/mpn/powerpc64/mode64/p6/aorsmul_1.asm b/mpn/powerpc64/mode64/p6/aorsmul_1.asm
index 4bd508488..25c96fe53 100644
--- a/mpn/powerpc64/mode64/p6/aorsmul_1.asm
+++ b/mpn/powerpc64/mode64/p6/aorsmul_1.asm
@@ -33,7 +33,7 @@ C * Reduce register usage.
C * Schedule function entry code.
C * Unroll more. 8-way unrolling would bring us to 10 c/l, 16-way unrolling
C would bring us to 9 c/l.
-C * Handle n = 1 and perhaps n = 2 seperately, without saving any registers.
+C * Handle n = 1 and perhaps n = 2 separately, without saving any registers.
C INPUT PARAMETERS
define(`rp', `r3')
diff --git a/mpn/s390_32/esame/addmul_1.asm b/mpn/s390_32/esame/addmul_1.asm
index b9a57ac92..8023e445b 100644
--- a/mpn/s390_32/esame/addmul_1.asm
+++ b/mpn/s390_32/esame/addmul_1.asm
@@ -37,7 +37,7 @@ define(`z', `%r9')
ASM_START()
PROLOGUE(mpn_addmul_1)
stm %r9, %r12, 36(%r15)
- lhi %r12, 0 C zero index reister
+ lhi %r12, 0 C zero index register
ahi %r12, 0 C clear carry fla
lhi %r11, 0 C clear carry limb
lhi z, 0 C clear carry limb
diff --git a/mpn/vax/gmp-mparam.h b/mpn/vax/gmp-mparam.h
index f4362822c..4ed9ad7fb 100644
--- a/mpn/vax/gmp-mparam.h
+++ b/mpn/vax/gmp-mparam.h
@@ -18,7 +18,7 @@ You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */
/* These numbers were measured manually using the tune/speed program.
- The standard tune/tunup takes too long. (VAX 8800) */
+ The standard tune/tuneup takes too long. (VAX 8800) */
#define MUL_TOOM22_THRESHOLD 14
#define MUL_TOOM33_THRESHOLD 110
diff --git a/mpn/x86/fat/fat.c b/mpn/x86/fat/fat.c
index bb42eb9bf..c0f464aa5 100644
--- a/mpn/x86/fat/fat.c
+++ b/mpn/x86/fat/fat.c
@@ -272,7 +272,7 @@ __gmpn_cpuvec_init (void)
case 0x09: /* Banias */
case 0x0d: /* Dothan */
case 0x0e: /* Yonah */
- TRACE (printf (" Banias/Bothan/Yonah\n"));
+ TRACE (printf (" Banias/Dothan/Yonah\n"));
CPUVEC_SETUP_p6_mmx;
CPUVEC_SETUP_p6_p3mmx;
CPUVEC_SETUP_p6_sse2;
@@ -387,9 +387,9 @@ __gmpn_cpuvec_init (void)
case 0x0f: /* k8 */
case 0x11: /* "fam 11h", mix of k8 and k10 */
- case 0x13: /* unknown, conservativeky assume k8 */
- case 0x16: /* unknown, conservativeky assume k8 */
- case 0x17: /* unknown, conservativeky assume k8 */
+ case 0x13: /* unknown, conservatively assume k8 */
+ case 0x16: /* unknown, conservatively assume k8 */
+ case 0x17: /* unknown, conservatively assume k8 */
TRACE (printf (" k8\n"));
CPUVEC_SETUP_k7;
CPUVEC_SETUP_k7_mmx;
diff --git a/mpn/x86/k7/mmx/divrem_1.asm b/mpn/x86/k7/mmx/divrem_1.asm
index 8c771a32b..fe98f8d61 100644
--- a/mpn/x86/k7/mmx/divrem_1.asm
+++ b/mpn/x86/k7/mmx/divrem_1.asm
@@ -445,7 +445,7 @@ C chain, and nothing better than 18 cycles has been found when using it.
C The jump is taken only when q1 is 0xFFFFFFFF, and on random data this will
C be an extremely rare event.
C
-C Branch mispredictions will hit random occurrances of q1==0xFFFFFFFF, but
+C Branch mispredictions will hit random occurrences of q1==0xFFFFFFFF, but
C if some special data is coming out with this always, the q1_ff special
C case actually runs at 15 c/l. 0x2FFF...FFFD divided by 3 is a good way to
C induce the q1_ff case, for speed measurements or testing. Note that
diff --git a/mpn/x86/pentium4/sse2/popcount.asm b/mpn/x86/pentium4/sse2/popcount.asm
index 2f06984c7..434faa084 100644
--- a/mpn/x86/pentium4/sse2/popcount.asm
+++ b/mpn/x86/pentium4/sse2/popcount.asm
@@ -49,7 +49,7 @@ C VIA nano 6.5
C TODO
C * Make a mpn_hamdist based on this. Alignment could either be handled by
C using movdqu for one operand and movdqa for the other, or by painfully
-C shifting as we go. Unfortunately, there seem to be no useable shift
+C shifting as we go. Unfortunately, there seem to be no usable shift
C instruction, except for one that takes an immediate count.
C * It would probably be possible to cut a few cycles/limb using software
C pipelining.
diff --git a/mpn/x86_64/aors_err1_n.asm b/mpn/x86_64/aors_err1_n.asm
index 6016c1062..12fab65f2 100644
--- a/mpn/x86_64/aors_err1_n.asm
+++ b/mpn/x86_64/aors_err1_n.asm
@@ -22,7 +22,7 @@ dnl along with the GNU MP Library. If not, see http://www.gnu.org/licenses/.
include(`../config.m4')
C cycles/limb
-C AMD K8,K9 2.75 (most alignments, degenerates to 3 c/l for some aligments)
+C AMD K8,K9 2.75 (degenerates to 3 c/l for some alignments)
C AMD K10 ?
C Intel P4 ?
C Intel core2 ?
diff --git a/mpn/x86_64/missing-call.m4 b/mpn/x86_64/missing-call.m4
index 8d8c92a0b..40c307987 100644
--- a/mpn/x86_64/missing-call.m4
+++ b/mpn/x86_64/missing-call.m4
@@ -1,4 +1,4 @@
-dnl AMD64 MULX/ADX simultion support, function call version.
+dnl AMD64 MULX/ADX simulation support, function call version.
dnl Contributed to the GNU project by Torbjörn Granlund.
diff --git a/mpn/x86_64/missing-inline.m4 b/mpn/x86_64/missing-inline.m4
index 32cf430b1..69c2a075e 100644
--- a/mpn/x86_64/missing-inline.m4
+++ b/mpn/x86_64/missing-inline.m4
@@ -1,4 +1,4 @@
-dnl AMD64 MULX/ADX simultion support, inline version.
+dnl AMD64 MULX/ADX simulation support, inline version.
dnl Contributed to the GNU project by Torbjörn Granlund.
diff --git a/mpn/x86_64/missing.asm b/mpn/x86_64/missing.asm
index 71943b74b..9c8156c29 100644
--- a/mpn/x86_64/missing.asm
+++ b/mpn/x86_64/missing.asm
@@ -1,4 +1,5 @@
-dnl AMD64 MULX/ADX simultion support.
+
+ dnl AMD64 MULX/ADX simulation support.
dnl Contributed to the GNU project by Torbjörn Granlund.
diff --git a/mpn/x86_64/mullo_basecase.asm b/mpn/x86_64/mullo_basecase.asm
index a066b8256..4c456edda 100644
--- a/mpn/x86_64/mullo_basecase.asm
+++ b/mpn/x86_64/mullo_basecase.asm
@@ -31,7 +31,7 @@ C size congruence class.
C * Stop iterating addmul_2 earlier, falling into straight-line triangle code
C for the last 2-3 iterations.
C * Perhaps implement n=4 special code.
-C * The reload of the outer loop jump address hurts branch preditiction.
+C * The reload of the outer loop jump address hurts branch prediction.
C * The addmul_2 loop ends with an MUL whose high part is not used upon loop
C exit.
diff --git a/mpn/x86_64/mulx/adx/addmul_1.asm b/mpn/x86_64/mulx/adx/addmul_1.asm
index 17524f819..db46326db 100644
--- a/mpn/x86_64/mulx/adx/addmul_1.asm
+++ b/mpn/x86_64/mulx/adx/addmul_1.asm
@@ -43,7 +43,7 @@ define(`v0_param',`%rcx') dnl r9
define(`n', `%rcx') dnl
define(`v0', `%rdx') dnl
-C Testing mechanism for running this on older AMD64 processrs
+C Testing mechanism for running this on older AMD64 processors
ifelse(FAKE_MULXADX,1,`
include(CONFIG_TOP_SRCDIR`/mpn/x86_64/missing-call.m4')
',`
diff --git a/mpz/2fac_ui.c b/mpz/2fac_ui.c
index 2fd7c7f1c..60ccc87ca 100644
--- a/mpz/2fac_ui.c
+++ b/mpz/2fac_ui.c
@@ -62,7 +62,7 @@ mpz_2fac_ui (mpz_ptr x, unsigned long n)
mp_limb_t *factors, prod, max_prod, j;
TMP_SDECL;
- /* FIXME: we might alloc a fixed ammount 1+FAC_2DSC_THRESHOLD/FACTORS_PER_LIMB */
+ /* FIXME: we might alloc a fixed amount 1+FAC_2DSC_THRESHOLD/FACTORS_PER_LIMB */
TMP_SMARK;
factors = TMP_SALLOC_LIMBS (1 + n / (2 * FACTORS_PER_LIMB));
diff --git a/mpz/bin_uiui.c b/mpz/bin_uiui.c
index d86fb298e..73c4e90dc 100644
--- a/mpz/bin_uiui.c
+++ b/mpz/bin_uiui.c
@@ -149,7 +149,7 @@ typedef mp_limb_t (* mulfunc_t) (mp_limb_t);
static const mulfunc_t mulfunc[] = {mul1,mul2,mul3,mul4,mul5,mul6,mul7,mul8};
#define M (numberof(mulfunc))
-/* Number of factors-of-2 removed by the corresponding mulN functon. */
+/* Number of factors-of-2 removed by the corresponding mulN function. */
static const unsigned char tcnttab[] = {0, 1, 1, 2, 2, 4, 4, 6};
#if 1
diff --git a/mpz/prodlimbs.c b/mpz/prodlimbs.c
index 867688755..dc411f98e 100644
--- a/mpz/prodlimbs.c
+++ b/mpz/prodlimbs.c
@@ -38,7 +38,7 @@ along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */
/* Computes the product of the j>1 limbs pointed by factors, puts the
* result in x. It assumes that all limbs are non-zero. Above
- * Karatsuba's threshold it uses a binary splitting startegy, to gain
+ * Karatsuba's threshold it uses a binary splitting strategy, to gain
* speed by the asymptotically fast multiplication algorithms.
*
* The list in {factors, j} is overwritten.
diff --git a/mpz/set_f.c b/mpz/set_f.c
index 3ea14e312..13e4768cf 100644
--- a/mpz/set_f.c
+++ b/mpz/set_f.c
@@ -52,7 +52,7 @@ mpz_set_f (mpz_ptr w, mpf_srcptr u)
}
else
{
- /* exp<=size, trucate to the high "exp" many limbs */
+ /* exp<=size, truncate to the high "exp" many limbs */
up += (size - exp);
size = exp;
}
diff --git a/printf/doprnt.c b/printf/doprnt.c
index c1ee0a2ae..3555979cd 100644
--- a/printf/doprnt.c
+++ b/printf/doprnt.c
@@ -94,7 +94,7 @@ along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */
In GMP 4.1.1 we documented "ll" and "L" as being equivalent, but in C99
in fact "ll" is just for long long and "L" just for long double.
- Apparentely GLIBC allows "L" for long long though. This doesn't affect
+ Apparently GLIBC allows "L" for long long though. This doesn't affect
us as such, since both are passed through to the C library. To be
consistent with what we said before, the two are treated equivalently
here, and it's left to the C library to do what it thinks with them.
diff --git a/tests/mpf/t-eq.c b/tests/mpf/t-eq.c
index 8e645b690..7ce67676b 100644
--- a/tests/mpf/t-eq.c
+++ b/tests/mpf/t-eq.c
@@ -86,7 +86,7 @@ check_data (void)
if (got != got_swapped || got != data[i].want)
{
- printf ("check_data() wrong reault at data[%d]\n", i);
+ printf ("check_data() wrong result at data[%d]\n", i);
mpf_trace ("x ", x);
mpf_trace ("y ", y);
printf ("got %d\n", got);
diff --git a/tests/mpf/t-sub.c b/tests/mpf/t-sub.c
index 8cd1e2c1b..2a6606f0d 100644
--- a/tests/mpf/t-sub.c
+++ b/tests/mpf/t-sub.c
@@ -179,7 +179,7 @@ check_data (void)
if (mpf_cmp (got, want) != 0)
{
- printf ("check_data() wrong reault at data[%d] (operands%s swapped)\n", i, swap ? "" : " not");
+ printf ("check_data() wrong result at data[%d] (operands%s swapped)\n", i, swap ? "" : " not");
mpf_trace ("x ", x);
mpf_trace ("y ", y);
mpf_trace ("got ", got);
diff --git a/tests/mpq/t-cmp.c b/tests/mpq/t-cmp.c
index 9aaed6afd..20e4723ce 100644
--- a/tests/mpq/t-cmp.c
+++ b/tests/mpq/t-cmp.c
@@ -44,7 +44,7 @@ ref_mpq_cmp (mpq_t a, mpq_t b)
}
#ifndef SIZE
-#define SIZE 8 /* increasing this lowers the probabilty of finding an error */
+#define SIZE 8 /* increasing this lowers the probability of finding an error */
#endif
int
diff --git a/tests/mpq/t-cmp_ui.c b/tests/mpq/t-cmp_ui.c
index 5b606f7bf..571e43ff1 100644
--- a/tests/mpq/t-cmp_ui.c
+++ b/tests/mpq/t-cmp_ui.c
@@ -44,7 +44,7 @@ ref_mpq_cmp_ui (mpq_t a, unsigned long int bn, unsigned long int bd)
}
#ifndef SIZE
-#define SIZE 8 /* increasing this lowers the probabilty of finding an error */
+#define SIZE 8 /* increasing this lowers the probability of finding an error */
#endif
int
diff --git a/tests/mpz/t-jac.c b/tests/mpz/t-jac.c
index 2ccb7cc9f..42c78c1d3 100644
--- a/tests/mpz/t-jac.c
+++ b/tests/mpz/t-jac.c
@@ -631,11 +631,11 @@ check_data (void)
"4902678867794567120224500687210807069172039735", 0 },
{ "2666617146103764067061017961903284334497474492754652499788571378062969111250584288683585223600172138551198546085281683283672592", "2666617146103764067061017961903284334497474492754652499788571378062969111250584288683585223600172138551198546085281683290481773", 1 },
- /* Exersizes the case asize == 1, btwos > 0 in mpz_jacobi. */
+ /* Exercises the case asize == 1, btwos > 0 in mpz_jacobi. */
{ "804609", "421248363205206617296534688032638102314410556521742428832362659824", 1 } ,
{ "4190209", "2239744742177804210557442048984321017460028974602978995388383905961079286530650825925074203175536427000", 1 },
- /* Exersizes the case asize == 1, btwos = 63 in mpz_jacobi
+ /* Exercises the case asize == 1, btwos = 63 in mpz_jacobi
(relevant when GMP_LIMB_BITS == 64). */
{ "17311973299000934401", "1675975991242824637446753124775689449936871337036614677577044717424700351103148799107651171694863695242089956242888229458836426332300124417011114380886016", 1 },
{ "3220569220116583677", "41859917623035396746", -1 },
diff --git a/tests/spinner.c b/tests/spinner.c
index 90b20aca2..c485a00c5 100644
--- a/tests/spinner.c
+++ b/tests/spinner.c
@@ -94,7 +94,7 @@ spinner_init (void)
}
alarm (1);
- /* unbufferred output so the spinner will show up */
+ /* unbuffered output so the spinner will show up */
setbuf (stdout, NULL);
}
diff --git a/tune/README b/tune/README
index 80acd7b1e..3e17b46ea 100644
--- a/tune/README
+++ b/tune/README
@@ -287,7 +287,7 @@ mpn_divrem_1, using division by 32 as an example.
EXAMPLE COMPARISONS - MULTIPLICATION
-mul_basecase takes a ".<r>" parameter. If positivie, it gives the second
+mul_basecase takes a ".<r>" parameter. If positive, it gives the second
(smaller) operand size. For example to show speeds for 3x3 up to 20x3 in
cycles,
diff --git a/tune/common.c b/tune/common.c
index 59cab7996..859eecbf4 100644
--- a/tune/common.c
+++ b/tune/common.c
@@ -350,7 +350,7 @@ speed_cache_fill (struct speed_params *s)
}
-/* Miscellanous options accepted by tune and speed programs under -o. */
+/* Miscellaneous options accepted by tune and speed programs under -o. */
void
speed_option_set (const char *s)
diff --git a/tune/powerpc.asm b/tune/powerpc.asm
index 4391ec102..15a48806d 100644
--- a/tune/powerpc.asm
+++ b/tune/powerpc.asm
@@ -6,7 +6,7 @@ dnl This file is part of the GNU MP Library.
dnl
dnl The GNU MP Library is free software; you can redistribute it and/or
dnl modify it under the terms of the GNU Lesser General Public License as
-dnl published by the Free Software Foundationn; either version 3 of the
+dnl published by the Free Software Foundation; either version 3 of the
dnl License, or (at your option) any later version.
dnl
dnl The GNU MP Library is distributed in the hope that it will be useful,
diff --git a/tune/powerpc64.asm b/tune/powerpc64.asm
index eb705466d..8a5d5143a 100644
--- a/tune/powerpc64.asm
+++ b/tune/powerpc64.asm
@@ -6,7 +6,7 @@ dnl This file is part of the GNU MP Library.
dnl
dnl The GNU MP Library is free software; you can redistribute it and/or
dnl modify it under the terms of the GNU Lesser General Public License as
-dnl published by the Free Software Foundationn; either version 3 of the
+dnl published by the Free Software Foundation; either version 3 of the
dnl License, or (at your option) any later version.
dnl
dnl The GNU MP Library is distributed in the hope that it will be useful,
diff --git a/tune/speed.h b/tune/speed.h
index b68993ff8..9331de252 100644
--- a/tune/speed.h
+++ b/tune/speed.h
@@ -787,7 +787,7 @@ int speed_routine_count_zeros_setup (struct speed_params *, mp_ptr, int, int);
TMP_MARK; \
SPEED_TMP_ALLOC_LIMBS (wp, s->size, s->align_wp); \
\
- /* (don't have a mechnanism to specify zp alignments) */ \
+ /* (don't have a mechanism to specify zp alignments) */ \
for (i = 0; i < K; i++) \
SPEED_TMP_ALLOC_LIMBS (zp[i], s->size, 0); \
\
diff --git a/tune/time.c b/tune/time.c
index a9e684ecf..86410c694 100644
--- a/tune/time.c
+++ b/tune/time.c
@@ -1,4 +1,4 @@
-/* Time routines for speed measurments.
+/* Time routines for speed measurements.
Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2010, 2011, 2012 Free Software
Foundation, Inc.
diff --git a/tune/tuneup.c b/tune/tuneup.c
index 20f9161e6..0d24a97a3 100644
--- a/tune/tuneup.c
+++ b/tune/tuneup.c
@@ -1828,7 +1828,7 @@ tune_gcdext_dc (void)
/* In tune_powm_sec we compute the table used by the win_size function. The
cutoff points are in exponent bits, disregarding other operand sizes. It is
- not possible to use the one framework since it currently uses a granilarity
+ not possible to use the one framework since it currently uses a granularity
of full limbs.
*/