author     enge <enge@211d60ee-9f03-0410-a15a-8952a2c7a4e4>   2014-01-23 15:05:17 +0000
committer  enge <enge@211d60ee-9f03-0410-a15a-8952a2c7a4e4>   2014-01-23 15:05:17 +0000
commit     4bdab35c7f9419c1b7df6e90a931f2cbba61014a (patch)
tree       bc635e2adfd56ed03ec4a105ba8caab3ee2654b1
parent     9889af2c744d3016021e91eb4a6c9e03a23397e8 (diff)
download   mpc-4bdab35c7f9419c1b7df6e90a931f2cbba61014a.tar.gz
Finished rewrite of AGM in algorithms.tex, added bibliography entry.
git-svn-id: svn://scm.gforge.inria.fr/svn/mpc/trunk@1430 211d60ee-9f03-0410-a15a-8952a2c7a4e4
-rw-r--r--  doc/algorithms.bib |   9
-rw-r--r--  doc/algorithms.tex | 438
2 files changed, 312 insertions, 135 deletions
diff --git a/doc/algorithms.bib b/doc/algorithms.bib
index b6ad6b3..b6af68e 100644
--- a/doc/algorithms.bib
+++ b/doc/algorithms.bib
@@ -1,3 +1,12 @@
+@article {Cox84,
+ author = {David A. Cox},
+ title = {The arithmetic-geometric mean of {G}auss},
+ journal = {L'Enseignement Mathématique},
+ year = {1984},
+ volume = {30},
+ pages = {275--330}
+}
+
@phdthesis {Dupont06,
author = {R\'egis Dupont},
title = {Moyenne arithm\'etico-g\'eom\'etrique, suites de
diff --git a/doc/algorithms.tex b/doc/algorithms.tex
index 275ab3a..8b27b04 100644
--- a/doc/algorithms.tex
+++ b/doc/algorithms.tex
@@ -1,4 +1,4 @@
-\documentclass [12pt]{article}
+\documentclass [11pt]{article}
\usepackage[a4paper]{geometry}
\usepackage[utf8]{inputenc}
@@ -24,8 +24,8 @@
\newcommand {\round}{\operatorname {\circ}}
\DeclareMathOperator{\pinf}{\bigtriangleup}
\DeclareMathOperator{\minf}{\bigtriangledown}
-\DeclareMathOperator{\N}{\mathcal N}
\DeclareMathOperator{\A}{\mathcal A}
+\newcommand {\N}{\mathbb N}
\newcommand {\Z}{\mathbb Z}
\newcommand {\Q}{\mathbb Q}
\newcommand {\R}{\mathbb R}
@@ -35,6 +35,7 @@
\renewcommand {\leq}{\leqslant}
\renewcommand {\geq}{\geqslant}
\newcommand {\AGM}{\operatorname{AGM}}
+\newcommand {\sign}{\operatorname{sign}}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}[theorem]{Lemma}
@@ -50,7 +51,7 @@
\title {MPC: Algorithms and Error Analysis}
\author {Andreas Enge \and Philippe Th\'eveny \and Paul Zimmermann}
-\date {Draft; September 18, 2012}
+\date {Draft; January 23, 2014}
\begin {document}
\maketitle
@@ -538,6 +539,7 @@ For $0 < x_1 \leq 1$, we have $\appro {x_1}^2/2 \leq \appro {x_1}/2$ and
\subsection {Complex functions}
\subsubsection {Addition/subtraction}
+\label {sssec:propadd}
Using the notation introduced above, we consider
\[
@@ -1899,84 +1901,200 @@ Thus, assuming that $n 2^{-p} \leq 1$, the error estimates
\eqref {eq:powui_re} and \eqref {eq:powui_im} are valid with $n$
in the place of $n - 1$.
-\subsection{\texttt {mpc\_agm}}
+\subsection{\texttt {mpc\_agm}, \texttt {mpc\_agm1}}
+
+\paragraph {Definition.}
+Let $a$, $b$ be non-zero complex numbers.
+Define sequences of arithmetic means $(a_n)$
+and geometric means $(b_n)$
+by $\corr {a_0} = a$, $\corr {b_0} = b$,
+$\corr {a_{n+1}} = \frac {\corr {a_n} + \corr {b_n}}{2}$ and
+$\corr {b_{n+1}} = \sqrt {\corr {a_n} \corr {b_n}}$.
+At each step, there is a choice of sign for the square root.
+If $\corr {a_n}$ and $\corr {b_n}$ are at an angle different from $0$
+and $\pi$, then
+they define a two-dimensional pointed cone in the complex plane.
+Notice that $\corr {a_{n+1}}$ lies in this cone.
+Following \cite {Cox84} we call \emph {right} the choice that also makes
+$\corr {b_{n+1}}$ lie in the cone, and following \cite {CrTh10}
+we call the resulting sequences \emph {optimal}.
+The two sequences then converge to a common limit, the
+\emph {arithmetic-geometric mean}
+$\AGM (a, b)$.
+
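To make the choice of square root concrete, here is a minimal sketch in plain
double-precision Python (an illustration only, not part of the patch and
independent of MPC); it selects the square root through the criterion
$|a_{n+1} - b_{n+1}| \leq |a_{n+1} + b_{n+1}|$, which the text removed further
below identifies with the optimal choice.

import cmath

def agm_step(a, b):
    # One AGM step: arithmetic mean, and the square root chosen so that
    # |a1 - b1| <= |a1 + b1| (the "right"/optimal choice described in the
    # text).
    a1 = (a + b) / 2
    b1 = cmath.sqrt(a * b)
    if abs(a1 - b1) > abs(a1 + b1):
        b1 = -b1
    return a1, b1

def agm(a, b, iters=30):
    # With the right choice at every step, the two sequences converge to a
    # common limit; 30 iterations exceed what double precision can resolve.
    for _ in range(iters):
        a, b = agm_step(a, b)
    return a

print(agm(1, 1j))   # about 0.5991 + 0.5991j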
+It is immediate that $\AGM$ is symmetric, that is,
+$\AGM (a, b) = \AGM (b, a)$, and homogeneous, that is,
+$\AGM (\lambda a, \lambda b) = \lambda \AGM (a, b)$ for any non-zero
+complex number $\lambda$.
+
+So we may assume that $|a| \geq |b|$, and
+$\AGM (a, b) = a \AGM (a_0, b_0)$
+with $\corr {a_0} = 1$, $\corr {b_0} = b/a$, $|\corr {b_0}| \leq 1$.
+
+We need to examine the corner cases.
+If one or both of $a$ and $b$ are zero, all geometric means are zero,
+and $\AGM (a, b) = 0$.
+If the angle between $a$ and $b$ is~$0$, then $\corr {b_0}$ is a positive real
+number, and $\AGM (1, \corr {b_0})$ may be computed with \mpfr.
+If the angle between $a$ and $b$ is $\pi$, then $\corr {b_0}$ is a negative real
+number larger than or equal to $-1$. The arithmetic mean of $1$ and $-1$
+is zero, so $\AGM (1, -1) = 0$.
+If $\corr {b_0} > -1$, we take the first geometric mean with a positive imaginary
+part, so that also $\Im (\AGM (1, \corr {b_0}))$ will be positive.
+Notice that $\Im (\AGM (1, \corr {b_0}))$ has the same sign as
+$\Im (\corr {b_0})$ unless
+$\corr {b_0}$ is real, so this choice determines our branch cut for $\AGM$.
+
+So in the following, we analyse the computation of $\AGM (1, \appro {b_0})$
+with $\appro {b_0} = \round (\corr {b_0}) \in \C \backslash \R^{\geq 0}$,
+where the real and imaginary parts of $\appro {b_0}$ are rounded towards~$0$,
+which ensures $| \appro {b_0} | \leq 1$
+with absolute errors of the real and imaginary parts at most \ulp {1}.
+In the following, relative errors will be most convenient to work with;
+by Proposition~\ref {prop:comabstorelerror} we have
+$\relerror (\appro {b_0}) \leq 2^{1-p}$, where $p$ is the working precision.
+
+
+\paragraph {The first iteration -- entering a quadrant.}
+If $\Re (\appro {b_0}) < 0$, then significant cancellation can occur for the
+arithmetic mean in the first iteration, which thus needs to be analysed
+separately.
+
+From now on, we use arbitrary rounding modes and apply
+Proposition~\ref {prop:comrelround} with $c = 1$
+(except for $\Re (\appro {a_1})$, which we need to round down or,
+equivalently, towards~$0$ for a later step in the analysis).
+We let $\corr {b_1} = \sqrt {\corr {b_0}}$ and
+$\appro {b_1} = \round \left( \sqrt {\appro {b_0}} \right)$ with
+\[
+\relerror (\appro {b_1}) \leq
+2^{1-p} + \left( 1 + 2^{1-p} \right) 2^{1-p}
+\leq 2^2 \cdot 2^{1-p}
+\]
+by \eqref {eq:propsqrt} and Proposition~\ref {prop:comrelround}.
+We let $\corr {a_1} = \frac {1 + \corr {b_0}}{2}$ and
+$\appro {a_1} = \round \left( \frac {1 + \appro {b_0}}{2} \right)$.
+The imaginary part of $\appro {a_1}$ has an error of at most \ulp{1},
+but following the discussion of \S\ref {sssec:propadd}, its real part
+may have an absolute error measured in $\Ulp$ of
+\[
+2^{\Exp (\Re (\appro {b_0})) - \Exp (\Re (\appro {a_1}))} + 1
+\leq
+2^{- \Exp (\Re (\appro {a_1}))} + 1
+\leq
+2^{- \Exp (\Re (\appro {a_1})) + 1},
+\]
+where we used that both $\Re (\appro {b_0})$ and $\Re (\appro {a_1})$ have
+non-positive exponents.
+By Proposition~\ref {prop:comabstorelerror},
+\[
+\relerror (\appro {a_1})
+\leq
+2^{- \Exp (\Re (\appro {a_1})) + 1} \cdot 2^{1-p}.
+\]
+If $\Re (\appro {b_0}) \geq 0$, then $\Exp (\Re (\appro {a_1})) = 0$, and
+we obtain a bound that is similar to the one above for
+$\relerror (\appro {b_1})$. If $\appro {b_0}$ approaches $-1$, then
+the relative error in $\appro {a_1}$ becomes arbitrarily bad, as
+$\appro {a_1}$ becomes arbitrarily small.
+
+Notice that, independently of the rounding mode,
+$\appro {a_1}$ and $\appro {b_1}$ lie in the same complex quadrant
+of numbers having non-negative real part and an imaginary part of the same
+sign as that of $\appro {b_0}$
+(or of positive imaginary part if $\appro {b_0}$ is real).
+During the remainder of the algorithm, we will not leave this quadrant
+and thus not see any more cancellation in the arithmetic mean.
+
+
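As a small numerical illustration of this cancellation (plain Python with a
hypothetical value of $\appro {b_0}$, not part of the patch): for
$\appro {b_0}$ close to $-1$, the arithmetic mean becomes tiny while the
square root keeps modulus close to~$1$, and both land in the quadrant just
described.

import cmath

b0 = -0.999 + 1e-6j     # hypothetical input with real part close to -1
a1 = (1 + b0) / 2       # severe cancellation: the exponent drops sharply
b1 = cmath.sqrt(b0)     # principal root: positive imaginary part
print(a1)               # about 0.0005 + 5e-07j
print(b1)               # about 5e-07 + 0.9995j
# Both values have non-negative real part and positive imaginary part, so
# the iteration has entered the quadrant described in the text and no
# further cancellation occurs in the arithmetic means.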
+\paragraph {The idea of the analysis.}
+
+Let us first give the basic idea of the following, rather technical analysis.
+Assume a target precision of $N$ bits, that is, a target
+relative error of $2^{-N}$. If $a$ and $b$ are of roughly equal size, the AGM
+iteration converges quadratically, that is, the number of correct digits
+doubles in each step, and we need about $\log_2 N$ iterations. Unfortunately,
+when $a$ and $b$ are of very different sizes, we have slower convergence.
+To illustrate this, consider $a = 1$ and $b = 2^{-e}$; during the first
+iterations, $a_n \approx 2^{-n}$ and $b_n \approx 2^{-e/2^n}$. So we need
+to increase the number of iterations by roughly $\log_2$ of the absolute
+value of the exponent of $b/a$; the precise bound $B (N)$ on the number
+of iterations is given in~\eqref {eq:agmbound}.
+Moreover, when $\Re (b/a) < 0$, the situation of
+very different exponents in $a$ and $b$ may occur after one iteration through
+cancellation as explained above.
+
+Unlike Newton iterations, AGM iterations are not self-correcting:
+rounding errors propagate, and we lose a constant number of bits per
+iteration. So to reach the desired accuracy of $N$ bits, we need to carry
+out all computations at a working precision of about $p = N + c \, B (N)$
+for some small constant~$c$, where $B (N)$ also depends on $b / a$ through
+its exponents.
+The following discussion provides explicit bounds for all these quantities.
+
+
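The following plain-Python sketch (an illustration only; real positive inputs,
so the square root is unambiguous) counts iterations until $a$ and $b$ agree
to roughly 48 bits and shows the extra $\log_2$ term caused by a very small
second operand.

import math

def iterations_needed(e, bits=48):
    # Count AGM steps for a = 1, b = 2^(-e) until the relative difference
    # drops below 2^(-bits).
    a, b = 1.0, 2.0 ** (-e)
    n = 0
    while abs(a - b) > 2.0 ** (-bits) * a:
        a, b = (a + b) / 2, math.sqrt(a * b)
        n += 1
    return n

for e in (4, 16, 64, 256, 1000):
    print(e, iterations_needed(e))
# The count grows roughly like log2(e) + log2(bits), in line with the shape
# of the bound B(N) derived below.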
+\paragraph {Error propagation.}
Let
-\[
-z = \AGM (1, z_1).
-\]
-
-For the time being, we assume $\Re (z_1) \geq 0$ and $\Im (z_1) > 0$.
-Let $\corr {a_0} = \appro {a_0} = 1$,
-$\corr {b_0} = \appro {b_0} = z_1$, and let
-$\corr {a_n} = \frac {\corr {a_{n-1}} + \corr {b_{n-1}}}{2}$ and
-$\corr {b_n} = \sqrt {\corr {a_{n-1}} \corr {b_{n-1}}}$
-be the values of the AGM iteration performed with infinite precision and
-$\appro {a_n}, \appro {b_n}$ those performed with $p$-bit precision
-and some fixed rounding mode; let $c = \frac {1}{2}$ if this rounding mode
-is to nearest, $c = 1$ otherwise.
-Let $\eta_n = \relerror (\appro {a_n})$ and
-$\epsilon_n = \relerror (\appro {b_n})$.
-
-Among others, we will show in the following, using induction, that
-$\Re (\appro {a_n})$, $\Re (\appro {b_n}) \geq 0$ and
-$\Im (\appro {a_n})$, $\Im (\appro {b_n}) > 0$, regardless of the rounding mode.
-By assumption, this holds for $n = 0$.
-
-Let $c_n = \frac {\appro {a_{n-1}} + \appro {b_{n-1}}}{2}$, so that
-$\appro {a_n} = \round(c_n)$ satisfies
-$\Re (\appro {a_n}) \geq 0$, $\Im (\appro {a_n}) > 0$.
-By \eqref {eq:propaddrel}, the error of
-$c_n$ (relative to the correct value $a_n$) is bounded above by
-$\sqrt 2 \, \max (\epsilon_{n-1}, \eta_{n-1})$, division by~$2$ being exact.
-After rounding, we obtain
+$\corr {a_n} = \frac {\corr {a_{n-1}} + \corr {b_{n-1}}}{2}$,
+$\corr {c_n} = \corr {a_{n-1}} \corr {b_{n-1}}$,
+$\corr {b_n} = \sqrt {\corr {c_n}}$.
+The computation of $\appro {a_1}$ and $\appro {b_1}$ and their error
+analysis have been given above.
+For $n \geq 2$, we compute the sequences
+\begin {eqnarray*}
+\appro {a_n}
+& = & \round \left( \frac {\appro {a_{n-1}} + \appro {b_{n-1}}}{2} \right), \\
+\appro {c_n}
+& = & \round \left( \appro {a_{n-1}} \appro {b_{n-1}} \right), \\
+\appro {b_n}
+& = & \round \left( \sqrt {\appro {c_n}} \right).
+\end {eqnarray*}
+Then one sees by induction that
+$\appro {a_n}$ and $\appro {b_n}$ lie in the same quadrant as
+$\appro {a_1}$ and $\appro {b_1}$
+(or, for that matter, $\corr {a_1}$ and $\corr {b_1}$).
+
+Let $\alpha_n = \relerror (\appro {a_n}) / 2^{1-p}$,
+$\gamma_n = \relerror (\appro {c_n}) / 2^{1-p}$ and
+$\beta_n = \relerror (\appro {b_n}) / 2^{1-p}$.
+
+By \eqref {eq:propaddrel} and Proposition~\ref {prop:comrelround},
\begin {equation}
-\label {eq:agmeta}
-\eta_n \leq
-\sqrt 2 \, \max (\eta_{n-1}, \epsilon_{n-1})
-+ c \left( 1 + \sqrt 2 \, \max (\eta_{n-1}, \epsilon_{n-1}) \right) 2^{1-p}
+\label {eq:agmalpha}
+\alpha_n \leq
+\sqrt 2 \, \max (\alpha_{n-1}, \beta_{n-1})
+\left( 1 + 2^{1-p} \right) + 1.
\end {equation}
-by Proposition~\ref {prop:comrelround}.
-
-Let $d_n = \round \left( \appro {a_{n-1}} \appro {b_{n-1}} \right)$,
-so that $\Im (d_n) \geq 0$, and $\Im (d_n) = 0$ occurs only if
-$\appro {a_{n-1}}$ and $\appro {b_{n-1}}$ are both purely imaginary
-and $\Re (d_n) < 0$.
By \eqref {eq:propmulrel} and Proposition~\ref {prop:comrelround},
-the error of $d_n$ (relative to $a_{n-1} b_{n-1}$) is bounded above by
\begin {equation}
-\label {eq:agmzeta}
-\zeta_n =
-\eta_{n-1} + \epsilon_{n-1} + \eta_{n-1} \epsilon_{n-1}
-+ c (1 + \eta_{n-1} + \epsilon_{n-1} + \eta_{n-1} \epsilon_{n-1}) 2^{1-p}.
+\label {eq:agmgamma}
+\gamma_n \leq
+\left( \alpha_{n-1} + \beta_{n-1} + \alpha_{n-1} \beta_{n-1} 2^{1-p} \right)
+\left( 1 + 2^{1-p} \right) + 1.
\end {equation}
-Now $\appro {b_n} = \round (\sqrt {d_n})$ satisfies
-$\Re (\appro {b_n}) \geq 0$ and $\Im (\appro {b_n}) > 0$,
-and by \eqref {eq:propsqrt} and Proposition~\ref {prop:comrelround},
-assuming $\zeta_n \leq 1$, we obtain
+If we assume for the time being that $\gamma_n \leq 1 / (2^{1-p})$
+(which will be shown under additional constraints later), then
+\eqref {eq:propsqrt} and Proposition~\ref {prop:comrelround} imply
\begin {equation}
-\label {eq:agmepsilon}
-\epsilon_n \leq
-\zeta_n + c (1 + \zeta_n) 2^{1-p}.
+\label {eq:agmbeta}
+\beta_n \leq
+\gamma_n (1 + 2^{1-p}) + 1.
\end {equation}
-Let $r_n = (2^n - 1) d$ for some $d > 2 c$. Then one can show that
-for a sufficiently high
-precision~$p$ and not too many steps~$n$ compared to~$p$, one has
-$\eta_n, \epsilon_n \leq r_n 2^{1-p}$.
-Now we will prove this explicitly for $d=2^k$ with $k \geq 2$
-and $c \leq 1$, and show
-$\eta_n, \epsilon_n \leq r_n 2^{1-p} \leq 2^{n + k + 1 - p}$
-for $p \geq 2 (n + k) - 1$
-by induction over \eqref {eq:agmeta}, \eqref {eq:agmzeta}
-and~\eqref {eq:agmepsilon}. For $n = 0$, we have $\eta_0 = \epsilon_0 = 0$.
-Inductively, by \eqref {eq:agmzeta} we obtain
-\[
-\frac {\zeta_n}{2^{1-p}} \leq
+Let $r_n = (2^n - 1) r_1$ for some integer $r_1$ such that
+$\alpha_1$, $\beta_1 \leq r_1$.
+We may use $r_1 = 2^k$ with $k = \max \left( 2, 1 - \Exp (\Re (\appro {a_1})) \right)$
+by the discussion of the first iteration above.
+We now show by induction that
+$\alpha_n, \beta_n \leq r_n \leq 2^{n + k}$ for $p \geq 2 (n + k) - 1$,
+that is, if the number of iterations $n$ is not too large compared to
+the working precision $p$.
+By \eqref {eq:agmgamma} we obtain
+\[
+\gamma_n \leq
2 r_{n-1} + 1 +
\left( r_{n-1}^2 + 2 r_{n-1} + r_{n-1}^2 2^{1-p} \right) 2^{1-p}.
\]
-From $r_{n-1} < 2^{k + n-1}$ we obtain
+From $r_{n-1} < 2^{n - 1 + k}$ we deduce
\[
r_{n-1}^2 2^{1-p} < 2^{2 (n + k) - 1 - p} \leq 1
\]
@@ -1984,33 +2102,35 @@ as long as $p \geq 2 (n + k) - 1$, and then, under the same condition,
\[
\left( r_{n-1}^2 + 2 r_{n-1} + r_{n-1}^2 2^{1-p} \right) 2^{1-p}
\leq
-(r_{n-1} + 1)^2 \, 2^{1-p} \leq (2^{k+n-1})^2 \, 2^{1-p} \leq 1.
+(r_{n-1} + 1)^2 \, 2^{1-p} \leq (2^{n+k-1})^2 \, 2^{1-p} \leq 1.
\]
Then
\[
-\frac {\zeta_n}{2^{1-p}}
+\gamma_n
\leq
2 r_{n-1} + 2
\leq
r_n - 2.
\]
-By \eqref {eq:agmepsilon},
+By \eqref {eq:agmbeta},
\[
-\frac {\epsilon_n}{2^{1-p}} \leq
+\beta_n
+\leq
r_n - 1 + r_n 2^{1-p}
\leq
r_n - 1 + 2^{n+k+1-p}
\leq r_n
\]
-under a milder than the previous condition. Finally by \eqref {eq:agmeta},
+under a condition milder than the previous one. Finally, by \eqref {eq:agmalpha},
for $p \geq 3$,
\[
-\frac {\eta_n}{2^{1-p}} \leq
+\alpha_n
+\leq
\sqrt 2 \, r_{n-1} + 1 + \sqrt {2} \, r_{n-1} 2^{1-p}
-\leq \frac{5}{4} \sqrt{2} r_{n-1} + 1 \leq 2 r_{n-1} + 2^k = r_n.
+\leq \frac{5}{4} \sqrt{2} r_{n-1} + 1 \leq 2 r_{n-1} + r_1 = r_n.
\]
This finishes the induction argument.
-To summarise, we have
+To summarise, we have for any fixed $n$ that
\begin {equation}
\label {eq:propagm}
\corr {a_n} = (1 + \theta_1) \appro {a_n}
@@ -2020,40 +2140,123 @@ To summarise, we have
2 (n + k) - 1 \leq p.
\end {equation}
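The induction can also be checked numerically; the following plain-Python
sketch (an illustration only) iterates the three upper bounds just
established, starting from the worst case $\alpha_1 = \beta_1 = 2^k$, and
verifies $\alpha_n, \beta_n \leq r_n$.

import math

def check_propagation(p, k, nmax):
    # Iterate the bounds of eq:agmalpha, eq:agmgamma and eq:agmbeta from the
    # worst case alpha_1 = beta_1 = r_1 = 2^k and check that
    # alpha_n, beta_n <= r_n = (2^n - 1) 2^k.  A numerical sanity check only.
    u = 2.0 ** (1 - p)
    alpha = beta = 2.0 ** k
    for n in range(2, nmax + 1):
        gamma = (alpha + beta + alpha * beta * u) * (1 + u) + 1
        new_alpha = math.sqrt(2) * max(alpha, beta) * (1 + u) + 1
        beta = gamma * (1 + u) + 1
        alpha = new_alpha
        r = (2 ** n - 1) * 2 ** k
        assert alpha <= r and beta <= r, (n, alpha, beta, r)

check_propagation(p=64, k=2, nmax=20)   # satisfies p >= 2 (n + k) - 1
print("bounds verified up to n = 20")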
-We now use \cite[Prop.~3.3]{Dupont06}, which states that for
-$z_1 \neq 0, 1$ and
-\begin{equation} \label{eq:agmbound}
-n \geq B (N, z_1)
- = \max \left( 1, \left\lceil \log_2 |\log_2 |z_1|| \right\rceil \right)
- + \lceil \log_2 (N+4) \rceil + 2
+
+\paragraph {Mathematical error.}
+
+We also need to estimate the error made by carrying out only a finite number
+of iterations. Let $z$ satisfy $\Re (z) \geq 0$ and $z \neq 0, 1$, and
+consider the optimal AGM sequences $(a_n')$ and $(b_n')$ computed (with
+infinite precision) from $a_0' = 1$ and $b_0' = z$. Let $N' \in \N$ and
+\[
+n \geq B' (N', z)
+ := \max \left( 1, \left\lceil \log_2 |\log_2 |z|| \right\rceil \right)
+ + \lceil \log_2 (N'+2) \rceil + 2
+\]
+(where $\log_2 0$ is to be understood as $- \infty$).
+Then by \cite[Prop.~3.3, p.~88]{Dupont06},
+\[
+a_n' = (1 + \theta_2) \AGM (1, z)
+\text { with }
+|\theta_2| \leq 2^{-N'}.
+\]
+(Notice that here, the relative error as defined in
+\cite[Def.~1.2, p.~20]{Dupont06} is taken with the roles of the correct
+and the approximated values reversed compared to our definition.)
+
+In our context, we may have $\Re (\corr {b_0}) < 0$, but after one iteration,
+$\Re (\corr {b_1} / \corr {a_1}) \geq 0$. So we consider
+$\corr {z} = b_0' = \corr {b_1} / \corr {a_1}$, so that by
+homogeneity, $\corr {a_n} = \corr {a_1} a_{n-1}'$,
+$\corr {b_n} = \corr {a_1} b_{n-1}'$ and
+$\AGM (1, \corr {b_0}) = \AGM (\corr {a_1}, \corr {b_1})
+= \corr {a_1} \AGM (1, \corr {z})$.
+Here
+\[
+|z|
+= \left| \frac {\corr {b_1}}{\corr {a_1}} \right|
+= \frac {\sqrt {|\corr {b_0}|}}{|\corr {a_1}|}.
+\]
+Since $|\corr {b_0}| \leq 1$ and $|\corr {a_1}| \geq \Re (\corr {a_1})$,
+this quantity is bounded above by
+\[
+\frac {1}{\Re (\corr {a_1})}
+\leq
+2^{1 - \Exp (\Re (\corr {a_1}))}
+=
+2^{1 - \Exp (\Re (\appro {a_1}))}
+\]
+(since $\Re (\appro {a_1})$ was rounded down);
+and since $|\corr {a_1}| \leq 1$ and
+$\sqrt {|\corr {b_0}|} \geq |\corr {b_0}|$, it is bounded below by
+\[
+|\corr {b_0}|
+\geq
+\max \left( |\Re (\corr {b_0})|, |\Im (\corr {b_0})| \right)
+\geq
+\frac {1}{2} \,
+2^{\max \left( \Exp (\Re (\corr {b_0})), \Exp (\Im (\corr {b_0})) \right)}
+=
+2^{\max \left( \Exp (\Re (\appro {b_0})), \Exp (\Im (\appro {b_0})) \right)
+- 1}
+\]
+(since both parts of $\appro {b_0}$ were rounded towards~$0$).
+Recall that $\corr {a_n} = \corr {a_1} a_{n-1}'$, so we need
+$n - 1 \geq B' (N+2, \corr z)$; the last term of the following bound is one
+unit larger than in $B'$ to account for this shift.
+So for $n \geq B (N)$ with
+\begin{equation}
+\label{eq:agmbound}
+\begin {aligned}
+B (N)
+ = & \left\lceil \log_2 \max (- \Exp (\Re (\appro {a_1})) + 1,
+                              - \Exp (\Re (\appro {b_0})) + 2,
+                              - \Exp (\Im (\appro {b_0})) + 2
+                 ) \right\rceil \\
+ & + \lceil \log_2 (N+4) \rceil + 3
+\end {aligned}
\end{equation}
-(where $\log_2 0$ is to be understood as $- \infty$),
-we have
+(in which all exponents are non-positive)
+we reach
\[
-a_n = (1 + \theta_2) z
+\corr {a_n} = (1 + \theta_2) \AGM (1, \corr {b_0})
\text { with }
|\theta_2| \leq 2^{-(N+2)}.
\]
-So taking $\appro z = \appro {a_n}$ as an approximation for $z$, we obtain
-$z = \frac {1 + \theta_1}{1 + \theta_2} \appro z
-= (1 + \theta) \appro z$ with
+
+
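For reference, a small Python helper (an illustration only) evaluating the
bound eq:agmbound; the exponent arguments are assumed to be ordinary integers
standing in for the exponents delivered by the underlying MPFR layer.

import math

def agm_iteration_bound(N, exp_re_a1, exp_re_b0, exp_im_b0):
    # B(N) as in eq:agmbound; the exponent arguments are non-positive
    # integers (plain Python ints standing in for mpfr exponents).
    m = max(-exp_re_a1 + 1, -exp_re_b0 + 2, -exp_im_b0 + 2)
    return math.ceil(math.log2(m)) + math.ceil(math.log2(N + 4)) + 3

# Example: N = 53 and all three exponents equal to -1 gives
# ceil(log2 3) + ceil(log2 57) + 3 = 2 + 6 + 3 = 11 iterations.
print(agm_iteration_bound(53, -1, -1, -1))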
+\paragraph {Total error and working precision.}
+
+Combining with \eqref {eq:propagm} we obtain
+$\AGM (1, \corr {b_0}) = \frac {1 + \theta_1}{1 + \theta_2} \appro {a_n}
+= (1 + \theta) \appro {a_n}$ with
\[
|\theta| \leq \frac {|\theta_1| + |\theta_2|}{|1 - |\theta_2||}
-\leq 2 \left( 2^{n+k+1-p} + 2^{-(N+2)} \right).
+\leq \frac {4}{3} \, \left( 2^{n+k+1-p} + 2^{-(N+2)} \right)
\]
+for $N \geq 2$.
+So after $n = B (N)$ steps of the AGM iteration at a working precision
+of $p \geq N + n + k + 3$, we obtain $\appro {a_n}$ with a relative error
+bounded by $\epsilon = \frac {2}{3} \cdot 2^{-N}$.
+
+Finally, we let $\corr {z} = \AGM (a, b) = a \AGM (1, \corr {b_0})$ and
+$\appro {z} = \round (a \appro {a_n})$, where $a$ is known
+exactly.
+By~\eqref {eq:propmulrel} and Proposition~\ref {prop:comabstorelerror},
+using that $N \geq 2$ implies $B (N) \geq 7$ and $p \geq 12$,
+this leads to a relative error bounded by $2^{-N}$.
-So after $n = B (N, z_1)$ steps of the AGM iteration at a working precision
-of $p = N + n + k + 3$, we obtain $\appro z = \appro {a_n}$ with a relative error
-bounded by $2^{-N}$.
-Note that with $p = N + n + k + 3$, the constraint $2 (n + k) + 1 \leq p$ means
-$n + k \leq N+2$. Depending on the value of $z_1$, one might have to take
-$N$ larger than the required accuracy to ensure \eqref{eq:agmbound} is
-fulfilled.
+\paragraph {Parameter choice.}
-Using Propositions~\ref {prop:comrelerror} and~\ref {prop:relerror}, this
+Recall that in our analysis, $k = \max \left( 2, 1 - \Exp (\Re (\appro {a_1})) \right)$
+is determined by the input data. We then choose a target accuracy of~$N$ bits,
+which determines the number of iterations~$n = B (N)$ by~\eqref {eq:agmbound}.
+Then the working precision may be chosen as
+\[
+p = \max (2 (n + k) - 1,
+ N + n + k + 3).
+\]
+
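A corresponding sketch of the parameter choice (again an illustration only,
reusing the agm_iteration_bound helper from above):

def agm_parameters(N, exp_re_a1, exp_re_b0, exp_im_b0):
    # k from the analysis of the first iteration, n = B(N) iterations, and
    # the working precision p chosen as above.
    k = max(2, 1 - exp_re_a1)
    n = agm_iteration_bound(N, exp_re_a1, exp_re_b0, exp_im_b0)
    p = max(2 * (n + k) - 1, N + n + k + 3)
    return k, n, p

print(agm_parameters(53, -1, -1, -1))   # (2, 11, 69)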
+Using Propositions~\ref {prop:comrelerror} and~\ref {prop:relerror}, the
complex relative error may be translated into an error expressed in $\Ulp$.
-With $\appro {z} = \appro x + i \appro y$, let
+With $\appro {z} = \appro x + i \appro y$
+the computed approximation of $\corr {z} = \AGM (a, b)$, let
$k_R = \max (\Exp (\appro y) - \Exp (\appro x) + 1, 0) + 1$, and
$k_I = \max (\Exp (\appro x) - \Exp (\appro y) + 1, 0) + 1$.
Then we have
@@ -2064,43 +2267,8 @@ In practice, one should take this additional loss into account.
If rounding fails after the first computation, the values of $k_R$ and
$k_I$ will nevertheless most likely not change at a larger precision.
So one should let $k' = \max (k_R, k_I)$, replace $N$ by $N + k'$
-and adapt the precision accordingly.
-
-If $\Im(z_1) < 0$, then we can use the fact that $\AGM(1,\overline z_1)$ is
-the complex
-conjugate of $\AGM(1,z_1)$, thus the same error analysis applies;
-and if $\Im(z_1) = 0$, we are computing a real AGM and can call the
-corresponding MPFR function.
-
-Now assume $z_1$ is the rounding of some complex number $z_0$ with a
-relative error at most $2^{1-p}$. Then we have to replace $\epsilon_0 = 0$
-by $\epsilon_0 = 2^{1-p}$ in the above proof.
-This gives
-\[ \zeta_1 \leq \epsilon_0 + c (1 + \epsilon_0) 2^{1-p}
- \leq \left( 2 + 2^{1-p} \right) 2^{1-p} \leq \frac{9}{4} \, 2^{1-p}, \]
-and
-\[ \epsilon_1 \leq \zeta_1 + c (1 + \zeta_1) 2^{1-p}
- \leq \left( \frac{9}{4} + 1 + \frac{9}{4} \, 2^{1-p} \right) 2^{1-p}
- \leq 4 \cdot 2^{1-p} \]
-as long as $p \geq 3$. Thus the bound $\epsilon_1 \leq r_1 2^{1-p}$ still
-holds in that case.
-By \eqref {eq:agmeta}, also $\eta_1 \leq 4 \cdot 2^{1-p} = r_1 2^{1-p}$.
-
-\paragraph{The general case.}
-
-Assume now that $a, b$ are two arbitrary complex numbers, and we want to
-approximate $z = \AGM(a, b)$.
-We first have to precisely define $\AGM(a, b)$. At each step indeed there are
-two choices for $b_n = \sqrt{a_{n-1} b_{n-1}}$. We take a \emph{good} choice
-so that $|a_n - b_n| \leq |a_n + b_n|$; this corresponds to an \emph{optimal}
-AGM sequence \cite{CrTh10}. Unless $a_{n-1} / b_{n-1}$ is real, which happens
-exactly for $a / b$ real, this sequence is unique.
-It is shown in \cite{Dupont06} that the good choice is homogeneous with respect
-to multiplication of $a$ and $b$ by a non-zero complex number $\lambda$,
-so that $\AGM (\lambda a, \lambda b) = \lambda \AGM (a, b)$.
-In particular, $\AGM(a,b) = a \AGM(1,b/a) = b \AGM (1, a/b)$.
-We can thus restrict ourselves to the special case $\AGM(1, z)$ studied above
-with moreover $|z| \leq 1$.
+and adapt the precision and number of iterations accordingly.
+
\bibliographystyle{acm}
\bibliography{algorithms}